1 // Copyright 2008 The RE2 Authors.  All Rights Reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4 
5 // A DFA (deterministic finite automaton)-based regular expression search.
6 //
7 // The DFA search has two main parts: the construction of the automaton,
8 // which is represented by a graph of State structures, and the execution
9 // of the automaton over a given input string.
10 //
11 // The basic idea is that the State graph is constructed so that the
12 // execution can simply start with a state s, and then for each byte c in
13 // the input string, execute "s = s->next[c]", checking at each point whether
14 // the current s represents a matching state.
15 //
16 // The simple explanation just given does convey the essence of this code,
17 // but it omits the details of how the State graph gets constructed as well
18 // as some performance-driven optimizations to the execution of the automaton.
19 // All these details are explained in the comments for the code following
20 // the definition of class DFA.
21 //
22 // See http://swtch.com/~rsc/regexp/ for a very bare-bones equivalent.
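//
// As a bare sketch of that execution model (illustrative names only; the
// real search loop below adds state caching, byte classes and empty-width
// handling):
//
//   State* s = start;
//   for (const char* p = text.begin(); p != text.end(); p++) {
//     s = s->next[*p];      // follow the arrow for this input byte
//     if (s->IsMatch())
//       lastmatch = p;      // a match ended just before this byte
//   }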
23 
24 #include <stddef.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <string.h>
28 #include <algorithm>
29 #include <atomic>
30 #include <deque>
31 #include <new>
32 #include <string>
33 #include <utility>
34 #include <vector>
35 
36 #include "absl/base/call_once.h"
37 #include "absl/base/macros.h"
38 #include "absl/base/thread_annotations.h"
39 #include "absl/container/flat_hash_map.h"
40 #include "absl/container/flat_hash_set.h"
41 #include "absl/strings/str_format.h"
42 #include "absl/synchronization/mutex.h"
43 #include "absl/types/span.h"
44 #include "util/logging.h"
45 #include "util/strutil.h"
46 #include "re2/pod_array.h"
47 #include "re2/prog.h"
48 #include "re2/re2.h"
49 #include "re2/sparse_set.h"
50 
51 // Silence "zero-sized array in struct/union" warning for DFA::State::next_.
52 #ifdef _MSC_VER
53 #pragma warning(disable: 4200)
54 #endif
55 
56 namespace re2 {
57 
58 // Controls whether the DFA should bail out early if the NFA would be faster.
59 static bool dfa_should_bail_when_slow = true;
60 
61 void Prog::TESTING_ONLY_set_dfa_should_bail_when_slow(bool b) {
62   dfa_should_bail_when_slow = b;
63 }
64 
65 // Changing this to true compiles in prints that trace execution of the DFA.
66 // Generates a lot of output -- only useful for debugging.
67 static const bool ExtraDebug = false;
68 
69 // A DFA implementation of a regular expression program.
70 // Since this is entirely a forward declaration mandated by C++,
71 // some of the comments here are better understood after reading
72 // the comments in the sections that follow the DFA definition.
73 class DFA {
74  public:
75   DFA(Prog* prog, Prog::MatchKind kind, int64_t max_mem);
76   ~DFA();
77   bool ok() const { return !init_failed_; }
78   Prog::MatchKind kind() { return kind_; }
79 
80   // Searches for the regular expression in text, which is considered
81   // as a subsection of context for the purposes of interpreting flags
82   // like ^ and $ and \A and \z.
83   // Returns whether a match was found.
84   // If a match is found, sets *ep to the end point of the best match in text.
85   // If "anchored", the match must begin at the start of text.
86   // If "want_earliest_match", the match that ends first is used, not
87   //   necessarily the best one.
88   // If "run_forward" is true, the DFA runs from text.begin() to text.end().
89   //   If it is false, the DFA runs from text.end() to text.begin(),
90   //   returning the leftmost end of the match instead of the rightmost one.
91   // If the DFA cannot complete the search (for example, if it is out of
92   //   memory), it sets *failed and returns false.
93   bool Search(absl::string_view text, absl::string_view context, bool anchored,
94               bool want_earliest_match, bool run_forward, bool* failed,
95               const char** ep, SparseSet* matches);
96 
97   // Builds out all states for the entire DFA.
98   // If cb is not empty, it receives one callback per state built.
99   // Returns the number of states built.
100   // FOR TESTING OR EXPERIMENTAL PURPOSES ONLY.
101   int BuildAllStates(const Prog::DFAStateCallback& cb);
102 
103   // Computes min and max for matching strings.  Won't return strings
104   // bigger than maxlen.
105   bool PossibleMatchRange(std::string* min, std::string* max, int maxlen);
106 
107   // These data structures are logically private, but C++ makes it too
108   // difficult to mark them as such.
109   class RWLocker;
110   class StateSaver;
111   class Workq;
112 
113   // A single DFA state.  The DFA is represented as a graph of these
114   // States, linked by the next_ pointers.  If in state s and reading
115   // byte c, the next state should be s->next_[c].
116   struct State {
117     inline bool IsMatch() const { return (flag_ & kFlagMatch) != 0; }
118 
119     template <typename H>
120     friend H AbslHashValue(H h, const State& a) {
121       const absl::Span<const int> ainst(a.inst_, a.ninst_);
122       return H::combine(std::move(h), a.flag_, ainst);
123     }
124 
125     friend bool operator==(const State& a, const State& b) {
126       const absl::Span<const int> ainst(a.inst_, a.ninst_);
127       const absl::Span<const int> binst(b.inst_, b.ninst_);
128       return &a == &b || (a.flag_ == b.flag_ && ainst == binst);
129     }
130 
131     int* inst_;         // Instruction pointers in the state.
132     int ninst_;         // # of inst_ pointers.
133     uint32_t flag_;     // Empty string bitfield flags in effect on the way
134                         // into this state, along with kFlagMatch if this
135                         // is a matching state.
136 
137     std::atomic<State*> next_[];    // Outgoing arrows from State,
138                                     // one per input byte class
139   };
140 
141   enum {
142     kByteEndText = 256,         // imaginary byte at end of text
143 
144     kFlagEmptyMask = 0xFF,      // State.flag_: bits holding kEmptyXXX flags
145     kFlagMatch = 0x0100,        // State.flag_: this is a matching state
146     kFlagLastWord = 0x0200,     // State.flag_: last byte was a word char
147     kFlagNeedShift = 16,        // needed kEmpty bits are or'ed in shifted left
148   };
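  //
  // As an illustration (values chosen for the example), a matching state
  // that was entered right after a newline, whose previous byte was a word
  // character, and that still has a pending \b test would pack its flags as
  //
  //   flag_ = kEmptyBeginLine                          // empty-width context
  //         | kFlagMatch | kFlagLastWord               // DFA-specific bits
  //         | (kEmptyWordBoundary << kFlagNeedShift);  // flags still needed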
149 
150   struct StateHash {
151     size_t operator()(const State* a) const {
152       DCHECK(a != NULL);
153       return absl::Hash<State>()(*a);
154     }
155   };
156 
157   struct StateEqual {
158     bool operator()(const State* a, const State* b) const {
159       DCHECK(a != NULL);
160       DCHECK(b != NULL);
161       return *a == *b;
162     }
163   };
164 
165   typedef absl::flat_hash_set<State*, StateHash, StateEqual> StateSet;
166 
167  private:
168   // Make it easier to swap in a scalable reader-writer mutex.
169   using CacheMutex = absl::Mutex;
170 
171   enum {
172     // Indices into start_ for unanchored searches.
173     // Add kStartAnchored for anchored searches.
174     kStartBeginText = 0,          // text at beginning of context
175     kStartBeginLine = 2,          // text at beginning of line
176     kStartAfterWordChar = 4,      // text follows a word character
177     kStartAfterNonWordChar = 6,   // text follows non-word character
178     kMaxStart = 8,
179 
180     kStartAnchored = 1,
181   };
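  //
  // Selecting a start state is then a small index computation; in sketch
  // form (the real selection happens in AnalyzeSearch, declared below):
  //
  //   int idx = kStartBeginLine;   // or kStartBeginText, etc., per context
  //   if (anchored)
  //     idx |= kStartAnchored;
  //   StartInfo* info = &start_[idx];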
182 
183   // Resets the DFA State cache, flushing all saved State* information.
184   // Releases and reacquires cache_mutex_ via cache_lock, so any
185   // State* pointers existing before the call are not valid after the call.
186   // Use a StateSaver to preserve important states across the call.
187   // Before: cache_mutex_.r <= L < mutex_
188   // After: cache_mutex_.w <= L < mutex_
189   void ResetCache(RWLocker* cache_lock);
190 
191   // Looks up and returns the State corresponding to a Workq.
192   // L >= mutex_
193   State* WorkqToCachedState(Workq* q, Workq* mq, uint32_t flag);
194 
195   // Looks up and returns a State matching the inst, ninst, and flag.
196   // L >= mutex_
197   State* CachedState(int* inst, int ninst, uint32_t flag);
198 
199   // Clear the cache entirely.
200   // Must hold cache_mutex_.w or be in destructor.
201   void ClearCache();
202 
203   // Converts a State into a Workq: the opposite of WorkqToCachedState.
204   // L >= mutex_
205   void StateToWorkq(State* s, Workq* q);
206 
207   // Runs a State on a given byte, returning the next state.
208   State* RunStateOnByteUnlocked(State*, int);  // cache_mutex_.r <= L < mutex_
209   State* RunStateOnByte(State*, int);          // L >= mutex_
210 
211   // Runs a Workq on a given byte followed by a set of empty-string flags,
212   // producing a new Workq in nq.  If a match instruction is encountered,
213   // sets *ismatch to true.
214   // L >= mutex_
215   void RunWorkqOnByte(Workq* q, Workq* nq,
216                       int c, uint32_t flag, bool* ismatch);
217 
218   // Runs a Workq on a set of empty-string flags, producing a new Workq in nq.
219   // L >= mutex_
220   void RunWorkqOnEmptyString(Workq* q, Workq* nq, uint32_t flag);
221 
222   // Adds the instruction id to the Workq, following empty arrows
223   // according to flag.
224   // L >= mutex_
225   void AddToQueue(Workq* q, int id, uint32_t flag);
226 
227   // For debugging, returns a text representation of State.
228   static std::string DumpState(State* state);
229 
230   // For debugging, returns a text representation of a Workq.
231   static std::string DumpWorkq(Workq* q);
232 
233   // Search parameters
234   struct SearchParams {
235     SearchParams(absl::string_view text, absl::string_view context,
236                  RWLocker* cache_lock)
237       : text(text),
238         context(context),
239         anchored(false),
240         can_prefix_accel(false),
241         want_earliest_match(false),
242         run_forward(false),
243         start(NULL),
244         cache_lock(cache_lock),
245         failed(false),
246         ep(NULL),
247         matches(NULL) {}
248 
249     absl::string_view text;
250     absl::string_view context;
251     bool anchored;
252     bool can_prefix_accel;
253     bool want_earliest_match;
254     bool run_forward;
255     State* start;
256     RWLocker* cache_lock;
257     bool failed;     // "out" parameter: whether search gave up
258     const char* ep;  // "out" parameter: end pointer for match
259     SparseSet* matches;
260 
261    private:
262     SearchParams(const SearchParams&) = delete;
263     SearchParams& operator=(const SearchParams&) = delete;
264   };
265 
266   // Before each search, the parameters to Search are analyzed by
267   // AnalyzeSearch to determine the state in which to start.
268   struct StartInfo {
269     StartInfo() : start(NULL) {}
270     std::atomic<State*> start;
271   };
272 
273   // Fills in params->start and params->can_prefix_accel using
274   // the other search parameters.  Returns true on success,
275   // false on failure.
276   // cache_mutex_.r <= L < mutex_
277   bool AnalyzeSearch(SearchParams* params);
278   bool AnalyzeSearchHelper(SearchParams* params, StartInfo* info,
279                            uint32_t flags);
280 
281   // The generic search loop, inlined to create specialized versions.
282   // cache_mutex_.r <= L < mutex_
283   // Might unlock and relock cache_mutex_ via params->cache_lock.
284   template <bool can_prefix_accel,
285             bool want_earliest_match,
286             bool run_forward>
287   inline bool InlinedSearchLoop(SearchParams* params);
288 
289   // The specialized versions of InlinedSearchLoop.  The three letters
290   // at the ends of the name denote the true/false values used as the
291   // last three parameters of InlinedSearchLoop.
292   // cache_mutex_.r <= L < mutex_
293   // Might unlock and relock cache_mutex_ via params->cache_lock.
294   bool SearchFFF(SearchParams* params);
295   bool SearchFFT(SearchParams* params);
296   bool SearchFTF(SearchParams* params);
297   bool SearchFTT(SearchParams* params);
298   bool SearchTFF(SearchParams* params);
299   bool SearchTFT(SearchParams* params);
300   bool SearchTTF(SearchParams* params);
301   bool SearchTTT(SearchParams* params);
302 
303   // The main search loop: calls an appropriate specialized version of
304   // InlinedSearchLoop.
305   // cache_mutex_.r <= L < mutex_
306   // Might unlock and relock cache_mutex_ via params->cache_lock.
307   bool FastSearchLoop(SearchParams* params);
308 
309 
310   // Looks up bytes in bytemap_ but handles case c == kByteEndText too.
311   int ByteMap(int c) {
312     if (c == kByteEndText)
313       return prog_->bytemap_range();
314     return prog_->bytemap()[c];
315   }
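  // For example (illustrative numbers): with a bytemap_range() of 5, each
  // next_ array has 6 slots; ByteMap() maps an ordinary byte to its byte
  // class in [0, 5), and ByteMap(kByteEndText) returns 5, the extra slot
  // reserved for the imaginary end-of-text byte.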
316 
317   // Constant after initialization.
318   Prog* prog_;              // The regular expression program to run.
319   Prog::MatchKind kind_;    // The kind of DFA.
320   bool init_failed_;        // initialization failed (out of memory)
321 
322   absl::Mutex mutex_;  // mutex_ >= cache_mutex_.r
323 
324   // Scratch areas, protected by mutex_.
325   Workq* q0_;             // Two pre-allocated work queues.
326   Workq* q1_;
327   PODArray<int> stack_;   // Pre-allocated stack for AddToQueue
328 
329   // State* cache.  Many threads use and add to the cache simultaneously,
330   // holding cache_mutex_ for reading and mutex_ (above) when adding.
331   // If the cache fills and needs to be discarded, the discarding is done
332   // while holding cache_mutex_ for writing, to avoid interrupting other
333   // readers.  Any State* pointers are only valid while cache_mutex_
334   // is held.
335   CacheMutex cache_mutex_;
336   int64_t mem_budget_;     // Total memory budget for all States.
337   int64_t state_budget_;   // Amount of memory remaining for new States.
338   StateSet state_cache_;   // All States computed so far.
339   StartInfo start_[kMaxStart];
340 
341   DFA(const DFA&) = delete;
342   DFA& operator=(const DFA&) = delete;
343 };
344 
345 // Shorthand for casting to uint8_t*.
346 static inline const uint8_t* BytePtr(const void* v) {
347   return reinterpret_cast<const uint8_t*>(v);
348 }
349 
350 // Work queues
351 
352 // Marks separate thread groups of different priority
353 // in the work queue when in leftmost-longest matching mode.
354 #define Mark (-1)
355 
356 // Separates the match IDs from the instructions in inst_.
357 // Used only for "many match" DFA states.
358 #define MatchSep (-2)
359 
360 // Internally, the DFA uses a sparse array of
361 // program instruction pointers as a work queue.
362 // In leftmost longest mode, marks separate sections
363 // of workq that started executing at different
364 // locations in the string (earlier locations first).
365 class DFA::Workq : public SparseSet {
366  public:
367   // Constructor: n is number of normal slots, maxmark number of mark slots.
368   Workq(int n, int maxmark) :
369     SparseSet(n+maxmark),
370     n_(n),
371     maxmark_(maxmark),
372     nextmark_(n),
373     last_was_mark_(true) {
374   }
375 
376   bool is_mark(int i) { return i >= n_; }
377 
378   int maxmark() { return maxmark_; }
379 
380   void clear() {
381     SparseSet::clear();
382     nextmark_ = n_;
383   }
384 
385   void mark() {
386     if (last_was_mark_)
387       return;
388     last_was_mark_ = true;
389     SparseSet::insert_new(nextmark_++);
390   }
391 
392   int size() {
393     return n_ + maxmark_;
394   }
395 
396   void insert(int id) {
397     if (contains(id))
398       return;
399     insert_new(id);
400   }
401 
402   void insert_new(int id) {
403     last_was_mark_ = false;
404     SparseSet::insert_new(id);
405   }
406 
407  private:
408   int n_;                // size excluding marks
409   int maxmark_;          // maximum number of marks
410   int nextmark_;         // id of next mark
411   bool last_was_mark_;   // last inserted was mark
412 
413   Workq(const Workq&) = delete;
414   Workq& operator=(const Workq&) = delete;
415 };
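// For example (hypothetical instruction ids), a leftmost-longest search
// might fill a Workq as
//
//   q->insert(5); q->insert(9);   // threads that started matching earliest
//   q->mark();                    // priority boundary
//   q->insert(2);                 // thread started later, lower priority
//
// which iterates in insertion order as 5, 9, <mark>, 2; DumpWorkq() below
// would render it as "5,9|2".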
416 
417 DFA::DFA(Prog* prog, Prog::MatchKind kind, int64_t max_mem)
418   : prog_(prog),
419     kind_(kind),
420     init_failed_(false),
421     q0_(NULL),
422     q1_(NULL),
423     mem_budget_(max_mem) {
424   if (ExtraDebug)
425     absl::FPrintF(stderr, "\nkind %d\n%s\n", kind_, prog_->DumpUnanchored());
426   int nmark = 0;
427   if (kind_ == Prog::kLongestMatch)
428     nmark = prog_->size();
429   // See DFA::AddToQueue() for why this is so.
430   int nstack = prog_->inst_count(kInstCapture) +
431                prog_->inst_count(kInstEmptyWidth) +
432                prog_->inst_count(kInstNop) +
433                nmark + 1;  // + 1 for start inst
434 
435   // Account for space needed for DFA, q0, q1, stack.
436   mem_budget_ -= sizeof(DFA);
437   mem_budget_ -= (prog_->size() + nmark) *
438                  (sizeof(int)+sizeof(int)) * 2;  // q0, q1
439   mem_budget_ -= nstack * sizeof(int);  // stack
440   if (mem_budget_ < 0) {
441     init_failed_ = true;
442     return;
443   }
444 
445   state_budget_ = mem_budget_;
446 
447   // Make sure there is a reasonable amount of working room left.
448   // At minimum, the search requires room for two states in order
449   // to limp along, restarting frequently.  We'll get better performance
450   // if there is room for a larger number of states, say 20.
451   // Note that a state stores list heads only, so we use the program
452   // list count for the upper bound, not the program size.
453   int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
454   int64_t one_state = sizeof(State) + nnext*sizeof(std::atomic<State*>) +
455                       (prog_->list_count()+nmark)*sizeof(int);
456   if (state_budget_ < 20*one_state) {
457     init_failed_ = true;
458     return;
459   }
460 
461   q0_ = new Workq(prog_->size(), nmark);
462   q1_ = new Workq(prog_->size(), nmark);
463   stack_ = PODArray<int>(nstack);
464 }
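// A rough worked example of the sizing check above (assumed figures, not
// measurements): with bytemap_range() == 20 and list_count() == 100 on a
// 64-bit machine, nnext is 21 and one_state comes to roughly
// 16 + 21*8 + 100*4 = 584 bytes, so at least ~12KB of budget must remain.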
465 
466 DFA::~DFA() {
467   delete q0_;
468   delete q1_;
469   ClearCache();
470 }
471 
472 // In the DFA state graph, s->next[c] == NULL means that the
473 // state has not yet been computed and needs to be.  We need
474 // a different special value to signal that s->next[c] is a
475 // state that can never lead to a match (and thus the search
476 // can be called off).  Hence DeadState.
477 #define DeadState reinterpret_cast<State*>(1)
478 
479 // Signals that the rest of the string matches no matter what it is.
480 #define FullMatchState reinterpret_cast<State*>(2)
481 
482 #define SpecialStateMax FullMatchState
483 
484 // Debugging printouts
485 
486 // For debugging, returns a string representation of the work queue.
487 std::string DFA::DumpWorkq(Workq* q) {
488   std::string s;
489   const char* sep = "";
490   for (Workq::iterator it = q->begin(); it != q->end(); ++it) {
491     if (q->is_mark(*it)) {
492       s += "|";
493       sep = "";
494     } else {
495       s += absl::StrFormat("%s%d", sep, *it);
496       sep = ",";
497     }
498   }
499   return s;
500 }
501 
502 // For debugging, returns a string representation of the state.
503 std::string DFA::DumpState(State* state) {
504   if (state == NULL)
505     return "_";
506   if (state == DeadState)
507     return "X";
508   if (state == FullMatchState)
509     return "*";
510   std::string s;
511   const char* sep = "";
512   s += absl::StrFormat("(%p)", state);
513   for (int i = 0; i < state->ninst_; i++) {
514     if (state->inst_[i] == Mark) {
515       s += "|";
516       sep = "";
517     } else if (state->inst_[i] == MatchSep) {
518       s += "||";
519       sep = "";
520     } else {
521       s += absl::StrFormat("%s%d", sep, state->inst_[i]);
522       sep = ",";
523     }
524   }
525   s += absl::StrFormat(" flag=%#x", state->flag_);
526   return s;
527 }
528 
529 //////////////////////////////////////////////////////////////////////
530 //
531 // DFA state graph construction.
532 //
533 // The DFA state graph is a heavily-linked collection of State* structures.
534 // The state_cache_ is a set of all the State structures ever allocated,
535 // so that if the same state is reached by two different paths,
536 // the same State structure can be used.  This reduces allocation
537 // requirements and also avoids duplication of effort across the two
538 // identical states.
539 //
540 // A State is defined by an ordered list of instruction ids and a flag word.
541 //
542 // The choice of an ordered list of instructions differs from a typical
543 // textbook DFA implementation, which would use an unordered set.
544 // Textbook descriptions, however, only care about whether
545 // the DFA matches, not where it matches in the text.  To decide where the
546 // DFA matches, we need to mimic the behavior of the dominant backtracking
547 // implementations like PCRE, which try one possible regular expression
548 // execution, then another, then another, stopping when one of them succeeds.
549 // The DFA execution tries these many executions in parallel, representing
550 // each by an instruction id.  These pointers are ordered in the State.inst_
551 // list in the same order that the executions would happen in a backtracking
552 // search: if a match is found during execution of inst_[2], inst_[i] for i>=3
553 // can be discarded.
554 //
555 // Textbooks also typically do not consider context-aware empty string operators
556 // like ^ or $.  These are handled by the flag word, which specifies the set
557 // of empty-string operators that should be matched when executing at the
558 // current text position.  These flag bits are defined in prog.h.
559 // The flag word also contains two DFA-specific bits: kFlagMatch if the state
560 // is a matching state (one that reached a kInstMatch in the program)
561 // and kFlagLastWord if the last processed byte was a word character, for the
562 // implementation of \B and \b.
563 //
564 // The flag word also contains, shifted up 16 bits, the bits looked for by
565 // any kInstEmptyWidth instructions in the state.  These provide a useful
566 // summary indicating when new flags might be useful.
567 //
568 // The permanent representation of a State's instruction ids is just an array,
569 // but while a state is being analyzed, these instruction ids are represented
570 // as a Workq, which is an array that allows iteration in insertion order.
571 
572 // NOTE(rsc): The choice of State construction determines whether the DFA
573 // mimics backtracking implementations (so-called leftmost first matching) or
574 // traditional DFA implementations (so-called leftmost longest matching as
575 // prescribed by POSIX).  This implementation chooses to mimic the
576 // backtracking implementations, because we want to replace PCRE.  To get
577 // POSIX behavior, the states would need to be considered not as a simple
578 // ordered list of instruction ids, but as a list of unordered sets of instruction
579 // ids.  A match by a state in one set would inhibit the running of sets
580 // farther down the list but not other instruction ids in the same set.  Each
581 // set would correspond to matches beginning at a given point in the string.
582 // This is implemented by separating different sets with Mark pointers.
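// As a concrete (hypothetical) illustration: in leftmost-longest mode a
// State's inst_ array might read
//
//   { 12, 27, Mark, 8, 30 }
//
// meaning that instructions 12 and 27 belong to threads that started
// matching earlier in the text than those for 8 and 30.  A match reached
// from {12, 27} suppresses {8, 30}, while 12 and 27 remain unordered
// relative to each other (each Mark-delimited set is later sorted only to
// canonicalize the state).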
583 
584 // Looks in the State cache for a State matching q, flag.
585 // If one is found, returns it.  If one is not found, allocates one,
586 // inserts it in the cache, and returns it.
587 // If mq is not null, MatchSep and the match IDs in mq will be appended
588 // to the State.
589 DFA::State* DFA::WorkqToCachedState(Workq* q, Workq* mq, uint32_t flag) {
590   //mutex_.AssertHeld();
591 
592   // Construct array of instruction ids for the new state.
593   // In some cases, kInstAltMatch may trigger an upgrade to FullMatchState.
594   // Otherwise, "compress" q down to list heads for storage; StateToWorkq()
595   // will "decompress" it for computation by exploring from each list head.
596   //
597   // Historically, only kInstByteRange, kInstEmptyWidth and kInstMatch were
598   // useful to keep, but it turned out that kInstAlt was necessary to keep:
599   //
600   // > [*] kInstAlt would seem useless to record in a state, since
601   // > we've already followed both its arrows and saved all the
602   // > interesting states we can reach from there.  The problem
603   // > is that one of the empty-width instructions might lead
604   // > back to the same kInstAlt (if an empty-width operator is starred),
605   // > producing a different evaluation order depending on whether
606   // > we keep the kInstAlt to begin with.  Sigh.
607   // > A specific case that this affects is /(^|a)+/ matching "a".
608   // > If we don't save the kInstAlt, we will match the whole "a" (0,1)
609   // > but in fact the correct leftmost-first match is the leading "" (0,0).
610   //
611   // Recall that flattening transformed the Prog from "tree" form to "list"
612   // form: in the former, kInstAlt existed explicitly... and abundantly; in
613   // the latter, it's implied between the instructions that compose a list.
614   // Thus, because the information wasn't lost, the bug doesn't remanifest.
615   PODArray<int> inst(q->size());
616   int n = 0;
617   uint32_t needflags = 0;  // flags needed by kInstEmptyWidth instructions
618   bool sawmatch = false;   // whether queue contains guaranteed kInstMatch
619   bool sawmark = false;    // whether queue contains a Mark
620   if (ExtraDebug)
621     absl::FPrintF(stderr, "WorkqToCachedState %s [%#x]", DumpWorkq(q), flag);
622   for (Workq::iterator it = q->begin(); it != q->end(); ++it) {
623     int id = *it;
624     if (sawmatch && (kind_ == Prog::kFirstMatch || q->is_mark(id)))
625       break;
626     if (q->is_mark(id)) {
627       if (n > 0 && inst[n-1] != Mark) {
628         sawmark = true;
629         inst[n++] = Mark;
630       }
631       continue;
632     }
633     Prog::Inst* ip = prog_->inst(id);
634     switch (ip->opcode()) {
635       case kInstAltMatch:
636         // This state will continue to a match no matter what
637         // the rest of the input is.  If it is the highest priority match
638         // being considered, return the special FullMatchState
639         // to indicate that it's all matches from here out.
640         if (kind_ != Prog::kManyMatch &&
641             (kind_ != Prog::kFirstMatch ||
642              (it == q->begin() && ip->greedy(prog_))) &&
643             (kind_ != Prog::kLongestMatch || !sawmark) &&
644             (flag & kFlagMatch)) {
645           if (ExtraDebug)
646             absl::FPrintF(stderr, " -> FullMatchState\n");
647           return FullMatchState;
648         }
649         ABSL_FALLTHROUGH_INTENDED;
650       default:
651         // Record iff id is the head of its list, which must
652         // be the case if id-1 is the last of *its* list. :)
653         if (prog_->inst(id-1)->last())
654           inst[n++] = *it;
655         if (ip->opcode() == kInstEmptyWidth)
656           needflags |= ip->empty();
657         if (ip->opcode() == kInstMatch && !prog_->anchor_end())
658           sawmatch = true;
659         break;
660     }
661   }
662   DCHECK_LE(n, q->size());
663   if (n > 0 && inst[n-1] == Mark)
664     n--;
665 
666   // If there are no empty-width instructions waiting to execute,
667   // then the extra flag bits will not be used, so there is no
668   // point in saving them.  (Discarding them reduces the number
669   // of distinct states.)
670   if (needflags == 0)
671     flag &= kFlagMatch;
672 
673   // NOTE(rsc): The code above cannot do flag &= needflags,
674   // because if the right flags were present to pass the current
675   // kInstEmptyWidth instructions, new kInstEmptyWidth instructions
676   // might be reached that in turn need different flags.
677   // The only sure thing is that if there are no kInstEmptyWidth
678   // instructions at all, no flags will be needed.
679   // We could do the extra work to figure out the full set of
680   // possibly needed flags by exploring past the kInstEmptyWidth
681   // instructions, but the check above -- are any flags needed
682   // at all? -- handles the most common case.  More fine-grained
683   // analysis can only be justified by measurements showing that
684   // too many redundant states are being allocated.
685 
686   // If there are no Insts in the list, it's a dead state,
687   // which is useful to signal with a special pointer so that
688   // the execution loop can stop early.  This is only okay
689   // if the state is *not* a matching state.
690   if (n == 0 && flag == 0) {
691     if (ExtraDebug)
692       absl::FPrintF(stderr, " -> DeadState\n");
693     return DeadState;
694   }
695 
696   // If we're in longest match mode, the state is a sequence of
697   // unordered state sets separated by Marks.  Sort each set
698   // to canonicalize, to reduce the number of distinct sets stored.
699   if (kind_ == Prog::kLongestMatch) {
700     int* ip = inst.data();
701     int* ep = ip + n;
702     while (ip < ep) {
703       int* markp = ip;
704       while (markp < ep && *markp != Mark)
705         markp++;
706       std::sort(ip, markp);
707       if (markp < ep)
708         markp++;
709       ip = markp;
710     }
711   }
712 
713   // If we're in many match mode, canonicalize for similar reasons:
714   // we have an unordered set of states (i.e. we don't have Marks)
715   // and sorting will reduce the number of distinct sets stored.
716   if (kind_ == Prog::kManyMatch) {
717     int* ip = inst.data();
718     int* ep = ip + n;
719     std::sort(ip, ep);
720   }
721 
722   // Append MatchSep and the match IDs in mq if necessary.
723   if (mq != NULL) {
724     inst[n++] = MatchSep;
725     for (Workq::iterator i = mq->begin(); i != mq->end(); ++i) {
726       int id = *i;
727       Prog::Inst* ip = prog_->inst(id);
728       if (ip->opcode() == kInstMatch)
729         inst[n++] = ip->match_id();
730     }
731   }
732 
733   // Save the needed empty-width flags in the top bits for use later.
734   flag |= needflags << kFlagNeedShift;
735 
736   State* state = CachedState(inst.data(), n, flag);
737   return state;
738 }
739 
740 // Looks in the State cache for a State matching inst, ninst, flag.
741 // If one is found, returns it.  If one is not found, allocates one,
742 // inserts it in the cache, and returns it.
743 DFA::State* DFA::CachedState(int* inst, int ninst, uint32_t flag) {
744   //mutex_.AssertHeld();
745 
746   // Look in the cache for a pre-existing state.
747   // We have to initialise the struct like this because otherwise
748   // MSVC will complain about the flexible array member. :(
749   State state;
750   state.inst_ = inst;
751   state.ninst_ = ninst;
752   state.flag_ = flag;
753   StateSet::iterator it = state_cache_.find(&state);
754   if (it != state_cache_.end()) {
755     if (ExtraDebug)
756       absl::FPrintF(stderr, " -cached-> %s\n", DumpState(*it));
757     return *it;
758   }
759 
760   // Must have enough memory for new state.
761   // In addition to what we're going to allocate,
762   // the state cache hash table seems to incur about 18 bytes per
763   // State*. Worst case for non-small sets is it being half full, where each
764   // value present takes up 1 byte hash sample plus the pointer itself.
765   const int kStateCacheOverhead = 18;
766   int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
767   int mem = sizeof(State) + nnext*sizeof(std::atomic<State*>);
768   int instmem = ninst*sizeof(int);
769   if (mem_budget_ < mem + instmem + kStateCacheOverhead) {
770     mem_budget_ = -1;
771     return NULL;
772   }
773   mem_budget_ -= mem + instmem + kStateCacheOverhead;
774 
775   // Allocate new state along with room for next_ and inst_.
776   // inst_ is stored separately since it's colder; this also
777   // means that the States for a given DFA are the same size
778   // class, so the allocator can hopefully pack them better.
779   char* space = std::allocator<char>().allocate(mem);
780   State* s = new (space) State;
781   (void) new (s->next_) std::atomic<State*>[nnext];
782   // Work around an unfortunate bug in older versions of libstdc++.
783   // (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=64658)
784   for (int i = 0; i < nnext; i++)
785     (void) new (s->next_ + i) std::atomic<State*>(NULL);
786   s->inst_ = std::allocator<int>().allocate(ninst);
787   (void) new (s->inst_) int[ninst];
788   memmove(s->inst_, inst, instmem);
789   s->ninst_ = ninst;
790   s->flag_ = flag;
791   if (ExtraDebug)
792     absl::FPrintF(stderr, " -> %s\n", DumpState(s));
793 
794   // Put state in cache and return it.
795   state_cache_.insert(s);
796   return s;
797 }
798 
799 // Clear the cache.  Must hold cache_mutex_.w or be in destructor.
800 void DFA::ClearCache() {
801   StateSet::iterator begin = state_cache_.begin();
802   StateSet::iterator end = state_cache_.end();
803   while (begin != end) {
804     StateSet::iterator tmp = begin;
805     ++begin;
806     // Deallocate the instruction array, which is stored separately as above.
807     std::allocator<int>().deallocate((*tmp)->inst_, (*tmp)->ninst_);
808     // Deallocate the blob of memory that we allocated in DFA::CachedState().
809     // We recompute mem in order to benefit from sized delete where possible.
810     int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
811     int mem = sizeof(State) + nnext*sizeof(std::atomic<State*>);
812     std::allocator<char>().deallocate(reinterpret_cast<char*>(*tmp), mem);
813   }
814   state_cache_.clear();
815 }
816 
817 // Copies insts in state s to the work queue q.
818 void DFA::StateToWorkq(State* s, Workq* q) {
819   q->clear();
820   for (int i = 0; i < s->ninst_; i++) {
821     if (s->inst_[i] == Mark) {
822       q->mark();
823     } else if (s->inst_[i] == MatchSep) {
824       // Nothing after this is an instruction!
825       break;
826     } else {
827       // Explore from the head of the list.
828       AddToQueue(q, s->inst_[i], s->flag_ & kFlagEmptyMask);
829     }
830   }
831 }
832 
833 // Adds ip to the work queue, following empty arrows according to flag.
834 void DFA::AddToQueue(Workq* q, int id, uint32_t flag) {
835 
836   // Use stack_ to hold our stack of instructions yet to process.
837   // It was preallocated as follows:
838   //   one entry per Capture;
839   //   one entry per EmptyWidth; and
840   //   one entry per Nop.
841   // This reflects the maximum number of stack pushes that each can
842   // perform. (Each instruction can be processed at most once.)
843   // When using marks, we also added nmark == prog_->size().
844   // (Otherwise, nmark == 0.)
845   int* stk = stack_.data();
846   int nstk = 0;
847 
848   stk[nstk++] = id;
849   while (nstk > 0) {
850     DCHECK_LE(nstk, stack_.size());
851     id = stk[--nstk];
852 
853   Loop:
854     if (id == Mark) {
855       q->mark();
856       continue;
857     }
858 
859     if (id == 0)
860       continue;
861 
862     // If ip is already on the queue, nothing to do.
863     // Otherwise add it.  We don't actually keep all the
864     // ones that get added, but adding all of them here
865     // increases the likelihood of q->contains(id),
866     // reducing the amount of duplicated work.
867     if (q->contains(id))
868       continue;
869     q->insert_new(id);
870 
871     // Process instruction.
872     Prog::Inst* ip = prog_->inst(id);
873     switch (ip->opcode()) {
874       default:
875         LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
876         break;
877 
878       case kInstByteRange:  // just save these on the queue
879       case kInstMatch:
880         if (ip->last())
881           break;
882         id = id+1;
883         goto Loop;
884 
885       case kInstCapture:    // DFA treats captures as no-ops.
886       case kInstNop:
887         if (!ip->last())
888           stk[nstk++] = id+1;
889 
890         // If this instruction is the [00-FF]* loop at the beginning of
891         // a leftmost-longest unanchored search, separate with a Mark so
892         // that future threads (which will start farther to the right in
893         // the input string) are lower priority than current threads.
894         if (ip->opcode() == kInstNop && q->maxmark() > 0 &&
895             id == prog_->start_unanchored() && id != prog_->start())
896           stk[nstk++] = Mark;
897         id = ip->out();
898         goto Loop;
899 
900       case kInstAltMatch:
901         DCHECK(!ip->last());
902         id = id+1;
903         goto Loop;
904 
905       case kInstEmptyWidth:
906         if (!ip->last())
907           stk[nstk++] = id+1;
908 
909         // Continue on if we have all the right flag bits.
910         if (ip->empty() & ~flag)
911           break;
912         id = ip->out();
913         goto Loop;
914     }
915   }
916 }
917 
918 // Running of work queues.  In the work queue, order matters:
919 // the queue is sorted in priority order.  If instruction i comes before j,
920 // then the instructions that i produces during the run must come before
921 // the ones that j produces.  In order to keep this invariant, all the
922 // work queue runners have to take an old queue to process and then
923 // also a new queue to fill in.  It's not acceptable to add to the end of
924 // an existing queue, because new instructions will not end up in the
925 // correct position.
926 
927 // Runs the work queue, processing the empty strings indicated by flag.
928 // For example, flag == kEmptyBeginLine|kEmptyEndLine means to match
929 // both ^ and $.  It is important that callers pass all flags at once:
930 // processing both ^ and $ is not the same as first processing only ^
931 // and then processing only $.  Doing the two-step sequence won't match
932 // ^$^$^$ but processing ^ and $ simultaneously will (and is the behavior
933 // exhibited by existing implementations).
934 void DFA::RunWorkqOnEmptyString(Workq* oldq, Workq* newq, uint32_t flag) {
935   newq->clear();
936   for (Workq::iterator i = oldq->begin(); i != oldq->end(); ++i) {
937     if (oldq->is_mark(*i))
938       AddToQueue(newq, Mark, flag);
939     else
940       AddToQueue(newq, *i, flag);
941   }
942 }
943 
944 // Runs the work queue, processing the single byte c followed by any empty
945 // strings indicated by flag.  For example, c == 'a' and flag == kEmptyEndLine,
946 // means to match c$.  Sets the bool *ismatch to true if the end of the
947 // regular expression program has been reached (the regexp has matched).
948 void DFA::RunWorkqOnByte(Workq* oldq, Workq* newq,
949                          int c, uint32_t flag, bool* ismatch) {
950   //mutex_.AssertHeld();
951 
952   newq->clear();
953   for (Workq::iterator i = oldq->begin(); i != oldq->end(); ++i) {
954     if (oldq->is_mark(*i)) {
955       if (*ismatch)
956         return;
957       newq->mark();
958       continue;
959     }
960     int id = *i;
961     Prog::Inst* ip = prog_->inst(id);
962     switch (ip->opcode()) {
963       default:
964         LOG(DFATAL) << "unhandled opcode: " << ip->opcode();
965         break;
966 
967       case kInstFail:        // never succeeds
968       case kInstCapture:     // already followed
969       case kInstNop:         // already followed
970       case kInstAltMatch:    // already followed
971       case kInstEmptyWidth:  // already followed
972         break;
973 
974       case kInstByteRange:   // can follow if c is in range
975         if (!ip->Matches(c))
976           break;
977         AddToQueue(newq, ip->out(), flag);
978         if (ip->hint() != 0) {
979           // We have a hint, but we must cancel out the
980           // increment that will occur after the break.
981           i += ip->hint() - 1;
982         } else {
983           // We have no hint, so we must find the end
984           // of the current list and then skip to it.
985           Prog::Inst* ip0 = ip;
986           while (!ip->last())
987             ++ip;
988           i += ip - ip0;
989         }
990         break;
991 
992       case kInstMatch:
993         if (prog_->anchor_end() && c != kByteEndText &&
994             kind_ != Prog::kManyMatch)
995           break;
996         *ismatch = true;
997         if (kind_ == Prog::kFirstMatch) {
998           // Can stop processing work queue since we found a match.
999           return;
1000         }
1001         break;
1002     }
1003   }
1004 
1005   if (ExtraDebug)
1006     absl::FPrintF(stderr, "%s on %d[%#x] -> %s [%d]\n",
1007                   DumpWorkq(oldq), c, flag, DumpWorkq(newq), *ismatch);
1008 }
1009 
1010 // Processes input byte c in state, returning new state.
1011 // Caller does not hold mutex.
1012 DFA::State* DFA::RunStateOnByteUnlocked(State* state, int c) {
1013   // Keep only one RunStateOnByte going
1014   // even if the DFA is being run by multiple threads.
1015   absl::MutexLock l(&mutex_);
1016   return RunStateOnByte(state, c);
1017 }
1018 
1019 // Processes input byte c in state, returning new state.
1020 DFA::State* DFA::RunStateOnByte(State* state, int c) {
1021   //mutex_.AssertHeld();
1022 
1023   if (state <= SpecialStateMax) {
1024     if (state == FullMatchState) {
1025       // It is convenient for routines like PossibleMatchRange
1026       // if we implement RunStateOnByte for FullMatchState:
1027       // once you get into this state you never get out,
1028       // so it's pretty easy.
1029       return FullMatchState;
1030     }
1031     if (state == DeadState) {
1032       LOG(DFATAL) << "DeadState in RunStateOnByte";
1033       return NULL;
1034     }
1035     if (state == NULL) {
1036       LOG(DFATAL) << "NULL state in RunStateOnByte";
1037       return NULL;
1038     }
1039     LOG(DFATAL) << "Unexpected special state in RunStateOnByte";
1040     return NULL;
1041   }
1042 
1043   // If someone else already computed this, return it.
1044   State* ns = state->next_[ByteMap(c)].load(std::memory_order_relaxed);
1045   if (ns != NULL)
1046     return ns;
1047 
1048   // Convert state into Workq.
1049   StateToWorkq(state, q0_);
1050 
1051   // Flags marking the kinds of empty-width things (^ $ etc)
1052   // around this byte.  Before the byte we have the flags recorded
1053   // in the State structure itself.  After the byte we have
1054   // nothing yet (but that will change: read on).
1055   uint32_t needflag = state->flag_ >> kFlagNeedShift;
1056   uint32_t beforeflag = state->flag_ & kFlagEmptyMask;
1057   uint32_t oldbeforeflag = beforeflag;
1058   uint32_t afterflag = 0;
1059 
1060   if (c == '\n') {
1061     // Insert implicit $ and ^ around \n
1062     beforeflag |= kEmptyEndLine;
1063     afterflag |= kEmptyBeginLine;
1064   }
1065 
1066   if (c == kByteEndText) {
1067     // Insert implicit $ and \z before the fake "end text" byte.
1068     beforeflag |= kEmptyEndLine | kEmptyEndText;
1069   }
1070 
1071   // The state flag kFlagLastWord says whether the last
1072   // byte processed was a word character.  Use that info to
1073   // insert empty-width (non-)word boundaries.
1074   bool islastword = (state->flag_ & kFlagLastWord) != 0;
1075   bool isword = c != kByteEndText && Prog::IsWordChar(static_cast<uint8_t>(c));
1076   if (isword == islastword)
1077     beforeflag |= kEmptyNonWordBoundary;
1078   else
1079     beforeflag |= kEmptyWordBoundary;
1080 
1081   // Okay, finally ready to run.
1082   // Only useful to rerun on empty string if there are new, useful flags.
1083   if (beforeflag & ~oldbeforeflag & needflag) {
1084     RunWorkqOnEmptyString(q0_, q1_, beforeflag);
1085     using std::swap;
1086     swap(q0_, q1_);
1087   }
1088   bool ismatch = false;
1089   RunWorkqOnByte(q0_, q1_, c, afterflag, &ismatch);
1090   using std::swap;
1091   swap(q0_, q1_);
1092 
1093   // Save afterflag along with ismatch and isword in new state.
1094   uint32_t flag = afterflag;
1095   if (ismatch)
1096     flag |= kFlagMatch;
1097   if (isword)
1098     flag |= kFlagLastWord;
1099 
1100   if (ismatch && kind_ == Prog::kManyMatch)
1101     ns = WorkqToCachedState(q0_, q1_, flag);
1102   else
1103     ns = WorkqToCachedState(q0_, NULL, flag);
1104 
1105   // Flush ns before linking to it.
1106   // Write barrier before updating state->next_ so that the
1107   // main search loop can proceed without any locking, for speed.
1108   // (Otherwise it would need one mutex operation per input byte.)
1109   state->next_[ByteMap(c)].store(ns, std::memory_order_release);
1110   return ns;
1111 }
1112 
1113 
1114 //////////////////////////////////////////////////////////////////////
1115 // DFA cache reset.
1116 
1117 // Reader-writer lock helper.
1118 //
1119 // The DFA uses a reader-writer mutex to protect the state graph itself.
1120 // Traversing the state graph requires holding the mutex for reading,
1121 // and discarding the state graph and starting over requires holding the
1122 // lock for writing.  If a search needs to expand the graph but is out
1123 // of memory, it will need to drop its read lock and then acquire the
1124 // write lock.  Since it cannot then atomically downgrade from write lock
1125 // to read lock, it runs the rest of the search holding the write lock.
1126 // (This probably helps avoid repeated contention, but really the decision
1127 // is forced by the Mutex interface.)  It's a bit complicated to keep
1128 // track of whether the lock is held for reading or writing and thread
1129 // that through the search, so instead we encapsulate it in the RWLocker
1130 // and pass that around.
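//
// A typical use, in sketch form (see the search code and ResetCache below):
//
//   RWLocker l(&cache_mutex_);   // acquires the read lock
//   ... traverse and extend the state graph ...
//   l.LockForWriting();          // upgrade: releases, then re-acquires
//   ... flush the cache and keep searching under the write lock ...
//   // ~RWLocker releases whichever lock is held at that point.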
1131 
1132 class DFA::RWLocker {
1133  public:
1134   explicit RWLocker(CacheMutex* mu);
1135   ~RWLocker();
1136 
1137   // If the lock is only held for reading right now,
1138   // drop the read lock and re-acquire for writing.
1139   // Subsequent calls to LockForWriting are no-ops.
1140   // Notice that the lock is *released* temporarily.
1141   void LockForWriting();
1142 
1143  private:
1144   CacheMutex* mu_;
1145   bool writing_;
1146 
1147   RWLocker(const RWLocker&) = delete;
1148   RWLocker& operator=(const RWLocker&) = delete;
1149 };
1150 
1151 DFA::RWLocker::RWLocker(CacheMutex* mu) : mu_(mu), writing_(false) {
1152   mu_->ReaderLock();
1153 }
1154 
1155 // This function is marked as ABSL_NO_THREAD_SAFETY_ANALYSIS because
1156 // the annotations don't support lock upgrade.
1157 void DFA::RWLocker::LockForWriting() ABSL_NO_THREAD_SAFETY_ANALYSIS {
1158   if (!writing_) {
1159     mu_->ReaderUnlock();
1160     mu_->WriterLock();
1161     writing_ = true;
1162   }
1163 }
1164 
1165 DFA::RWLocker::~RWLocker() {
1166   if (!writing_)
1167     mu_->ReaderUnlock();
1168   else
1169     mu_->WriterUnlock();
1170 }
1171 
1172 
1173 // When the DFA's State cache fills, we discard all the states in the
1174 // cache and start over.  Many threads can be using and adding to the
1175 // cache at the same time, so we synchronize using the cache_mutex_
1176 // to keep from stepping on other threads.  Specifically, all the
1177 // threads using the current cache hold cache_mutex_ for reading.
1178 // When a thread decides to flush the cache, it drops cache_mutex_
1179 // and then re-acquires it for writing.  That ensures there are no
1180 // other threads accessing the cache anymore.  The rest of the search
1181 // runs holding cache_mutex_ for writing, avoiding any contention
1182 // with or cache pollution caused by other threads.
1183 
1184 void DFA::ResetCache(RWLocker* cache_lock) {
1185   // Re-acquire the cache_mutex_ for writing (exclusive use).
1186   cache_lock->LockForWriting();
1187 
1188   hooks::GetDFAStateCacheResetHook()({
1189       state_budget_,
1190       state_cache_.size(),
1191   });
1192 
1193   // Clear the cache, reset the memory budget.
1194   for (int i = 0; i < kMaxStart; i++)
1195     start_[i].start.store(NULL, std::memory_order_relaxed);
1196   ClearCache();
1197   mem_budget_ = state_budget_;
1198 }
1199 
1200 // Typically, a couple States do need to be preserved across a cache
1201 // reset, like the State at the current point in the search.
1202 // The StateSaver class helps keep States across cache resets.
1203 // It makes a copy of the state's guts outside the cache (before the reset)
1204 // and then can be asked, after the reset, to recreate the State
1205 // in the new cache.  For example, in a DFA method ("this" is a DFA):
1206 //
1207 //   StateSaver saver(this, s);
1208 //   ResetCache(cache_lock);
1209 //   s = saver.Restore();
1210 //
1211 // The saver should always have room in the cache to re-create the state,
1212 // because resetting the cache locks out all other threads, and the cache
1213 // is known to have room for at least a couple states (otherwise the DFA
1214 // constructor fails).
1215 
1216 class DFA::StateSaver {
1217  public:
1218   explicit StateSaver(DFA* dfa, State* state);
1219   ~StateSaver();
1220 
1221   // Recreates and returns a state equivalent to the
1222   // original state passed to the constructor.
1223   // Returns NULL if the cache has filled, but
1224   // since the DFA guarantees to have room in the cache
1225   // for a couple states, should never return NULL
1226   // if used right after ResetCache.
1227   State* Restore();
1228 
1229  private:
1230   DFA* dfa_;         // the DFA to use
1231   int* inst_;        // saved info from State
1232   int ninst_;
1233   uint32_t flag_;
1234   bool is_special_;  // whether original state was special
1235   State* special_;   // if is_special_, the original state
1236 
1237   StateSaver(const StateSaver&) = delete;
1238   StateSaver& operator=(const StateSaver&) = delete;
1239 };
1240 
1241 DFA::StateSaver::StateSaver(DFA* dfa, State* state) {
1242   dfa_ = dfa;
1243   if (state <= SpecialStateMax) {
1244     inst_ = NULL;
1245     ninst_ = 0;
1246     flag_ = 0;
1247     is_special_ = true;
1248     special_ = state;
1249     return;
1250   }
1251   is_special_ = false;
1252   special_ = NULL;
1253   flag_ = state->flag_;
1254   ninst_ = state->ninst_;
1255   inst_ = new int[ninst_];
1256   memmove(inst_, state->inst_, ninst_*sizeof inst_[0]);
1257 }
1258 
1259 DFA::StateSaver::~StateSaver() {
1260   if (!is_special_)
1261     delete[] inst_;
1262 }
1263 
1264 DFA::State* DFA::StateSaver::Restore() {
1265   if (is_special_)
1266     return special_;
1267   absl::MutexLock l(&dfa_->mutex_);
1268   State* s = dfa_->CachedState(inst_, ninst_, flag_);
1269   if (s == NULL)
1270     LOG(DFATAL) << "StateSaver failed to restore state.";
1271   return s;
1272 }
1273 
1274 
1275 //////////////////////////////////////////////////////////////////////
1276 //
1277 // DFA execution.
1278 //
1279 // The basic search loop is easy: start in a state s and then for each
1280 // byte c in the input, s = s->next[c].
1281 //
1282 // This simple description omits a few efficiency-driven complications.
1283 //
1284 // First, the State graph is constructed incrementally: it is possible
1285 // that s->next[c] is null, indicating that that state has not been
1286 // fully explored.  In this case, RunStateOnByte must be invoked to
1287 // determine the next state, which is cached in s->next[c] to save
1288 // future effort.  An alternative reason for s->next[c] to be null is
1289 // that the DFA has reached a so-called "dead state", in which any match
1290 // is no longer possible.  In this case RunStateOnByte will return NULL
1291 // and the processing of the string can stop early.
1292 //
1293 // Second, a 256-element pointer array for s->next_ makes each State
1294 // quite large (2kB on 64-bit machines).  Instead, dfa->bytemap_[]
1295 // maps from bytes to "byte classes" and then next_ only needs to have
1296 // as many pointers as there are byte classes.  A byte class is simply a
1297 // range of bytes that the regexp never distinguishes between.
1298 // A regexp looking for a[abc] would have four byte ranges -- 0 to 'a'-1,
1299 // 'a', 'b' to 'c', and 'd' to 0xFF.  The bytemap slows us a little bit
1300 // but in exchange we typically cut the size of a State (and thus our
1301 // memory footprint) by about 5-10x.  The comments still refer to
1302 // s->next[c] for simplicity, but code should refer to s->next_[bytemap_[c]].
1303 //
1304 // Third, it is common for a DFA for an unanchored match to begin in a
1305 // state in which only one particular byte value can take the DFA to a
1306 // different state.  That is, s->next[c] != s for only one c.  In this
1307 // situation, the DFA can do better than executing the simple loop.
1308 // Instead, it can call memchr to search very quickly for the byte c.
1309 // Whether the start state has this property is determined during a
1310 // pre-compilation pass and the "can_prefix_accel" argument is set.
1311 //
1312 // Fourth, the desired behavior is to search for the leftmost-best match
1313 // (approximately, the same one that Perl would find), which is not
1314 // necessarily the match ending earliest in the string.  Each time a
1315 // match is found, it must be noted, but the DFA must continue on in
1316 // hope of finding a higher-priority match.  In some cases, the caller only
1317 // cares whether there is any match at all, not which one is found.
1318 // The "want_earliest_match" flag causes the search to stop at the first
1319 // match found.
1320 //
1321 // Fifth, one algorithm that uses the DFA needs it to run over the
1322 // input string backward, beginning at the end and ending at the beginning.
1323 // Passing false for the "run_forward" flag causes the DFA to run backward.
1324 //
1325 // The checks for these last three cases, which in a naive implementation
1326 // would be performed once per input byte, slow the general loop enough
1327 // to merit specialized versions of the search loop for each of the
1328 // eight possible settings of the three booleans.  Rather than write
1329 // eight different functions, we write one general implementation and then
1330 // inline it to create the specialized ones.
1331 //
1332 // Note that matches are delayed by one byte, to make it easier to
1333 // accommodate match conditions depending on the next input byte (like $ and \b).
1334 // When s->next[c]->IsMatch(), it means that there is a match ending just
1335 // *before* byte c.
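//
// For instance, with the pattern /ab\b/ and the text "ab ", the match is
// only discovered while the space is being examined: the \b test needs to
// know that the byte after 'b' is a non-word character, so it is the state
// reached on the space -- s->next[' '] -- whose IsMatch() bit records the
// match, and the recorded end position is just before that space.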
1336 
1337 // The generic search loop.  Searches text for a match, returning
1338 // the pointer to the end of the chosen match, or NULL if no match.
1339 // The bools are equal to the same-named variables in params, but
1340 // making them function arguments lets the inliner specialize
1341 // this function to each combination (see two paragraphs above).
1342 template <bool can_prefix_accel,
1343           bool want_earliest_match,
1344           bool run_forward>
1345 inline bool DFA::InlinedSearchLoop(SearchParams* params) {
1346   State* start = params->start;
1347   const uint8_t* bp = BytePtr(params->text.data());  // start of text
1348   const uint8_t* p = bp;                             // text scanning point
1349   const uint8_t* ep = BytePtr(params->text.data() +
1350                               params->text.size());  // end of text
1351   const uint8_t* resetp = NULL;                      // p at last cache reset
1352   if (!run_forward) {
1353     using std::swap;
1354     swap(p, ep);
1355   }
1356 
1357   const uint8_t* bytemap = prog_->bytemap();
1358   const uint8_t* lastmatch = NULL;   // most recent matching position in text
1359   bool matched = false;
1360 
1361   State* s = start;
1362   if (ExtraDebug)
1363     absl::FPrintF(stderr, "@stx: %s\n", DumpState(s));
1364 
1365   if (s->IsMatch()) {
1366     matched = true;
1367     lastmatch = p;
1368     if (ExtraDebug)
1369       absl::FPrintF(stderr, "match @stx! [%s]\n", DumpState(s));
1370     if (params->matches != NULL) {
1371       for (int i = s->ninst_ - 1; i >= 0; i--) {
1372         int id = s->inst_[i];
1373         if (id == MatchSep)
1374           break;
1375         params->matches->insert(id);
1376       }
1377     }
1378     if (want_earliest_match) {
1379       params->ep = reinterpret_cast<const char*>(lastmatch);
1380       return true;
1381     }
1382   }
1383 
1384   while (p != ep) {
1385     if (ExtraDebug)
1386       absl::FPrintF(stderr, "@%d: %s\n", p - bp, DumpState(s));
1387 
1388     if (can_prefix_accel && s == start) {
1389       // In start state, only way out is to find the prefix,
1390       // so we use prefix accel (e.g. memchr) to skip ahead.
1391       // If not found, we can skip to the end of the string.
1392       p = BytePtr(prog_->PrefixAccel(p, ep - p));
1393       if (p == NULL) {
1394         p = ep;
1395         break;
1396       }
1397     }
1398 
1399     int c;
1400     if (run_forward)
1401       c = *p++;
1402     else
1403       c = *--p;
1404 
1405     // Note that multiple threads might be consulting
1406     // s->next_[bytemap[c]] simultaneously.
1407     // RunStateOnByte takes care of the appropriate locking,
1408     // including a memory barrier so that the unlocked access
1409     // (sometimes known as "double-checked locking") is safe.
1410     // The alternative would be either one DFA per thread
1411     // or one mutex operation per input byte.
1412     //
1413     // ns == DeadState means the state is known to be dead
1414     // (no more matches are possible).
1415     // ns == NULL means the state has not yet been computed
1416     // (need to call RunStateOnByteUnlocked).
1417     // RunStateOnByte returns ns == NULL if it is out of memory.
1418     // ns == FullMatchState means the rest of the string matches.
1419     //
1420     // Okay to use bytemap[] not ByteMap() here, because
1421     // c is known to be an actual byte and not kByteEndText.
1422 
1423     State* ns = s->next_[bytemap[c]].load(std::memory_order_acquire);
1424     if (ns == NULL) {
1425       ns = RunStateOnByteUnlocked(s, c);
1426       if (ns == NULL) {
1427         // After we reset the cache, we hold cache_mutex exclusively,
1428         // so if resetp != NULL, it means we filled the DFA state
1429         // cache with this search alone (without any other threads).
1430         // Benchmarks show that doing a state computation on every
1431         // byte runs at about 0.2 MB/s, while the NFA (nfa.cc) can do the
1432         // same at about 2 MB/s.  Unless we're processing an average
1433         // of 10 bytes per state computation, fail so that RE2 can
1434         // fall back to the NFA.  However, RE2::Set cannot fall back,
1435         // so we just have to keep on keeping on in that case.
1436         if (dfa_should_bail_when_slow && resetp != NULL &&
1437             static_cast<size_t>(p - resetp) < 10*state_cache_.size() &&
1438             kind_ != Prog::kManyMatch) {
1439           params->failed = true;
1440           return false;
1441         }
1442         resetp = p;
1443 
1444         // Prepare to save start and s across the reset.
1445         StateSaver save_start(this, start);
1446         StateSaver save_s(this, s);
1447 
1448         // Discard all the States in the cache.
1449         ResetCache(params->cache_lock);
1450 
1451         // Restore start and s so we can continue.
1452         if ((start = save_start.Restore()) == NULL ||
1453             (s = save_s.Restore()) == NULL) {
1454           // Restore already did LOG(DFATAL).
1455           params->failed = true;
1456           return false;
1457         }
1458         ns = RunStateOnByteUnlocked(s, c);
1459         if (ns == NULL) {
1460           LOG(DFATAL) << "RunStateOnByteUnlocked failed after ResetCache";
1461           params->failed = true;
1462           return false;
1463         }
1464       }
1465     }
1466     if (ns <= SpecialStateMax) {
1467       if (ns == DeadState) {
1468         params->ep = reinterpret_cast<const char*>(lastmatch);
1469         return matched;
1470       }
1471       // FullMatchState
1472       params->ep = reinterpret_cast<const char*>(ep);
1473       return true;
1474     }
1475 
1476     s = ns;
1477     if (s->IsMatch()) {
1478       matched = true;
1479       // The DFA notices the match one byte late,
1480       // so adjust p before using it in the match.
1481       if (run_forward)
1482         lastmatch = p - 1;
1483       else
1484         lastmatch = p + 1;
1485       if (ExtraDebug)
1486         absl::FPrintF(stderr, "match @%d! [%s]\n", lastmatch - bp, DumpState(s));
1487       if (params->matches != NULL) {
1488         for (int i = s->ninst_ - 1; i >= 0; i--) {
1489           int id = s->inst_[i];
1490           if (id == MatchSep)
1491             break;
1492           params->matches->insert(id);
1493         }
1494       }
1495       if (want_earliest_match) {
1496         params->ep = reinterpret_cast<const char*>(lastmatch);
1497         return true;
1498       }
1499     }
1500   }
1501 
1502   // Process one more byte to see if it triggers a match.
1503   // (Remember, matches are delayed one byte.)
1504   if (ExtraDebug)
1505     absl::FPrintF(stderr, "@etx: %s\n", DumpState(s));
1506 
1507   int lastbyte;
1508   if (run_forward) {
1509     if (EndPtr(params->text) == EndPtr(params->context))
1510       lastbyte = kByteEndText;
1511     else
1512       lastbyte = EndPtr(params->text)[0] & 0xFF;
1513   } else {
1514     if (BeginPtr(params->text) == BeginPtr(params->context))
1515       lastbyte = kByteEndText;
1516     else
1517       lastbyte = BeginPtr(params->text)[-1] & 0xFF;
1518   }
1519 
1520   State* ns = s->next_[ByteMap(lastbyte)].load(std::memory_order_acquire);
1521   if (ns == NULL) {
1522     ns = RunStateOnByteUnlocked(s, lastbyte);
1523     if (ns == NULL) {
1524       StateSaver save_s(this, s);
1525       ResetCache(params->cache_lock);
1526       if ((s = save_s.Restore()) == NULL) {
1527         params->failed = true;
1528         return false;
1529       }
1530       ns = RunStateOnByteUnlocked(s, lastbyte);
1531       if (ns == NULL) {
1532         LOG(DFATAL) << "RunStateOnByteUnlocked failed after Reset";
1533         params->failed = true;
1534         return false;
1535       }
1536     }
1537   }
1538   if (ns <= SpecialStateMax) {
1539     if (ns == DeadState) {
1540       params->ep = reinterpret_cast<const char*>(lastmatch);
1541       return matched;
1542     }
1543     // FullMatchState
1544     params->ep = reinterpret_cast<const char*>(ep);
1545     return true;
1546   }
1547 
1548   s = ns;
1549   if (s->IsMatch()) {
1550     matched = true;
1551     lastmatch = p;
1552     if (ExtraDebug)
1553       absl::FPrintF(stderr, "match @etx! [%s]\n", DumpState(s));
1554     if (params->matches != NULL) {
1555       for (int i = s->ninst_ - 1; i >= 0; i--) {
1556         int id = s->inst_[i];
1557         if (id == MatchSep)
1558           break;
1559         params->matches->insert(id);
1560       }
1561     }
1562   }
1563 
1564   params->ep = reinterpret_cast<const char*>(lastmatch);
1565   return matched;
1566 }
1567 
1568 // Inline specializations of the general loop.
1569 bool DFA::SearchFFF(SearchParams* params) {
1570   return InlinedSearchLoop<false, false, false>(params);
1571 }
1572 bool DFA::SearchFFT(SearchParams* params) {
1573   return InlinedSearchLoop<false, false, true>(params);
1574 }
1575 bool DFA::SearchFTF(SearchParams* params) {
1576   return InlinedSearchLoop<false, true, false>(params);
1577 }
1578 bool DFA::SearchFTT(SearchParams* params) {
1579   return InlinedSearchLoop<false, true, true>(params);
1580 }
1581 bool DFA::SearchTFF(SearchParams* params) {
1582   return InlinedSearchLoop<true, false, false>(params);
1583 }
1584 bool DFA::SearchTFT(SearchParams* params) {
1585   return InlinedSearchLoop<true, false, true>(params);
1586 }
1587 bool DFA::SearchTTF(SearchParams* params) {
1588   return InlinedSearchLoop<true, true, false>(params);
1589 }
1590 bool DFA::SearchTTT(SearchParams* params) {
1591   return InlinedSearchLoop<true, true, true>(params);
1592 }
1593 
1594 // For performance, calls the appropriate specialized version
1595 // of InlinedSearchLoop.
1596 bool DFA::FastSearchLoop(SearchParams* params) {
1597   // Because the methods are private, the Searches array
1598   // cannot be declared at top level.
1599   static bool (DFA::*Searches[])(SearchParams*) = {
1600     &DFA::SearchFFF,
1601     &DFA::SearchFFT,
1602     &DFA::SearchFTF,
1603     &DFA::SearchFTT,
1604     &DFA::SearchTFF,
1605     &DFA::SearchTFT,
1606     &DFA::SearchTTF,
1607     &DFA::SearchTTT,
1608   };
1609 
1610   int index = 4 * params->can_prefix_accel +
1611               2 * params->want_earliest_match +
1612               1 * params->run_forward;
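  // For example, can_prefix_accel=true, want_earliest_match=false and
  // run_forward=true give index 4 + 0 + 1 = 5, i.e. &DFA::SearchTFT,
  // which is InlinedSearchLoop<true, false, true>.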
1613   return (this->*Searches[index])(params);
1614 }
1615 
1616 
1617 // The discussion of DFA execution above ignored the question of how
1618 // to determine the initial state for the search loop.  There are two
1619 // factors that influence the choice of start state.
1620 //
1621 // The first factor is whether the search is anchored or not.
1622 // The regexp program (Prog*) itself has
1623 // two different entry points: one for anchored searches and one for
1624 // unanchored searches.  (The unanchored version starts with a leading ".*?"
1625 // and then jumps to the anchored one.)
1626 //
1627 // The second factor is where text appears in the larger context, which
1628 // determines which empty-string operators can be matched at the beginning
1629 // of execution.  If text is at the very beginning of context, \A and ^ match.
1630 // Otherwise if text is at the beginning of a line, then ^ matches.
1631 // Otherwise it matters whether the character before text is a word character
1632 // or a non-word character.
1633 //
1634 // The two cases (unanchored vs not) and four cases (empty-string flags)
1635 // combine to make the eight cases recorded in the DFA's start_[] array
1636 // of cached StartInfos, indexed by the kStart* constants (with
1637 // kStartAnchored OR'd in for anchored searches).  The start state for
1638 // each is filled in the first time it is used for an actual search.
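// For example, an unanchored forward search whose text begins just after
// a '\n' in the context uses start_[kStartBeginLine]; the anchored variant
// of the same search uses start_[kStartBeginLine|kStartAnchored].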
1639 
1640 // Examines text, context, and anchored to determine the right start
1641 // state for the DFA search loop.  Fills in params and returns true on success.
1642 // Returns false on failure.
1643 bool DFA::AnalyzeSearch(SearchParams* params) {
1644   absl::string_view text = params->text;
1645   absl::string_view context = params->context;
1646 
1647   // Sanity check: make sure that text lies within context.
1648   if (BeginPtr(text) < BeginPtr(context) || EndPtr(text) > EndPtr(context)) {
1649     LOG(DFATAL) << "context does not contain text";
1650     params->start = DeadState;
1651     return true;
1652   }
1653 
1654   // Determine correct search type.
1655   int start;
1656   uint32_t flags;
1657   if (params->run_forward) {
1658     if (BeginPtr(text) == BeginPtr(context)) {
1659       start = kStartBeginText;
1660       flags = kEmptyBeginText|kEmptyBeginLine;
1661     } else if (BeginPtr(text)[-1] == '\n') {
1662       start = kStartBeginLine;
1663       flags = kEmptyBeginLine;
1664     } else if (Prog::IsWordChar(BeginPtr(text)[-1] & 0xFF)) {
1665       start = kStartAfterWordChar;
1666       flags = kFlagLastWord;
1667     } else {
1668       start = kStartAfterNonWordChar;
1669       flags = 0;
1670     }
1671   } else {
1672     if (EndPtr(text) == EndPtr(context)) {
1673       start = kStartBeginText;
1674       flags = kEmptyBeginText|kEmptyBeginLine;
1675     } else if (EndPtr(text)[0] == '\n') {
1676       start = kStartBeginLine;
1677       flags = kEmptyBeginLine;
1678     } else if (Prog::IsWordChar(EndPtr(text)[0] & 0xFF)) {
1679       start = kStartAfterWordChar;
1680       flags = kFlagLastWord;
1681     } else {
1682       start = kStartAfterNonWordChar;
1683       flags = 0;
1684     }
1685   }
1686   if (params->anchored)
1687     start |= kStartAnchored;
1688   StartInfo* info = &start_[start];
1689 
1690   // Try once without cache_lock for writing.
1691   // Try again after resetting the cache
1692   // (ResetCache will relock cache_lock for writing).
1693   if (!AnalyzeSearchHelper(params, info, flags)) {
1694     ResetCache(params->cache_lock);
1695     if (!AnalyzeSearchHelper(params, info, flags)) {
1696       params->failed = true;
1697       LOG(DFATAL) << "Failed to analyze start state.";
1698       return false;
1699     }
1700   }
1701 
1702   params->start = info->start.load(std::memory_order_acquire);
1703 
1704   // Even if prefix acceleration is available, we cannot use it when the
1705   // search is anchored and, less obviously, when we are going to need flags.
1706   // This trick works only when there is a single byte that leads to a
1707   // different state!
1708   if (prog_->can_prefix_accel() &&
1709       !params->anchored &&
1710       params->start > SpecialStateMax &&
1711       params->start->flag_ >> kFlagNeedShift == 0)
1712     params->can_prefix_accel = true;
1713 
1714   if (ExtraDebug)
1715     absl::FPrintF(stderr, "anchored=%d fwd=%d flags=%#x state=%s can_prefix_accel=%d\n",
1716                   params->anchored, params->run_forward, flags,
1717                   DumpState(params->start), params->can_prefix_accel);
1718 
1719   return true;
1720 }
1721 
1722 // Fills in info if needed.  Returns true on success, false on failure.
1723 bool DFA::AnalyzeSearchHelper(SearchParams* params, StartInfo* info,
1724                               uint32_t flags) {
1725   // Quick check.
1726   State* start = info->start.load(std::memory_order_acquire);
1727   if (start != NULL)
1728     return true;
1729 
1730   absl::MutexLock l(&mutex_);
1731   start = info->start.load(std::memory_order_relaxed);
1732   if (start != NULL)
1733     return true;
1734 
1735   q0_->clear();
1736   AddToQueue(q0_,
1737              params->anchored ? prog_->start() : prog_->start_unanchored(),
1738              flags);
1739   start = WorkqToCachedState(q0_, NULL, flags);
1740   if (start == NULL)
1741     return false;
1742 
1743   // Synchronize with "quick check" above.
1744   info->start.store(start, std::memory_order_release);
1745   return true;
1746 }
1747 
1748 // The actual DFA search: calls AnalyzeSearch and then FastSearchLoop.
1749 bool DFA::Search(absl::string_view text, absl::string_view context,
1750                  bool anchored, bool want_earliest_match, bool run_forward,
1751                  bool* failed, const char** epp, SparseSet* matches) {
1752   *epp = NULL;
1753   if (!ok()) {
1754     *failed = true;
1755     return false;
1756   }
1757   *failed = false;
1758 
1759   if (ExtraDebug) {
1760     absl::FPrintF(stderr, "\nprogram:\n%s\n", prog_->DumpUnanchored());
1761     absl::FPrintF(stderr, "text %s anchored=%d earliest=%d fwd=%d kind %d\n",
1762                   text, anchored, want_earliest_match, run_forward, kind_);
1763   }
1764 
1765   RWLocker l(&cache_mutex_);
1766   SearchParams params(text, context, &l);
1767   params.anchored = anchored;
1768   params.want_earliest_match = want_earliest_match;
1769   params.run_forward = run_forward;
1770   // matches should be null except when using RE2::Set.
1771   DCHECK(matches == NULL || kind_ == Prog::kManyMatch);
1772   params.matches = matches;
1773 
1774   if (!AnalyzeSearch(&params)) {
1775     *failed = true;
1776     return false;
1777   }
1778   if (params.start == DeadState)
1779     return false;
1780   if (params.start == FullMatchState) {
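    // Everything matches.  For a forward earliest-match search the match can
    // end where it starts, so *epp is text.data(); a backward longest-match
    // search likewise runs all the way back to text.data().  The other two
    // combinations end at text.data() + text.size().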
1781     if (run_forward == want_earliest_match)
1782       *epp = text.data();
1783     else
1784       *epp = text.data() + text.size();
1785     return true;
1786   }
1787   if (ExtraDebug)
1788     absl::FPrintF(stderr, "start %s\n", DumpState(params.start));
1789   bool ret = FastSearchLoop(&params);
1790   if (params.failed) {
1791     *failed = true;
1792     return false;
1793   }
1794   *epp = params.ep;
1795   return ret;
1796 }
1797 
1798 DFA* Prog::GetDFA(MatchKind kind) {
1799   // For a forward DFA, half the memory goes to each DFA.
1800   // However, if it is a "many match" DFA, then there is
1801   // no counterpart with which the memory must be shared.
1802   //
1803   // For a reverse DFA, all the memory goes to the
1804   // "longest match" DFA, because RE2 never does reverse
1805   // "first match" searches.
1806   if (kind == kFirstMatch) {
1807     absl::call_once(dfa_first_once_, [](Prog* prog) {
1808       prog->dfa_first_ = new DFA(prog, kFirstMatch, prog->dfa_mem_ / 2);
1809     }, this);
1810     return dfa_first_;
1811   } else if (kind == kManyMatch) {
1812     absl::call_once(dfa_first_once_, [](Prog* prog) {
1813       prog->dfa_first_ = new DFA(prog, kManyMatch, prog->dfa_mem_);
1814     }, this);
1815     return dfa_first_;
1816   } else {
1817     absl::call_once(dfa_longest_once_, [](Prog* prog) {
1818       if (!prog->reversed_)
1819         prog->dfa_longest_ = new DFA(prog, kLongestMatch, prog->dfa_mem_ / 2);
1820       else
1821         prog->dfa_longest_ = new DFA(prog, kLongestMatch, prog->dfa_mem_);
1822     }, this);
1823     return dfa_longest_;
1824   }
1825 }
1826 
1827 void Prog::DeleteDFA(DFA* dfa) {
1828   delete dfa;
1829 }
1830 
1831 // Executes the regexp program to search in text,
1832 // which itself is inside the larger context.  (As a convenience,
1833 // passing a NULL context is equivalent to passing text.)
1834 // Returns true if a match is found, false if not.
1835 // If a match is found, fills in match0->end() to point at the end of the match
1836 // and sets match0->begin() to text.begin(), since the DFA can't track
1837 // where the match actually began.
1838 //
1839 // This is the only external interface (class DFA only exists in this file).
1840 //
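// A minimal usage sketch (illustrative only; the real call sites, e.g. in
// re2.cc, pass different arguments depending on the query):
//
//   absl::string_view match;
//   bool failed = false;
//   if (prog->SearchDFA(text, absl::string_view(), Prog::kUnanchored,
//                       Prog::kFirstMatch, &match, &failed, NULL) &&
//       !failed) {
//     // match ends at the end of the leftmost match and begins at
//     // text.begin(), since the DFA cannot report where the match began.
//   }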
1841 bool Prog::SearchDFA(absl::string_view text, absl::string_view context,
1842                      Anchor anchor, MatchKind kind, absl::string_view* match0,
1843                      bool* failed, SparseSet* matches) {
1844   *failed = false;
1845 
1846   if (context.data() == NULL)
1847     context = text;
1848   bool caret = anchor_start();
1849   bool dollar = anchor_end();
1850   if (reversed_) {
1851     using std::swap;
1852     swap(caret, dollar);
1853   }
1854   if (caret && BeginPtr(context) != BeginPtr(text))
1855     return false;
1856   if (dollar && EndPtr(context) != EndPtr(text))
1857     return false;
1858 
1859   // Handle full match by running an anchored longest match
1860   // and then checking if it covers all of text.
1861   bool anchored = anchor == kAnchored || anchor_start() || kind == kFullMatch;
1862   bool endmatch = false;
1863   if (kind == kManyMatch) {
1864     // This is split out in order to avoid clobbering kind.
1865   } else if (kind == kFullMatch || anchor_end()) {
1866     endmatch = true;
1867     kind = kLongestMatch;
1868   }
1869 
1870   // If the caller doesn't care where the match is (just whether one exists),
1871   // then we can stop at the very first match we find, the so-called
1872   // "earliest match".
1873   bool want_earliest_match = false;
1874   if (kind == kManyMatch) {
1875     // This is split out in order to avoid clobbering kind.
1876     if (matches == NULL) {
1877       want_earliest_match = true;
1878     }
1879   } else if (match0 == NULL && !endmatch) {
1880     want_earliest_match = true;
1881     kind = kLongestMatch;
1882   }
1883 
1884   DFA* dfa = GetDFA(kind);
1885   const char* ep;
1886   bool matched = dfa->Search(text, context, anchored,
1887                              want_earliest_match, !reversed_,
1888                              failed, &ep, matches);
1889   if (*failed) {
1890     hooks::GetDFASearchFailureHook()({
1891         // Nothing yet...
1892     });
1893     return false;
1894   }
1895   if (!matched)
1896     return false;
1897   if (endmatch && ep != (reversed_ ? text.data() : text.data() + text.size()))
1898     return false;
1899 
1900   // If caller cares, record the boundary of the match.
1901   // We only know where it ends, so use the boundary of text
1902   // as the beginning.
1903   if (match0) {
1904     if (reversed_)
1905       *match0 =
1906           absl::string_view(ep, static_cast<size_t>(text.data() + text.size() - ep));
1907     else
1908       *match0 =
1909           absl::string_view(text.data(), static_cast<size_t>(ep - text.data()));
1910   }
1911   return true;
1912 }
1913 
1914 // Build out all states in DFA.  Returns number of states.
1915 int DFA::BuildAllStates(const Prog::DFAStateCallback& cb) {
1916   if (!ok())
1917     return 0;
1918 
1919   // Pick out start state for unanchored search
1920   // at beginning of text.
1921   RWLocker l(&cache_mutex_);
1922   SearchParams params(absl::string_view(), absl::string_view(), &l);
1923   params.anchored = false;
1924   if (!AnalyzeSearch(&params) ||
1925       params.start == NULL ||
1926       params.start == DeadState)
1927     return 0;
1928 
1929   // Add start state to work queue.
1930   // Note that any State* that we handle here must point into the cache,
1931   // so we can simply depend on pointer-as-a-number hashing and equality.
1932   absl::flat_hash_map<State*, int> m;
1933   std::deque<State*> q;
1934   m.emplace(params.start, static_cast<int>(m.size()));
1935   q.push_back(params.start);
1936 
1937   // Compute the input bytes needed to cover all of the next pointers.
1938   int nnext = prog_->bytemap_range() + 1;  // + 1 for kByteEndText slot
1939   std::vector<int> input(nnext);
1940   for (int c = 0; c < 256; c++) {
1941     int b = prog_->bytemap()[c];
1942     while (c < 256-1 && prog_->bytemap()[c+1] == b)
1943       c++;
1944     input[b] = c;
1945   }
1946   input[prog_->bytemap_range()] = kByteEndText;
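  // (For example, if bytes 'a'..'z' all map to byte class 3 and no higher
  // byte value does, the loop above leaves input[3] == 'z'; any single
  // byte from the class is enough to exercise that next pointer.)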
1947 
1948   // Scratch space for the output.
1949   std::vector<int> output(nnext);
1950 
1951   // Flood to expand every state.
1952   bool oom = false;
1953   while (!q.empty()) {
1954     State* s = q.front();
1955     q.pop_front();
1956     for (int c : input) {
1957       State* ns = RunStateOnByteUnlocked(s, c);
1958       if (ns == NULL) {
1959         oom = true;
1960         break;
1961       }
1962       if (ns == DeadState) {
1963         output[ByteMap(c)] = -1;
1964         continue;
1965       }
1966       if (m.find(ns) == m.end()) {
1967         m.emplace(ns, static_cast<int>(m.size()));
1968         q.push_back(ns);
1969       }
1970       output[ByteMap(c)] = m[ns];
1971     }
1972     if (cb)
1973       cb(oom ? NULL : output.data(),
1974          s == FullMatchState || s->IsMatch());
1975     if (oom)
1976       break;
1977   }
1978 
1979   return static_cast<int>(m.size());
1980 }
1981 
1982 // Build out all states in DFA for kind.  Returns number of states.
1983 int Prog::BuildEntireDFA(MatchKind kind, const DFAStateCallback& cb) {
1984   return GetDFA(kind)->BuildAllStates(cb);
1985 }
1986 
1987 // Computes min and max for matching string.
1988 // Won't return strings bigger than maxlen.
1989 bool DFA::PossibleMatchRange(std::string* min, std::string* max, int maxlen) {
1990   if (!ok())
1991     return false;
1992 
1993   // NOTE: if future users of PossibleMatchRange want more precision when
1994   // presented with infinitely repeated elements, consider making this a
1995   // parameter to PossibleMatchRange.
1996   static int kMaxEltRepetitions = 0;
1997 
1998   // Keep track of the number of times we've visited states previously. We only
1999   // revisit a given state if it's part of a repeated group, so if the value
2000   // portion of the map tuple exceeds kMaxEltRepetitions we bail out and set
2001   // |*max| to |PrefixSuccessor(*max)|.
2002   //
2003   // Also note that previously_visited_states[UnseenStatePtr] will, in the STL
2004   // tradition, implicitly insert a '0' value at first use. We take advantage
2005   // of that property below.
2006   absl::flat_hash_map<State*, int> previously_visited_states;
2007 
2008   // Pick out start state for anchored search at beginning of text.
2009   RWLocker l(&cache_mutex_);
2010   SearchParams params(absl::string_view(), absl::string_view(), &l);
2011   params.anchored = true;
2012   if (!AnalyzeSearch(&params))
2013     return false;
2014   if (params.start == DeadState) {  // No matching strings
2015     *min = "";
2016     *max = "";
2017     return true;
2018   }
2019   if (params.start == FullMatchState)  // Every string matches: no max
2020     return false;
2021 
2022   // The DFA is essentially a big graph rooted at params.start,
2023   // and paths in the graph correspond to accepted strings.
2024   // Each node in the graph has potentially 256+1 arrows
2025   // coming out, one for each byte plus the magic end of
2026   // text character kByteEndText.
2027 
2028   // To find the smallest possible prefix of an accepted
2029   // string, we just walk the graph preferring to follow
2030   // arrows with the lowest bytes possible.  To find the
2031   // largest possible prefix, we follow the largest bytes
2032   // possible.
2033 
2034   // The test for whether there is an arrow from s on byte j is
2035   //    ns = RunStateOnByteUnlocked(s, j);
2036   //    if (ns == NULL)
2037   //      return false;
2038   //    if (ns != DeadState && ns->ninst > 0)
2039   // The RunStateOnByteUnlocked call asks the DFA to build out the graph.
2040   // It returns NULL only if the DFA has run out of memory,
2041   // in which case we can't be sure of anything.
2042   // The second check sees whether there was graph built
2043   // and whether it is interesting graph.  Nodes might have
2044   // ns->ninst == 0 if they exist only to represent the fact
2045   // that a match was found on the previous byte.
2046 
2047   // Build minimum prefix.
2048   State* s = params.start;
2049   min->clear();
2050   absl::MutexLock lock(&mutex_);
2051   for (int i = 0; i < maxlen; i++) {
2052     if (previously_visited_states[s] > kMaxEltRepetitions)
2053       break;
2054     previously_visited_states[s]++;
2055 
2056     // Stop if min is a match.
2057     State* ns = RunStateOnByte(s, kByteEndText);
2058     if (ns == NULL)  // DFA out of memory
2059       return false;
2060     if (ns != DeadState && (ns == FullMatchState || ns->IsMatch()))
2061       break;
2062 
2063     // Try to extend the string with low bytes.
2064     bool extended = false;
2065     for (int j = 0; j < 256; j++) {
2066       ns = RunStateOnByte(s, j);
2067       if (ns == NULL)  // DFA out of memory
2068         return false;
2069       if (ns == FullMatchState ||
2070           (ns > SpecialStateMax && ns->ninst_ > 0)) {
2071         extended = true;
2072         min->append(1, static_cast<char>(j));
2073         s = ns;
2074         break;
2075       }
2076     }
2077     if (!extended)
2078       break;
2079   }
2080 
2081   // Build maximum prefix.
2082   previously_visited_states.clear();
2083   s = params.start;
2084   max->clear();
2085   for (int i = 0; i < maxlen; i++) {
2086     if (previously_visited_states[s] > kMaxEltRepetitions)
2087       break;
2088     previously_visited_states[s] += 1;
2089 
2090     // Try to extend the string with high bytes.
2091     bool extended = false;
2092     for (int j = 255; j >= 0; j--) {
2093       State* ns = RunStateOnByte(s, j);
2094       if (ns == NULL)
2095         return false;
2096       if (ns == FullMatchState ||
2097           (ns > SpecialStateMax && ns->ninst_ > 0)) {
2098         extended = true;
2099         max->append(1, static_cast<char>(j));
2100         s = ns;
2101         break;
2102       }
2103     }
2104     if (!extended) {
2105       // Done, no need for PrefixSuccessor.
2106       return true;
2107     }
2108   }
2109 
2110   // Stopped while still adding to *max - round aaaaaaaaaa... to aaaa...b
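  // (For example, if the loop above stops while *max == "aaaa", PrefixSuccessor
  // turns it into "aaab", which is lexicographically greater than any longer
  // run of 'a's the pattern might still match.)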
2111   PrefixSuccessor(max);
2112 
2113   // If there are no bytes left, we have no way to say "there is no maximum
2114   // string".  We could make the interface more complicated and be able to
2115   // return "there is no maximum but here is a minimum", but that seems like
2116   // overkill -- the most common no-max case is all possible strings, so not
2117   // telling the caller that the empty string is the minimum match isn't a
2118   // great loss.
2119   if (max->empty())
2120     return false;
2121 
2122   return true;
2123 }
2124 
2125 // PossibleMatchRange for a Prog.
2126 bool Prog::PossibleMatchRange(std::string* min, std::string* max, int maxlen) {
2127   // Have to use dfa_longest_ to get all strings for full matches.
2128   // For example, (a|aa) never matches aa in first-match mode.
2129   return GetDFA(kLongestMatch)->PossibleMatchRange(min, max, maxlen);
2130 }
2131 
2132 }  // namespace re2
2133