xref: /aosp_15_r20/external/libtextclassifier/native/utils/tokenizer.cc (revision 993b0882672172b81d12fad7a7ac0c3e5c824a12)
/*
 * Copyright (C) 2018 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "utils/tokenizer.h"

#include <algorithm>

#include "utils/base/logging.h"
#include "utils/base/macros.h"
#include "utils/strings/utf8.h"
#include "utils/utf8/unicodetext.h"
#include "absl/strings/string_view.h"

namespace libtextclassifier3 {

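// Unpacks the serialized codepoint ranges and sorts them by start codepoint
// so that FindTokenizationRange() can binary-search them, sorts the internal
// tokenizer ranges, and warns when `split_on_script_change` is requested
// together with the mixed tokenization type, where it is not supported.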
Tokenizer::Tokenizer(
    const TokenizationType type, const UniLib* unilib,
    const std::vector<const TokenizationCodepointRange*>& codepoint_ranges,
    const std::vector<const CodepointRange*>&
        internal_tokenizer_codepoint_ranges,
    const bool split_on_script_change,
    const bool icu_preserve_whitespace_tokens,
    const bool preserve_floating_numbers)
    : type_(type),
      unilib_(unilib),
      split_on_script_change_(split_on_script_change),
      icu_preserve_whitespace_tokens_(icu_preserve_whitespace_tokens),
      preserve_floating_numbers_(preserve_floating_numbers) {
  for (const TokenizationCodepointRange* range : codepoint_ranges) {
    codepoint_ranges_.emplace_back(range->UnPack());
  }

  std::stable_sort(
      codepoint_ranges_.begin(), codepoint_ranges_.end(),
      [](const std::unique_ptr<const TokenizationCodepointRangeT>& a,
         const std::unique_ptr<const TokenizationCodepointRangeT>& b) {
        return a->start < b->start;
      });

  SortCodepointRanges(internal_tokenizer_codepoint_ranges,
                      &internal_tokenizer_codepoint_ranges_);
  if (type_ == TokenizationType_MIXED && split_on_script_change) {
    TC3_LOG(ERROR) << "The option `split_on_script_change` is unavailable for "
                      "the selected tokenizer type (mixed).";
  }
}

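// Binary-searches the sorted codepoint ranges for the one that contains
// `codepoint`. Returns nullptr if no configured range contains it.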
const TokenizationCodepointRangeT* Tokenizer::FindTokenizationRange(
    int codepoint) const {
  auto it = std::lower_bound(
      codepoint_ranges_.begin(), codepoint_ranges_.end(), codepoint,
      [](const std::unique_ptr<const TokenizationCodepointRangeT>& range,
         int codepoint) {
        // This function compares range with the codepoint for the purpose of
        // finding the first greater or equal range. Because of the use of
        // std::lower_bound it needs to return true when range < codepoint;
        // the first time it will return false the lower bound is found and
        // returned.
        //
        // It might seem weird that the condition is range.end <= codepoint
        // here but when codepoint == range.end it means it's actually just
        // outside of the range, thus the range is less than the codepoint.
        return range->end <= codepoint;
      });
  if (it != codepoint_ranges_.end() && (*it)->start <= codepoint &&
      (*it)->end > codepoint) {
    return it->get();
  } else {
    return nullptr;
  }
}

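// Looks up the tokenization role and script id of `codepoint`, falling back
// to the default role and kUnknownScript when no configured range matches.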
void Tokenizer::GetScriptAndRole(char32 codepoint,
                                 TokenizationCodepointRange_::Role* role,
                                 int* script) const {
  const TokenizationCodepointRangeT* range = FindTokenizationRange(codepoint);
  if (range) {
    *role = range->role;
    *script = range->script_id;
  } else {
    *role = TokenizationCodepointRange_::Role_DEFAULT_ROLE;
    *script = kUnknownScript;
  }
}

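// Convenience overload: wraps the UTF-8 input in a UnicodeText view without
// copying and forwards to the UnicodeText overload below.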
std::vector<Token> Tokenizer::Tokenize(absl::string_view text) const {
  UnicodeText text_unicode = UTF8ToUnicodeText(text, /*do_copy=*/false);
  return Tokenize(text_unicode);
}

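// Dispatches to the tokenizer selected by `type_`: the internal rule-based
// tokenizer, the ICU break iterator (followed by internal retokenization in
// mixed mode), or the letter/digit tokenizer. Returns an empty vector when
// ICU or letter/digit tokenization fails.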
std::vector<Token> Tokenizer::Tokenize(const UnicodeText& text_unicode) const {
  switch (type_) {
    case TokenizationType_INTERNAL_TOKENIZER:
      return InternalTokenize(text_unicode);
    case TokenizationType_ICU:
      TC3_FALLTHROUGH_INTENDED;
    case TokenizationType_MIXED: {
      std::vector<Token> result;
      if (!ICUTokenize(text_unicode, &result)) {
        return {};
      }
      if (type_ == TokenizationType_MIXED) {
        InternalRetokenize(text_unicode, &result);
      }
      return result;
    }
    case TokenizationType_LETTER_DIGIT: {
      std::vector<Token> result;
      if (!NumberTokenize(text_unicode, &result)) {
        return {};
      }
      return result;
    }
    default:
      TC3_LOG(ERROR) << "Unknown tokenization type specified. Using internal.";
      return InternalTokenize(text_unicode);
  }
}

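// Appends the UTF-8 bytes of the codepoint at `it` to the token's value.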
void AppendCodepointToToken(UnicodeText::const_iterator it, Token* token) {
  token->value += std::string(
      it.utf8_data(), it.utf8_data() + GetNumBytesForUTF8Char(it.utf8_data()));
}

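// Rule-based tokenization: walks the text codepoint by codepoint, splitting
// before or after codepoints whose configured role requests it and,
// optionally, whenever the script id changes. Codepoints with the
// DISCARD_CODEPOINT role are not copied into any token.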
std::vector<Token> Tokenizer::InternalTokenize(
    const UnicodeText& text_unicode) const {
  std::vector<Token> result;
  Token new_token("", 0, 0);
  int codepoint_index = 0;

  int last_script = kInvalidScript;
  for (auto it = text_unicode.begin(); it != text_unicode.end();
       ++it, ++codepoint_index) {
    TokenizationCodepointRange_::Role role;
    int script;
    GetScriptAndRole(*it, &role, &script);

    if (role & TokenizationCodepointRange_::Role_SPLIT_BEFORE ||
        (split_on_script_change_ && last_script != kInvalidScript &&
         last_script != script)) {
      if (!new_token.value.empty()) {
        result.push_back(new_token);
      }
      new_token = Token("", codepoint_index, codepoint_index);
    }
    if (!(role & TokenizationCodepointRange_::Role_DISCARD_CODEPOINT)) {
      new_token.end += 1;
      AppendCodepointToToken(it, &new_token);
    }
    if (role & TokenizationCodepointRange_::Role_SPLIT_AFTER) {
      if (!new_token.value.empty()) {
        result.push_back(new_token);
      }
      new_token = Token("", codepoint_index + 1, codepoint_index + 1);
    }

    last_script = script;
  }
  if (!new_token.value.empty()) {
    result.push_back(new_token);
  }

  return result;
}

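// Runs the internal tokenizer on the substring of `unicode_text` delimited by
// `span` and appends the resulting tokens to `result`, shifting their bounds
// by span.first so that they refer to positions in the full text. A negative
// span.first means there is nothing to tokenize.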
void Tokenizer::TokenizeSubstring(const UnicodeText& unicode_text,
                                  CodepointSpan span,
                                  std::vector<Token>* result) const {
  if (span.first < 0) {
    // There is no span to tokenize.
    return;
  }

  // Extract the substring.
  UnicodeText text = UnicodeText::Substring(unicode_text, span.first,
                                            span.second, /*do_copy=*/false);

  // Run the tokenizer and update the token bounds to reflect the offset of the
  // substring.
  std::vector<Token> tokens = InternalTokenize(text);

  // Avoids progressive capacity increases in the for loop.
  result->reserve(result->size() + tokens.size());
  for (Token& token : tokens) {
    token.start += span.first;
    token.end += span.first;
    result->emplace_back(std::move(token));
  }
}

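// Used in mixed mode: consecutive ICU tokens consisting entirely of
// codepoints covered by the internal tokenizer ranges are merged into one
// span and re-tokenized with the internal tokenizer; all other tokens are
// passed through unchanged.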
void Tokenizer::InternalRetokenize(const UnicodeText& unicode_text,
                                   std::vector<Token>* tokens) const {
  std::vector<Token> result;
  CodepointSpan span(-1, -1);
  for (Token& token : *tokens) {
    const UnicodeText unicode_token_value =
        UTF8ToUnicodeText(token.value, /*do_copy=*/false);
    bool should_retokenize = true;
    for (const int codepoint : unicode_token_value) {
      if (!IsCodepointInRanges(codepoint,
                               internal_tokenizer_codepoint_ranges_)) {
        should_retokenize = false;
        break;
      }
    }

    if (should_retokenize) {
      if (span.first < 0) {
        span.first = token.start;
      }
      span.second = token.end;
    } else {
      TokenizeSubstring(unicode_text, span, &result);
      span.first = -1;
      result.emplace_back(std::move(token));
    }
  }
  TokenizeSubstring(unicode_text, span, &result);

  *tokens = std::move(result);
}

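// Tokenizes with the UniLib break iterator. Whitespace-only tokens are
// dropped unless `icu_preserve_whitespace_tokens_` is set. Returns false if
// the break iterator cannot be created or yields an out-of-range boundary.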
bool Tokenizer::ICUTokenize(const UnicodeText& context_unicode,
                            std::vector<Token>* result) const {
  std::unique_ptr<UniLib::BreakIterator> break_iterator =
      unilib_->CreateBreakIterator(context_unicode);
  if (!break_iterator) {
    return false;
  }
  const int context_unicode_size = context_unicode.size_codepoints();
  int last_unicode_index = 0;
  int unicode_index = 0;
  auto token_begin_it = context_unicode.begin();
  while ((unicode_index = break_iterator->Next()) !=
         UniLib::BreakIterator::kDone) {
    const int token_length = unicode_index - last_unicode_index;
    if (token_length + last_unicode_index > context_unicode_size) {
      return false;
    }

    auto token_end_it = token_begin_it;
    std::advance(token_end_it, token_length);
    TC3_CHECK(token_end_it <= context_unicode.end());

    // Determine if the whole token is whitespace.
    bool is_whitespace = true;
    for (auto char_it = token_begin_it; char_it < token_end_it; ++char_it) {
      if (!unilib_->IsWhitespace(*char_it)) {
        is_whitespace = false;
        break;
      }
    }

    const std::string token =
        context_unicode.UTF8Substring(token_begin_it, token_end_it);

    if (!is_whitespace || icu_preserve_whitespace_tokens_) {
      result->push_back(Token(token, last_unicode_index, unicode_index,
                              /*is_padding=*/false, is_whitespace));
    }

    last_unicode_index = unicode_index;
    token_begin_it = token_end_it;
  }

  return true;
}

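// Letter/digit tokenization: groups consecutive codepoints of the same class
// (digit, letter, whitespace) into a single token and emits every other
// codepoint as a one-character token. With `preserve_floating_numbers_` set,
// a dot between digits stays inside the numeric token (e.g. "3.14").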
bool Tokenizer::NumberTokenize(const UnicodeText& text_unicode,
                               std::vector<Token>* result) const {
  Token new_token("", 0, 0);
  NumberTokenType current_token_type = NOT_SET;
  int codepoint_index = 0;

  auto PushToken = [&new_token, result]() {
    if (!new_token.value.empty()) {
      result->push_back(new_token);
    }
  };

  auto MaybeResetTokenAndAddChar =
      [&new_token, PushToken, &current_token_type](
          int codepoint_index, NumberTokenType token_type,
          UnicodeText::const_iterator it, bool is_whitespace = false) {
        if (current_token_type != token_type) {
          PushToken();
          new_token = Token("", codepoint_index, codepoint_index,
                            /*is_padding=*/false, is_whitespace);
        }
        new_token.end += 1;
        AppendCodepointToToken(it, &new_token);
        current_token_type = token_type;
      };

  auto FinishTokenAndAddSeparator =
      [&new_token, result, &current_token_type, PushToken](
          int codepoint_index, UnicodeText::const_iterator it) {
        PushToken();

        result->emplace_back("", codepoint_index, codepoint_index + 1);
        AppendCodepointToToken(it, &result->back());

        new_token = Token("", codepoint_index + 1, codepoint_index + 1);
        current_token_type = NOT_SET;
      };

  for (auto it = text_unicode.begin(); it != text_unicode.end();
       ++it, ++codepoint_index) {
    if (unilib_->IsDigit(*it)) {
      MaybeResetTokenAndAddChar(codepoint_index, NUMERICAL, it);
    } else if (unilib_->IsLetter(*it)) {
      MaybeResetTokenAndAddChar(codepoint_index, TERM, it);
    } else if (unilib_->IsWhitespace(*it)) {
      MaybeResetTokenAndAddChar(codepoint_index, WHITESPACE, it,
                                /*is_whitespace=*/true);
    } else if (unilib_->IsDot(*it) && preserve_floating_numbers_) {
      auto it_next = std::next(it);
      if (current_token_type == NUMERICAL && it_next != text_unicode.end() &&
          unilib_->IsDigit(*it_next)) {
        new_token.end += 1;
        AppendCodepointToToken(it, &new_token);
      } else {
        // The dot is not part of a number: either the current token is not
        // numeric, or the dot is at the end of the text or followed by a
        // non-digit. Emit it as a separate token.
        FinishTokenAndAddSeparator(codepoint_index, it);
      }
    } else {
      FinishTokenAndAddSeparator(codepoint_index, it);
    }
  }
  PushToken();

  return true;
}

}  // namespace libtextclassifier3