/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow_lite_support/cc/text/tokenizers/regex_tokenizer.h"

#include <iostream>

#include "absl/strings/str_cat.h"
#include "absl/strings/substitute.h"
#include "tensorflow_lite_support/cc/utils/common_utils.h"

namespace tflite {
namespace support {
namespace text {
namespace tokenizer {

namespace {
constexpr char kStart[] = "<START>";
constexpr char kPad[] = "<PAD>";
constexpr char kUnknown[] = "<UNKNOWN>";

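// Builds the inverse map from vocab index to token. For example
// (illustrative), a vocab {"<PAD>": 0, "the": 1} yields {0: "<PAD>", 1: "the"}.
// The string_view values alias the keys of `token_index_map` (stable in an
// absl::node_hash_map), so `token_index_map` must outlive `index_token_map`.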
void buildIndexTokenMap(
    const absl::node_hash_map<std::string, int>& token_index_map,
    absl::node_hash_map<int, absl::string_view>* index_token_map) {
  for (const auto& token : token_index_map) {
    (*index_token_map)[token.second] = token.first;
  }
}

}  // namespace

// RE2::FindAndConsume requires delim_re_ to contain a capturing group so that
// it can report the span of the matched delimiter. Wrap the regex in
// parentheses to create such a group; an extra pair is harmless if the regex
// is already parenthesized.
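// For example (illustrative), a regex_pattern of [^\w']+ becomes the pattern
// ([^\w']+).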
RegexTokenizer::RegexTokenizer(const std::string& regex_pattern,
                               const std::string& path_to_vocab)
    : delim_re_{absl::Substitute("($0)", regex_pattern)},
      token_index_map_{utils::LoadVocabAndIndexFromFile(path_to_vocab)} {
  buildIndexTokenMap(token_index_map_, &index_token_map_);
}

RegexTokenizer::RegexTokenizer(const std::string& regex_pattern,
                               const char* vocab_buffer_data,
                               size_t vocab_buffer_size)
    : delim_re_{absl::Substitute("($0)", regex_pattern)},
      token_index_map_{utils::LoadVocabAndIndexFromBuffer(vocab_buffer_data,
                                                          vocab_buffer_size)} {
  buildIndexTokenMap(token_index_map_, &index_token_map_);
}

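// Illustrative example: with delim_re_ built from the pattern [^\w']+,
// Tokenize("I don't like eggs.") produces the subwords
// {"I", "don't", "like", "eggs"}; the trailing "." is consumed as a delimiter,
// leaving an empty leftover.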
TokenizerResult RegexTokenizer::Tokenize(const std::string& input) {
  // View the full (data, size) range of the input so that an embedded '\0'
  // byte cannot truncate tokenization.
  absl::string_view leftover(input);
  absl::string_view last_end = leftover;

  TokenizerResult result;

  // Keep looking for split points until we have reached the end of the input.
  absl::string_view extracted_delim_token;
  while (RE2::FindAndConsume(&leftover, delim_re_, &extracted_delim_token)) {
    absl::string_view token(last_end.data(),
                            extracted_delim_token.data() - last_end.data());
    last_end = leftover;

    // Emit the text between the previous delimiter and this one, but only if
    // it is non-empty.
    if (!token.empty()) {
      result.subwords.push_back(std::string(token));
    }
  }

  // Close the last token.
  if (!leftover.empty()) {
    result.subwords.push_back(std::string(leftover));
  }

  return result;
}

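// Illustrative round trip (ids are hypothetical): if the vocab maps "<PAD>"
// to 0, then LookupId("<PAD>", &id) sets id to 0 and returns true, and
// LookupWord(0, &word) sets word back to "<PAD>".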
bool RegexTokenizer::LookupId(absl::string_view key, int* result) const {
  auto it = token_index_map_.find(key);
  if (it == token_index_map_.end()) {
    return false;
  }
  *result = it->second;
  return true;
}

bool RegexTokenizer::LookupWord(int vocab_id, absl::string_view* result) const {
  auto it = index_token_map_.find(vocab_id);
  if (it == index_token_map_.end()) {
    return false;
  }
  *result = it->second;
  return true;
}

bool RegexTokenizer::GetStartToken(int* start_token) {
  return LookupId(kStart, start_token);
}

bool RegexTokenizer::GetPadToken(int* pad_token) {
  return LookupId(kPad, pad_token);
}

bool RegexTokenizer::GetUnknownToken(int* unknown_token) {
  return LookupId(kUnknown, unknown_token);
}
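
// Usage sketch (the vocab path and pattern below are hypothetical):
//
//   RegexTokenizer tokenizer("[^\\w']+", "/path/to/vocab.txt");
//   TokenizerResult result = tokenizer.Tokenize("I don't like eggs.");
//   int unknown_id = 0;
//   if (tokenizer.GetUnknownToken(&unknown_id)) {
//     // Map out-of-vocabulary subwords to unknown_id.
//   }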

}  // namespace tokenizer
}  // namespace text
}  // namespace support
}  // namespace tflite