// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/strings/utf_string_conversions.h"

#include <stdint.h>

#include <type_traits>  // For std::decay_t.

#include "base/logging.h"  // For DCHECK.
#include "base/strings/string_piece.h"
#include "base/strings/string_util.h"
#include "base/strings/utf_string_conversion_utils.h"
#include "base/third_party/icu/icu_utf.h"
#include "build/build_config.h"

namespace base {

namespace {

constexpr int32_t kErrorCodePoint = 0xFFFD;

// Size coefficient ----------------------------------------------------------
// The maximum number of codeunits in the destination encoding corresponding to
// one codeunit in the source encoding.
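//
// For example, a BMP character such as U+FFFF is one UTF-16 codeunit but up
// to three UTF-8 codeunits, while a supplementary character takes four UTF-8
// codeunits but also two UTF-16 codeunits, i.e. at most two per codeunit.
// This is where the worst-case factors below come from.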

template <typename SrcChar, typename DestChar>
struct SizeCoefficient {
  static_assert(sizeof(SrcChar) < sizeof(DestChar),
                "Default case: from a smaller encoding to the bigger one");

  // ASCII symbols are encoded by one codeunit in all encodings.
  static constexpr int value = 1;
};

template <>
struct SizeCoefficient<char16, char> {
  // One UTF-16 codeunit corresponds to at most 3 codeunits in UTF-8.
  static constexpr int value = 3;
};

#if defined(WCHAR_T_IS_UTF32)
template <>
struct SizeCoefficient<wchar_t, char> {
  // UTF-8 uses at most 4 codeunits per character.
  static constexpr int value = 4;
};

template <>
struct SizeCoefficient<wchar_t, char16> {
  // UTF-16 uses at most 2 codeunits per character.
  static constexpr int value = 2;
};
#endif  // defined(WCHAR_T_IS_UTF32)

template <typename SrcChar, typename DestChar>
constexpr int size_coefficient_v =
    SizeCoefficient<std::decay_t<SrcChar>, std::decay_t<DestChar>>::value;
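// std::decay_t strips references and cv-qualifiers before the lookup, so, for
// example, size_coefficient_v<const char16&, char> still yields 3.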

// UnicodeAppendUnsafe --------------------------------------------------------
// Function overloads that write code_point to the output string. The output
// string must have enough space for the codepoint.
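//
// The CBU8_APPEND_UNSAFE and CBU16_APPEND_UNSAFE macros (Chromium's copies of
// ICU's U8_APPEND_UNSAFE/U16_APPEND_UNSAFE in base/third_party/icu/icu_utf.h)
// write the codeunits for code_point at offset *size and advance *size by the
// number of codeunits written: 1-4 for UTF-8 and 1-2 for UTF-16. For example,
// appending U+1F600 advances *size by 4 in UTF-8 and by 2 (a surrogate pair)
// in UTF-16.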

void UnicodeAppendUnsafe(char* out, int32_t* size, uint32_t code_point) {
  CBU8_APPEND_UNSAFE(out, *size, code_point);
}

void UnicodeAppendUnsafe(char16* out, int32_t* size, uint32_t code_point) {
  CBU16_APPEND_UNSAFE(out, *size, code_point);
}

#if defined(WCHAR_T_IS_UTF32)

void UnicodeAppendUnsafe(wchar_t* out, int32_t* size, uint32_t code_point) {
  out[(*size)++] = code_point;
}

#endif  // defined(WCHAR_T_IS_UTF32)

// DoUTFConversion ------------------------------------------------------------
// Main driver of UTFConversion specialized for different Src encodings.
// dest must have enough room for the converted text.
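//
// All overloads share the same error handling: ill-formed input (an invalid
// byte sequence, an unpaired surrogate, or an out-of-range code point) is
// replaced with kErrorCodePoint (U+FFFD REPLACEMENT CHARACTER), conversion
// continues, and the function returns false.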

template <typename DestChar>
bool DoUTFConversion(const char* src,
                     int32_t src_len,
                     DestChar* dest,
                     int32_t* dest_len) {
  bool success = true;

  for (int32_t i = 0; i < src_len;) {
    int32_t code_point;
    // CBU8_NEXT decodes one code point starting at i and advances i past it;
    // it yields a negative value for an ill-formed sequence, which
    // IsValidCodepoint() rejects.
    CBU8_NEXT(src, i, src_len, code_point);

    if (!IsValidCodepoint(code_point)) {
      success = false;
      code_point = kErrorCodePoint;
    }

    UnicodeAppendUnsafe(dest, dest_len, code_point);
  }

  return success;
}

template <typename DestChar>
bool DoUTFConversion(const char16* src,
                     int32_t src_len,
                     DestChar* dest,
                     int32_t* dest_len) {
  bool success = true;

  // A single codeunit is only valid if it is not part of a surrogate pair.
  auto ConvertSingleChar = [&success](char16 in) -> int32_t {
    if (!CBU16_IS_SINGLE(in) || !IsValidCodepoint(in)) {
      success = false;
      return kErrorCodePoint;
    }
    return in;
  };

  int32_t i = 0;

  // Always have one more codeunit available so we never check boundaries in
  // the middle of a surrogate pair.
  while (i < src_len - 1) {
    int32_t code_point;

    if (CBU16_IS_LEAD(src[i]) && CBU16_IS_TRAIL(src[i + 1])) {
      // A lead/trail pair decodes to a supplementary code point.
      code_point = CBU16_GET_SUPPLEMENTARY(src[i], src[i + 1]);
      if (!IsValidCodepoint(code_point)) {
        code_point = kErrorCodePoint;
        success = false;
      }
      i += 2;
    } else {
      code_point = ConvertSingleChar(src[i]);
      ++i;
    }

    UnicodeAppendUnsafe(dest, dest_len, code_point);
  }

  // The last codeunit, if any, cannot start a surrogate pair.
  if (i < src_len)
    UnicodeAppendUnsafe(dest, dest_len, ConvertSingleChar(src[i]));

  return success;
}

#if defined(WCHAR_T_IS_UTF32)

template <typename DestChar>
bool DoUTFConversion(const wchar_t* src,
                     int32_t src_len,
                     DestChar* dest,
                     int32_t* dest_len) {
  bool success = true;

  for (int32_t i = 0; i < src_len; ++i) {
    int32_t code_point = src[i];

    if (!IsValidCodepoint(code_point)) {
      success = false;
      code_point = kErrorCodePoint;
    }

    UnicodeAppendUnsafe(dest, dest_len, code_point);
  }

  return success;
}

#endif  // defined(WCHAR_T_IS_UTF32)

// UTFConversion --------------------------------------------------------------
// Function template for generating all UTF conversions.
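//
// The strategy: pure-ASCII input is copied codeunit-for-codeunit via the fast
// path; otherwise the output is resized to the worst case (input length times
// the size coefficient), converted in place, and then trimmed to the actual
// converted length.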

template <typename InputString, typename DestString>
bool UTFConversion(const InputString& src_str, DestString* dest_str) {
  if (IsStringASCII(src_str)) {
    dest_str->assign(src_str.begin(), src_str.end());
    return true;
  }

  dest_str->resize(src_str.length() *
                   size_coefficient_v<typename InputString::value_type,
                                      typename DestString::value_type>);

  // The empty string is ASCII and was handled above, so dest_str is non-empty
  // here and it is OK to call operator[].
  auto* dest = &(*dest_str)[0];

  // ICU requires 32-bit numbers.
  int32_t src_len32 = static_cast<int32_t>(src_str.length());
  int32_t dest_len32 = 0;

  bool res = DoUTFConversion(src_str.data(), src_len32, dest, &dest_len32);

  dest_str->resize(dest_len32);
  dest_str->shrink_to_fit();

  return res;
}

}  // namespace

// UTF-16 <-> UTF-8 -----------------------------------------------------------

bool UTF8ToUTF16(const char* src, size_t src_len, string16* output) {
  return UTFConversion(StringPiece(src, src_len), output);
}

string16 UTF8ToUTF16(StringPiece utf8) {
  string16 ret;
  // Ignore the success flag of this call; it will do the best it can for
  // invalid input, which is what we want here.
  UTF8ToUTF16(utf8.data(), utf8.size(), &ret);
  return ret;
}

bool UTF16ToUTF8(const char16* src, size_t src_len, std::string* output) {
  return UTFConversion(StringPiece16(src, src_len), output);
}

std::string UTF16ToUTF8(StringPiece16 utf16) {
  std::string ret;
  // Ignore the success flag of this call; it will do the best it can for
  // invalid input, which is what we want here.
  UTF16ToUTF8(utf16.data(), utf16.length(), &ret);
  return ret;
}

// UTF-16 <-> Wide ------------------------------------------------------------

#if defined(WCHAR_T_IS_UTF16)
// When wide == UTF-16 the conversions are no-ops.

bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
  output->assign(src, src_len);
  return true;
}

string16 WideToUTF16(WStringPiece wide) {
  return wide.as_string();
}

bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
  output->assign(src, src_len);
  return true;
}

std::wstring UTF16ToWide(StringPiece16 utf16) {
  return utf16.as_string();
}

#elif defined(WCHAR_T_IS_UTF32)

bool WideToUTF16(const wchar_t* src, size_t src_len, string16* output) {
  return UTFConversion(base::WStringPiece(src, src_len), output);
}

string16 WideToUTF16(WStringPiece wide) {
  string16 ret;
  // Ignore the success flag of this call; it will do the best it can for
  // invalid input, which is what we want here.
  WideToUTF16(wide.data(), wide.length(), &ret);
  return ret;
}

bool UTF16ToWide(const char16* src, size_t src_len, std::wstring* output) {
  return UTFConversion(StringPiece16(src, src_len), output);
}

std::wstring UTF16ToWide(StringPiece16 utf16) {
  std::wstring ret;
  // Ignore the success flag of this call; it will do the best it can for
  // invalid input, which is what we want here.
  UTF16ToWide(utf16.data(), utf16.length(), &ret);
  return ret;
}

#endif  // defined(WCHAR_T_IS_UTF32)

// UTF-8 <-> Wide -------------------------------------------------------------

// UTF8ToWide is the same code, regardless of whether wide is 16 or 32 bits.

bool UTF8ToWide(const char* src, size_t src_len, std::wstring* output) {
  return UTFConversion(StringPiece(src, src_len), output);
}

std::wstring UTF8ToWide(StringPiece utf8) {
  std::wstring ret;
  // Ignore the success flag of this call; it will do the best it can for
  // invalid input, which is what we want here.
  UTF8ToWide(utf8.data(), utf8.length(), &ret);
  return ret;
}

#if defined(WCHAR_T_IS_UTF16)
// Easy case since we can use the "utf" versions we already wrote above.

bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
  return UTF16ToUTF8(src, src_len, output);
}

std::string WideToUTF8(WStringPiece wide) {
  return UTF16ToUTF8(wide);
}

#elif defined(WCHAR_T_IS_UTF32)

bool WideToUTF8(const wchar_t* src, size_t src_len, std::string* output) {
  return UTFConversion(WStringPiece(src, src_len), output);
}

std::string WideToUTF8(WStringPiece wide) {
  std::string ret;
  // Ignore the success flag of this call; it will do the best it can for
  // invalid input, which is what we want here.
  WideToUTF8(wide.data(), wide.length(), &ret);
  return ret;
}

#endif  // defined(WCHAR_T_IS_UTF32)

string16 ASCIIToUTF16(StringPiece ascii) {
  DCHECK(IsStringASCII(ascii)) << ascii;
  return string16(ascii.begin(), ascii.end());
}

std::string UTF16ToASCII(StringPiece16 utf16) {
  DCHECK(IsStringASCII(utf16)) << UTF16ToUTF8(utf16);
  return std::string(utf16.begin(), utf16.end());
}

}  // namespace base