// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/builtins/builtins.h"
#include "src/builtins/builtins-utils.h"

#include "src/code-factory.h"

namespace v8 {
namespace internal {

// -----------------------------------------------------------------------------
// ES6 section 20.2.2 Function Properties of the Math Object

// ES6 section 20.2.2.1 Math.abs ( x )
void Builtins::Generate_MathAbs(CodeStubAssembler* assembler) {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Variable Variable;

  Node* context = assembler->Parameter(4);

  // We might need to loop once for ToNumber conversion.
  Variable var_x(assembler, MachineRepresentation::kTagged);
  Label loop(assembler, &var_x);
  var_x.Bind(assembler->Parameter(1));
  assembler->Goto(&loop);
  assembler->Bind(&loop);
  {
    // Load the current {x} value.
    Node* x = var_x.value();

    // Check if {x} is a Smi or a HeapObject.
    Label if_xissmi(assembler), if_xisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);

    assembler->Bind(&if_xissmi);
    {
      // Check if {x} is already positive.
      Label if_xispositive(assembler), if_xisnotpositive(assembler);
      assembler->BranchIfSmiLessThanOrEqual(
          assembler->SmiConstant(Smi::FromInt(0)), x, &if_xispositive,
          &if_xisnotpositive);

      assembler->Bind(&if_xispositive);
      {
        // Just return the input {x}.
        assembler->Return(x);
      }

      assembler->Bind(&if_xisnotpositive);
      {
        // Try to negate the {x} value.
        Node* pair = assembler->IntPtrSubWithOverflow(
            assembler->IntPtrConstant(0), assembler->BitcastTaggedToWord(x));
        Node* overflow = assembler->Projection(1, pair);
        Label if_overflow(assembler, Label::kDeferred),
            if_notoverflow(assembler);
        assembler->Branch(overflow, &if_overflow, &if_notoverflow);

        assembler->Bind(&if_notoverflow);
        {
          // There is a Smi representation for negated {x}.
          Node* result = assembler->Projection(0, pair);
          result = assembler->BitcastWordToTagged(result);
          assembler->Return(result);
        }

        assembler->Bind(&if_overflow);
        {
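          // Negation overflows only for Smi::kMinValue; its absolute value
          // does not fit into a Smi, so materialize it as a HeapNumber.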
          Node* result = assembler->NumberConstant(0.0 - Smi::kMinValue);
          assembler->Return(result);
        }
      }
    }

    assembler->Bind(&if_xisnotsmi);
    {
      // Check if {x} is a HeapNumber.
      Label if_xisheapnumber(assembler),
          if_xisnotheapnumber(assembler, Label::kDeferred);
      assembler->Branch(
          assembler->WordEqual(assembler->LoadMap(x),
                               assembler->HeapNumberMapConstant()),
          &if_xisheapnumber, &if_xisnotheapnumber);

      assembler->Bind(&if_xisheapnumber);
      {
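        // Compute the absolute value on the unboxed float64 payload and box
        // the result in a freshly allocated HeapNumber.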
        Node* x_value = assembler->LoadHeapNumberValue(x);
        Node* value = assembler->Float64Abs(x_value);
        Node* result = assembler->AllocateHeapNumberWithValue(value);
        assembler->Return(result);
      }

      assembler->Bind(&if_xisnotheapnumber);
      {
        // Need to convert {x} to a Number first.
        Callable callable =
            CodeFactory::NonNumberToNumber(assembler->isolate());
        var_x.Bind(assembler->CallStub(callable, context, x));
        assembler->Goto(&loop);
      }
    }
  }
}

namespace {

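// Shared code generation for Math.ceil, Math.floor, Math.round and
// Math.trunc: Smis are returned unchanged, HeapNumbers go through the given
// {float64op}, and all other inputs loop through ToNumber conversion first.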
void Generate_MathRoundingOperation(
    CodeStubAssembler* assembler,
    compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Variable Variable;

  Node* context = assembler->Parameter(4);

  // We might need to loop once for ToNumber conversion.
  Variable var_x(assembler, MachineRepresentation::kTagged);
  Label loop(assembler, &var_x);
  var_x.Bind(assembler->Parameter(1));
  assembler->Goto(&loop);
  assembler->Bind(&loop);
  {
    // Load the current {x} value.
    Node* x = var_x.value();

    // Check if {x} is a Smi or a HeapObject.
    Label if_xissmi(assembler), if_xisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);

    assembler->Bind(&if_xissmi);
    {
      // Nothing to do when {x} is a Smi.
      assembler->Return(x);
    }

    assembler->Bind(&if_xisnotsmi);
    {
      // Check if {x} is a HeapNumber.
      Label if_xisheapnumber(assembler),
          if_xisnotheapnumber(assembler, Label::kDeferred);
      assembler->Branch(
          assembler->WordEqual(assembler->LoadMap(x),
                               assembler->HeapNumberMapConstant()),
          &if_xisheapnumber, &if_xisnotheapnumber);

      assembler->Bind(&if_xisheapnumber);
      {
        Node* x_value = assembler->LoadHeapNumberValue(x);
        Node* value = (assembler->*float64op)(x_value);
        Node* result = assembler->ChangeFloat64ToTagged(value);
        assembler->Return(result);
      }

      assembler->Bind(&if_xisnotheapnumber);
      {
        // Need to convert {x} to a Number first.
        Callable callable =
            CodeFactory::NonNumberToNumber(assembler->isolate());
        var_x.Bind(assembler->CallStub(callable, context, x));
        assembler->Goto(&loop);
      }
    }
  }
}

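// Shared code generation for the simple one-argument Math builtins: converts
// the argument to a float64 (TruncateTaggedToFloat64 performs ToNumber for
// non-number inputs), applies {float64op}, and boxes the result in a new
// HeapNumber.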
void Generate_MathUnaryOperation(
    CodeStubAssembler* assembler,
    compiler::Node* (CodeStubAssembler::*float64op)(compiler::Node*)) {
  typedef compiler::Node Node;

  Node* x = assembler->Parameter(1);
  Node* context = assembler->Parameter(4);
  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
  Node* value = (assembler->*float64op)(x_value);
  Node* result = assembler->AllocateHeapNumberWithValue(value);
  assembler->Return(result);
}

}  // namespace

// ES6 section 20.2.2.2 Math.acos ( x )
void Builtins::Generate_MathAcos(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acos);
}

// ES6 section 20.2.2.3 Math.acosh ( x )
void Builtins::Generate_MathAcosh(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Acosh);
}

// ES6 section 20.2.2.4 Math.asin ( x )
void Builtins::Generate_MathAsin(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asin);
}

// ES6 section 20.2.2.5 Math.asinh ( x )
void Builtins::Generate_MathAsinh(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Asinh);
}

// ES6 section 20.2.2.6 Math.atan ( x )
void Builtins::Generate_MathAtan(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atan);
}

// ES6 section 20.2.2.7 Math.atanh ( x )
void Builtins::Generate_MathAtanh(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Atanh);
}

// ES6 section 20.2.2.8 Math.atan2 ( y, x )
void Builtins::Generate_MathAtan2(CodeStubAssembler* assembler) {
  using compiler::Node;

  Node* y = assembler->Parameter(1);
  Node* x = assembler->Parameter(2);
  Node* context = assembler->Parameter(5);
  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
  Node* value = assembler->Float64Atan2(y_value, x_value);
  Node* result = assembler->AllocateHeapNumberWithValue(value);
  assembler->Return(result);
}

// ES6 section 20.2.2.10 Math.ceil ( x )
void Builtins::Generate_MathCeil(CodeStubAssembler* assembler) {
  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Ceil);
}

// ES6 section 20.2.2.9 Math.cbrt ( x )
void Builtins::Generate_MathCbrt(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cbrt);
}

// ES6 section 20.2.2.11 Math.clz32 ( x )
void Builtins::Generate_MathClz32(CodeStubAssembler* assembler) {
  typedef CodeStubAssembler::Label Label;
  typedef compiler::Node Node;
  typedef CodeStubAssembler::Variable Variable;

  Node* context = assembler->Parameter(4);

  // Shared entry point for the clz32 operation.
  Variable var_clz32_x(assembler, MachineRepresentation::kWord32);
  Label do_clz32(assembler);

  // We might need to loop once for ToNumber conversion.
  Variable var_x(assembler, MachineRepresentation::kTagged);
  Label loop(assembler, &var_x);
  var_x.Bind(assembler->Parameter(1));
  assembler->Goto(&loop);
  assembler->Bind(&loop);
  {
    // Load the current {x} value.
    Node* x = var_x.value();

    // Check if {x} is a Smi or a HeapObject.
    Label if_xissmi(assembler), if_xisnotsmi(assembler);
    assembler->Branch(assembler->TaggedIsSmi(x), &if_xissmi, &if_xisnotsmi);

    assembler->Bind(&if_xissmi);
    {
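      // {x} is already an integer; just untag the Smi to a 32-bit word.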
      var_clz32_x.Bind(assembler->SmiToWord32(x));
      assembler->Goto(&do_clz32);
    }

    assembler->Bind(&if_xisnotsmi);
    {
      // Check if {x} is a HeapNumber.
      Label if_xisheapnumber(assembler),
          if_xisnotheapnumber(assembler, Label::kDeferred);
      assembler->Branch(
          assembler->WordEqual(assembler->LoadMap(x),
                               assembler->HeapNumberMapConstant()),
          &if_xisheapnumber, &if_xisnotheapnumber);

      assembler->Bind(&if_xisheapnumber);
      {
        var_clz32_x.Bind(assembler->TruncateHeapNumberValueToWord32(x));
        assembler->Goto(&do_clz32);
      }

      assembler->Bind(&if_xisnotheapnumber);
      {
        // Need to convert {x} to a Number first.
        Callable callable =
            CodeFactory::NonNumberToNumber(assembler->isolate());
        var_x.Bind(assembler->CallStub(callable, context, x));
        assembler->Goto(&loop);
      }
    }
  }

  assembler->Bind(&do_clz32);
  {
    Node* x_value = var_clz32_x.value();
    Node* value = assembler->Word32Clz(x_value);
    Node* result = assembler->ChangeInt32ToTagged(value);
    assembler->Return(result);
  }
}

// ES6 section 20.2.2.12 Math.cos ( x )
void Builtins::Generate_MathCos(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cos);
}

// ES6 section 20.2.2.13 Math.cosh ( x )
void Builtins::Generate_MathCosh(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Cosh);
}

// ES6 section 20.2.2.14 Math.exp ( x )
void Builtins::Generate_MathExp(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Exp);
}

// ES6 section 20.2.2.15 Math.expm1 ( x )
void Builtins::Generate_MathExpm1(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Expm1);
}

// ES6 section 20.2.2.16 Math.floor ( x )
void Builtins::Generate_MathFloor(CodeStubAssembler* assembler) {
  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Floor);
}

// ES6 section 20.2.2.17 Math.fround ( x )
void Builtins::Generate_MathFround(CodeStubAssembler* assembler) {
  using compiler::Node;

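  // Round the argument to float32 precision by converting to float32 and
  // back, then return the result as a (float64-valued) HeapNumber.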
  Node* x = assembler->Parameter(1);
  Node* context = assembler->Parameter(4);
  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
  Node* value32 = assembler->TruncateFloat64ToFloat32(x_value);
  Node* value = assembler->ChangeFloat32ToFloat64(value32);
  Node* result = assembler->AllocateHeapNumberWithValue(value);
  assembler->Return(result);
}

// ES6 section 20.2.2.18 Math.hypot ( value1, value2, ...values )
BUILTIN(MathHypot) {
  HandleScope scope(isolate);
  int const length = args.length() - 1;
  if (length == 0) return Smi::kZero;
  DCHECK_LT(0, length);
  double max = 0;
  bool one_arg_is_nan = false;
  List<double> abs_values(length);
  for (int i = 0; i < length; i++) {
    Handle<Object> x = args.at<Object>(i + 1);
    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, x, Object::ToNumber(x));
    double abs_value = std::abs(x->Number());

    if (std::isnan(abs_value)) {
      one_arg_is_nan = true;
    } else {
      abs_values.Add(abs_value);
      if (max < abs_value) {
        max = abs_value;
      }
    }
  }

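  // Per the spec, an infinite argument takes precedence over NaN: if any
  // argument is +Infinity or -Infinity, the result is +Infinity even when
  // another argument is NaN.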
  if (max == V8_INFINITY) {
    return *isolate->factory()->NewNumber(V8_INFINITY);
  }

  if (one_arg_is_nan) {
    return isolate->heap()->nan_value();
  }

  if (max == 0) {
    return Smi::kZero;
  }
  DCHECK_GT(max, 0);

  // Kahan summation to avoid rounding errors.
  // Normalize the numbers to the largest one to avoid overflow.
  double sum = 0;
  double compensation = 0;
  for (int i = 0; i < length; i++) {
    double n = abs_values.at(i) / max;
    double summand = n * n - compensation;
    double preliminary = sum + summand;
    compensation = (preliminary - sum) - summand;
    sum = preliminary;
  }

  return *isolate->factory()->NewNumber(std::sqrt(sum) * max);
}

// ES6 section 20.2.2.19 Math.imul ( x, y )
void Builtins::Generate_MathImul(CodeStubAssembler* assembler) {
  using compiler::Node;

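  // Truncate both operands to 32-bit integers (performing ToNumber as
  // needed), multiply, and return the low 32 bits of the product as a Number.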
  Node* x = assembler->Parameter(1);
  Node* y = assembler->Parameter(2);
  Node* context = assembler->Parameter(5);
  Node* x_value = assembler->TruncateTaggedToWord32(context, x);
  Node* y_value = assembler->TruncateTaggedToWord32(context, y);
  Node* value = assembler->Int32Mul(x_value, y_value);
  Node* result = assembler->ChangeInt32ToTagged(value);
  assembler->Return(result);
}

// ES6 section 20.2.2.20 Math.log ( x )
void Builtins::Generate_MathLog(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log);
}

// ES6 section 20.2.2.21 Math.log1p ( x )
void Builtins::Generate_MathLog1p(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log1p);
}

// ES6 section 20.2.2.22 Math.log10 ( x )
void Builtins::Generate_MathLog10(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log10);
}

// ES6 section 20.2.2.23 Math.log2 ( x )
void Builtins::Generate_MathLog2(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Log2);
}

// ES6 section 20.2.2.26 Math.pow ( x, y )
void Builtins::Generate_MathPow(CodeStubAssembler* assembler) {
  using compiler::Node;

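  // Convert both operands to float64 (performing ToNumber as needed), compute
  // the power, and tag the result (a Smi if it fits, otherwise a HeapNumber).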
  Node* x = assembler->Parameter(1);
  Node* y = assembler->Parameter(2);
  Node* context = assembler->Parameter(5);
  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);
  Node* y_value = assembler->TruncateTaggedToFloat64(context, y);
  Node* value = assembler->Float64Pow(x_value, y_value);
  Node* result = assembler->ChangeFloat64ToTagged(value);
  assembler->Return(result);
}

// ES6 section 20.2.2.27 Math.random ( )
void Builtins::Generate_MathRandom(CodeStubAssembler* assembler) {
  using compiler::Node;

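  // The cache of precomputed random numbers and the index of the next unused
  // entry both live on the native context.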
  Node* context = assembler->Parameter(3);
  Node* native_context = assembler->LoadNativeContext(context);

  // Load cache index.
  CodeStubAssembler::Variable smi_index(assembler,
                                        MachineRepresentation::kTagged);
  smi_index.Bind(assembler->LoadContextElement(
      native_context, Context::MATH_RANDOM_INDEX_INDEX));

  // Cached random numbers are exhausted if index is 0. Go to slow path.
  CodeStubAssembler::Label if_cached(assembler);
  assembler->GotoIf(assembler->SmiAbove(smi_index.value(),
                                        assembler->SmiConstant(Smi::kZero)),
                    &if_cached);

  // Cache exhausted, populate the cache. Return value is the new index.
  smi_index.Bind(
      assembler->CallRuntime(Runtime::kGenerateRandomNumbers, context));
  assembler->Goto(&if_cached);

  // Compute next index by decrement.
  assembler->Bind(&if_cached);
  Node* new_smi_index = assembler->SmiSub(
      smi_index.value(), assembler->SmiConstant(Smi::FromInt(1)));
  assembler->StoreContextElement(
      native_context, Context::MATH_RANDOM_INDEX_INDEX, new_smi_index);

  // Load and return next cached random number.
  Node* array = assembler->LoadContextElement(native_context,
                                              Context::MATH_RANDOM_CACHE_INDEX);
  Node* random = assembler->LoadFixedDoubleArrayElement(
      array, new_smi_index, MachineType::Float64(), 0,
      CodeStubAssembler::SMI_PARAMETERS);
  assembler->Return(assembler->AllocateHeapNumberWithValue(random));
}

// ES6 section 20.2.2.28 Math.round ( x )
void Builtins::Generate_MathRound(CodeStubAssembler* assembler) {
  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Round);
}

// ES6 section 20.2.2.29 Math.sign ( x )
void Builtins::Generate_MathSign(CodeStubAssembler* assembler) {
  typedef CodeStubAssembler::Label Label;
  using compiler::Node;

  // Convert the {x} value to a Number.
  Node* x = assembler->Parameter(1);
  Node* context = assembler->Parameter(4);
  Node* x_value = assembler->TruncateTaggedToFloat64(context, x);

  // Return -1 if {x} is negative, 1 if {x} is positive, or {x} itself.
  Label if_xisnegative(assembler), if_xispositive(assembler);
  assembler->GotoIf(
      assembler->Float64LessThan(x_value, assembler->Float64Constant(0.0)),
      &if_xisnegative);
  assembler->GotoIf(
      assembler->Float64LessThan(assembler->Float64Constant(0.0), x_value),
      &if_xispositive);
  assembler->Return(assembler->ChangeFloat64ToTagged(x_value));

  assembler->Bind(&if_xisnegative);
  assembler->Return(assembler->SmiConstant(Smi::FromInt(-1)));

  assembler->Bind(&if_xispositive);
  assembler->Return(assembler->SmiConstant(Smi::FromInt(1)));
}

// ES6 section 20.2.2.30 Math.sin ( x )
void Builtins::Generate_MathSin(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sin);
}

// ES6 section 20.2.2.31 Math.sinh ( x )
void Builtins::Generate_MathSinh(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sinh);
}

// ES6 section 20.2.2.32 Math.sqrt ( x )
void Builtins::Generate_MathSqrt(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Sqrt);
}

// ES6 section 20.2.2.33 Math.tan ( x )
void Builtins::Generate_MathTan(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tan);
}

// ES6 section 20.2.2.34 Math.tanh ( x )
void Builtins::Generate_MathTanh(CodeStubAssembler* assembler) {
  Generate_MathUnaryOperation(assembler, &CodeStubAssembler::Float64Tanh);
}

// ES6 section 20.2.2.35 Math.trunc ( x )
void Builtins::Generate_MathTrunc(CodeStubAssembler* assembler) {
  Generate_MathRoundingOperation(assembler, &CodeStubAssembler::Float64Trunc);
}

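// ES6 section 20.2.2.24 Math.max ( value1, value2, ...values ) and
// ES6 section 20.2.2.25 Math.min ( value1, value2, ...values )
// These are generated via Generate_MathMaxMin using the MacroAssembler
// (i.e. architecture-specific code) rather than the CodeStubAssembler.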
void Builtins::Generate_MathMax(MacroAssembler* masm) {
  Generate_MathMaxMin(masm, MathMaxMinKind::kMax);
}

void Builtins::Generate_MathMin(MacroAssembler* masm) {
  Generate_MathMaxMin(masm, MathMaxMinKind::kMin);
}

}  // namespace internal
}  // namespace v8