// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ b(eq, global_object);
  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ b(eq, global_object);
}


// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name, clobbering
//           one of these when the miss label is not jumped to.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
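// Layout note (a sketch based on the offsets used below, not a complete
// description of NameDictionary): entries are (key, value, details) triples
// starting at kElementsStartIndex, so with scratch2 positioned by the probe,
// scratch2 + kElementsStartOffset addresses the entry's key slot, the value
// sits one pointer after it and the details word two pointers after it.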
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
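  // The details word is stored as a smi, so the untagged field masks have to
  // be shifted left by kSmiTagSize to line up with the tagged bit positions.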
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  __ b(ne, miss);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ tst(scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ b(ne, slow);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
  __ b(lt, slow);
}


// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* slow,
                                  LanguageMode language_mode) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // elements - holds the elements of the receiver and its prototypes.
  //
  // scratch1 - used to hold elements length, bit fields, base addresses.
  //
  // scratch2 - used to hold maps, prototypes, and the loaded value.
  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(elements);

  // Check that the key (index) is within bounds.
  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(scratch1));
  __ b(lo, &in_bounds);
  // Out-of-bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  __ cmp(key, Operand(0));
  __ b(lt, slow);  // Negative keys can't take the fast OOB path.
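  // The walk below may only produce 'undefined' while every prototype up to
  // null is a plain JS object with the empty fixed array as backing store
  // and no access checks or indexed interceptors; any other shape bails out
  // to 'slow'.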
  __ bind(&check_prototypes);
  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(&check_next_prototype);
  __ ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
  __ b(eq, &absent);
  __ ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
  __ ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  // elements: elements of current prototype
  // scratch2: map of current prototype
  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
  __ b(lo, slow);
  __ ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
  __ tst(scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                           (1 << Map::kHasIndexedInterceptor)));
  __ b(ne, slow);
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ b(ne, slow);
  __ jmp(&check_next_prototype);

  __ bind(&absent);
  if (is_strong(language_mode)) {
    // Strong mode accesses must throw in this case, so call the runtime.
    __ jmp(slow);
  } else {
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ jmp(&done);
  }

  __ bind(&in_bounds);
  // Fast case: Do the load.
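  // Addressing sketch, assuming the usual 32-bit smi encoding (a smi is the
  // integer shifted left by kSmiTagSize == 1): PointerAddressFromSmiKey adds
  // the smi key shifted left by one more bit, i.e. index * kPointerSize, the
  // byte offset of the element within the FixedArray body.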
  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to check the prototype chain.
  __ b(eq, &check_prototypes);
  __ mov(result, scratch2);
  __ bind(&done);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
  __ b(hi, not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ b(eq, &unique);
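  // Equality means the instance type is LAST_UNIQUE_NAME_TYPE itself, i.e. a
  // Symbol, which is unique by construction; what follows handles the
  // remaining case, a string.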

  // Is the string an array index, with cached numeric value?
  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ b(eq, index_string);
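  // For strings that are valid array indices, the hash field caches the
  // numeric index rather than a hash; callers decode it with IndexFromHash
  // at their index_string target (see KeyedLoadIC::GenerateMegamorphic).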

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ tst(hash, Operand(kIsNotInternalizedMask));
  __ b(ne, not_unique);

  __ bind(&unique);
}


void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
  Register dictionary = r0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                     JSObject::kPropertiesOffset));
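  // r0 doubles as the dictionary register and the result register below,
  // which GenerateDictionaryLoad explicitly allows.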
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), r0, r3, r4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm, language_mode);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->load_miss(), 1, r4, r5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                        LanguageMode language_mode) {
  // The return address is in lr.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
                                              : Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r4, r5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                             LanguageMode language_mode) {
  // The return address is in lr.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
                                              : Runtime::kKeyedGetProperty);
}


void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
                                      LanguageMode language_mode) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(r2));
  DCHECK(receiver.is(r1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(r0, r3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, &slow,
                        language_mode);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));

  // Check whether the elements object is a number dictionary.
  // r3: elements map
  // r4: elements
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r3, ip);
  __ b(ne, &slow);
  __ SmiUntag(r0, key);
  __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
  __ Ret();

  // Slow case, key and receiver still in r2 and r1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
                      r3);
  GenerateRuntimeGetProperty(masm, language_mode);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r4, ip);
  __ b(eq, &probe_dictionary);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  Register slot = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r4, r5, r6, r9));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ mov(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                               receiver, key, r4, r5, r6, r9);
  // Cache miss.
  GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // r3: elements
  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
  // Load the property to r0.
  GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
                      r3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(r3, key);
  // Now jump to the place where smi keys are handled.
  __ jmp(&index_smi);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(),
          VectorStoreICDescriptor::SlotRegister(),
          VectorStoreICDescriptor::VectorRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = r4;
  Register address = r5;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, address));

  if (check_map == kCheckMap) {
    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ b(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element
  Label holecheck_passed1;
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
  __ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
  __ b(ne, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch, key, Operand(Smi::FromInt(1)));
    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch, key, Operand(Smi::FromInt(1)));
    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
  __ str(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ b(ne, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
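  // The double hole is a NaN with a distinguished bit pattern, so loading
  // the upper 32 bits (hence the sizeof(kHoleNanLower32) bias below) and
  // comparing against kHoleNanUpper32 suffices. Shifting the smi key by
  // kPointerSizeLog2 yields index * kDoubleSize because the smi tag already
  // contributes a factor of two.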
  __ add(address, elements,
         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
                 kHeapObjectTag));
  __ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
  __ cmp(scratch, Operand(kHoleNanUpper32));
  __ b(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch, key, Operand(Smi::FromInt(1)));
    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  __ b(ne, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- r0     : value
  //  -- r1     : key
  //  -- r2     : receiver
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ cmp(r4, Operand(JS_OBJECT_TYPE));
  __ b(lo, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: key.
  // r2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns here.

  __ bind(&maybe_name_key);
  __ ldr(r4, FieldMemOperand(key, HeapObject::kMapOffset));
  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r4, &slow);

  // We use register r8, because otherwise probing the megamorphic stub cache
  // would require pushing temporaries on the stack.
  // TODO(mvstanton): quit using register r8 when
  // FLAG_enable_embedded_constant_pool is turned on.
  DCHECK(!FLAG_enable_embedded_constant_pool);
  Register temporary2 = r8;
  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();

  DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ mov(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, Code::STORE_IC, flags, receiver, key, r5, temporary2, r6, r9);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
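  // We branched here on 'hs' after comparing the key against the JSArray
  // length, so 'eq' means key == length (the append case) and 'ne' means
  // key > length, which would create a hole.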
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ b(ne, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(StoreDescriptor::ValueRegister().is(r0));

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));

  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, name, r3, r4, r5, r6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = r5;
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(value.is(r0));
  DCHECK(VectorStoreICDescriptor::VectorRegister().is(r3));
  DCHECK(VectorStoreICDescriptor::SlotRegister().is(r4));

  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, r6, r9);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, r6, r9);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
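  // The delta is packed into the cmp instruction itself: the low bits in its
  // 12-bit immediate and the overflow in its register field, each register
  // code step accounting for kOff12Mask worth of offset.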
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n", address,
           cmp_instruction_address, delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   cmp rx, rx
  //   b eq/ne, <target>
  // to
  //   tst rx, #kSmiTagMask
  //   b ne/eq, <target>
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Assembler::GetRn(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
    DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
              Assembler::GetRm(instr_at_patch).code());
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsTstImmediate(instr_at_patch));
    patcher.masm()->cmp(reg, reg);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);
  } else {
    DCHECK(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM