// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_ARM

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)

// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as elements or name, clobbering
//           one of these in the case of not jumping to the miss label.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
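  // A NameDictionary entry is a (key, value, details) triple of pointers, and
  // scratch2 points at the key slot of the matched entry, so the value and
  // details words are read one and two pointers further on (this layout is
  // implied by the offsets used below). The details word is a smi, hence the
  // kSmiTagSize shift on the mask; a "normal" property has a zero type field,
  // so any set type bit branches to the miss label.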
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ b(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
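  // As in the load helper above, the details word is a smi, so the combined
  // mask is shifted by kSmiTagSize; if either a type bit or the READ_ONLY
  // attribute bit is set, the property cannot be stored to here and we miss.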
  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
  __ b(ne, miss);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = r0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                     JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), r0, r3, r4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

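  // The push order (receiver, name, slot, vector) is the order in which the
  // load miss handlers tail-called below consume their stack arguments.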
  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r4, r5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r4, r5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r4, r5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
          StoreWithVectorDescriptor::VectorRegister(),
          StoreWithVectorDescriptor::ReceiverRegister(),
          StoreWithVectorDescriptor::NameRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = r4;
  Register address = r5;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, address));

  if (check_map == kCheckMap) {
    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ b(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
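  // The pre-indexed load below folds the smi-scaled key into 'address' with
  // writeback, leaving 'address' pointing directly at elements[key].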
  __ ldr(scratch, MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
  __ cmp(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
  __ b(ne, &holecheck_passed1);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch, key, Operand(Smi::FromInt(1)));
    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch, key, Operand(Smi::FromInt(1)));
    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
  __ str(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ b(ne, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ add(address, elements,
         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
                 kHeapObjectTag));
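  // 'address' is biased by sizeof(kHoleNanLower32) so the pre-indexed load
  // below reads the upper word of elements[key] (doubles are stored
  // little-endian on ARM); only the hole NaN has kHoleNanUpper32 there.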
  __ ldr(scratch, MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
  __ cmp(scratch, Operand(kHoleNanUpper32));
  __ b(ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ add(scratch, key, Operand(Smi::FromInt(1)));
    __ str(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  __ b(ne, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- r0    : value
  //  -- r1    : receiver
  //  -- r2    : key
  //  -- lr    : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r1));
  DCHECK(key.is(r2));
  DCHECK(value.is(r0));
  Register receiver_map = r3;
  Register elements_map = r6;
  Register elements = r9;  // Elements array of the receiver.
  // r4 and r5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ b(ne, &slow);
  // Check if the object is a JS array or not.
  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmp(r4, Operand(JS_ARRAY_TYPE));
  __ b(eq, &array);
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ cmp(r4, Operand(JS_OBJECT_TYPE));
  __ b(lo, &slow);

  // Object case: Check key against length in the elements array.
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(lo, &fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r0: value.
  // r1: receiver.
  // r2: key.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ ldr(r4, FieldMemOperand(key, HeapObject::kMapOffset));
  __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r4, &slow);

  // We use register r8, because otherwise probing the megamorphic stub cache
  // would require pushing temporaries on the stack.
  // TODO(mvstanton): quit using register r8 when
  // FLAG_enable_embedded_constant_pool is turned on.
  DCHECK(!FLAG_enable_embedded_constant_pool);
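  // (With embedded constant pools enabled, r8 is reserved as the constant
  // pool pointer on ARM, which is why it must not be in use here.)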
  Register temporary2 = r8;
  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();

  DCHECK(!AreAliased(vector, slot, r5, temporary2, r6, r9));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ mov(slot, Operand(Smi::FromInt(slot_index)));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r5,
                                                     temporary2, r6, r9);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ b(ne, &slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &slow);
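  // Reaching here means key == array.length and key < capacity of the backing
  // store, so the array can grow in place by one element; pick the grow path
  // matching the backing store's elements kind.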
  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ b(ne, &check_if_double_array);
  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ b(ne, &slow);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmp(key, Operand(ip));
  __ b(hs, &extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = r5;
  DCHECK(receiver.is(r1));
  DCHECK(name.is(r2));
  DCHECK(value.is(r0));
  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r3));
  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r4));

  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r6, r9);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r6, r9);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r6, r9);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
           static_cast<void*>(address),
           static_cast<void*>(cmp_instruction_address), delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   cmp rx, rx
  //   b eq/ne, <target>
  // to
  //   tst rx, #kSmiTagMask
  //   b ne/eq, <target>
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
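  // The patch window is two instructions: the cmp/tst itself and the
  // conditional branch that follows it, whose condition field is rewritten
  // by EmitCondition below.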
  Register reg = Assembler::GetRn(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
    DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
              Assembler::GetRm(instr_at_patch).code());
    patcher.masm()->tst(reg, Operand(kSmiTagMask));
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsTstImmediate(instr_at_patch));
    patcher.masm()->cmp(reg, reg);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);
  } else {
    DCHECK(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM