// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS64

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
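// (The '__' shorthand expands through ACCESS_MASM to calls on the
// MacroAssembler that is passed to each generator below.)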

// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. It may be the same as elements or name, in
//           which case that register is clobbered when the miss label is not
//           taken.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
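  // A NameDictionary entry is a (key, value, details) triple, so the value
  // lives one word past the entry's key and the details word two words past
  // it.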
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1,
         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ ld(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}

// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY));
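  // The masked details word must be zero: the property has to be a normal
  // data property (type field zero) with the READ_ONLY attribute bit clear.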
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sd(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
  Label slow;

  __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                    JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), v0, a3, a4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return a3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, a4, a5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(a4, a5, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, a4, a5);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = a4;
  Register scratch2 = t0;
  Register address = a5;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, scratch2, address));

  if (check_map == kCheckMap) {
    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
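  // SmiScale converts the smi-tagged key into a byte offset, i.e. the
  // untagged index times kPointerSize.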
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ ld(scratch, MemOperand(address));

  __ Branch(&holecheck_passed1, ne, scratch,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch);
  __ sd(value, MemOperand(address));
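  // USE_DELAY_SLOT: the Move below is emitted into the Ret's branch delay
  // slot, so v0 already holds the value when control returns to the caller.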
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch);
  __ sd(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so,
  // go to the runtime.
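  // Only the upper 32 bits of each double need to be inspected: the hole is
  // encoded as a NaN with a distinctive upper word (kHoleNanUpper32), so a
  // 32-bit load at Register::kExponentOffset is sufficient.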
  __ Daddu(address, elements,
           Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag));
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ lw(scratch, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch,
            Operand(static_cast<int32_t>(kHoleNanUpper32)));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, scratch, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = a6;
  Register elements = a7;  // Elements array of the receiver.
  // a4 and a5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
  __ Branch(&slow, ne, a4, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(a4));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(a4, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC state in response to any downstream misses, a dummy vector
  // can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();

  DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, a5,
                                                     a6, a7, t0);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // a4 still holds the array length (a smi) from the bounds check above.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(a4));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(a4));
  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(a4));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
          StoreWithVectorDescriptor::VectorRegister(),
          StoreWithVectorDescriptor::ReceiverRegister(),
          StoreWithVectorDescriptor::NameRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = a5;
  DCHECK(!AreAliased(
      value, receiver, name, StoreWithVectorDescriptor::VectorRegister(),
      StoreWithVectorDescriptor::SlotRegister(), dictionary, a6, a7));

  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, a6, a7);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, a6, a7);
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, a6, a7);
  GenerateMiss(masm);
}


#undef __

Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition code used at the patched jump.
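  // The patch-site emitter appears to split this delta between the 16-bit
  // immediate and the rs field of the andi (delta = rs * kImm16Mask + imm16),
  // so distances that do not fit in 16 bits can still be encoded.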
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
           static_cast<void*>(address),
           static_cast<void*>(andi_instruction_address), delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  DCHECK(Assembler::IsBranch(branch_instr));

  uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
  // Currently only the 'eq' and 'ne' conditions are supported, and only for
  // the simple branch instructions and their r6 variants (the opcode encodes
  // the branch type). There are some special cases (see Assembler::IsBranch())
  // so extending this would be tricky.
  DCHECK(opcode == BEQ ||    // BEQ
         opcode == BNE ||    // BNE
         opcode == POP10 ||  // BEQC
         opcode == POP30 ||  // BNEC
         opcode == POP66 ||  // BEQZC
         opcode == POP76);   // BNEZC
  switch (opcode) {
    case BEQ:
      opcode = BNE;  // change BEQ to BNE.
      break;
    case POP10:
      opcode = POP30;  // change BEQC to BNEC.
      break;
    case POP66:
      opcode = POP76;  // change BEQZC to BNEZC.
      break;
    case BNE:
      opcode = BEQ;  // change BNE to BEQ.
      break;
    case POP30:
      opcode = POP10;  // change BNEC to BEQC.
      break;
    case POP76:
      opcode = POP66;  // change BNEZC to BEQZC.
      break;
    default:
      UNIMPLEMENTED();
  }
  patcher.ChangeBranchCondition(branch_instr, opcode);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64