// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)

// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. It may be the same register as elements or
//           name, in which case that register is clobbered when the miss
//           label is not taken.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// On exit, scratch2 holds the entry address computed by the dictionary
// probe (see NameDictionaryLookupStub::GeneratePositiveLookup below).
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry, check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
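  // Each NameDictionary entry is a (key, value, details) triple of pointers,
  // so relative to the entry's key slot the value lives one pointer further
  // on and the details word two pointers further on; the details word is
  // stored as a smi.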
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1,
         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ lw(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// On exit, scratch2 holds the entry address computed by the dictionary
// probe (see NameDictionaryLookupStub::GeneratePositiveLookup below).
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary, check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
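  // A single AND with this mask rejects, in one test, entries whose type is
  // not a normal data property as well as entries with the READ_ONLY
  // attribute set; the kSmiTagSize shift accounts for the details word being
  // stored as a smi.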
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sw(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                    JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), v0, a3, t0);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return a3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

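  // The push order below must match the argument order that the
  // Runtime::kLoadIC_Miss and Runtime::kKeyedLoadIC_Miss entries expect.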
  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, t0, t1);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, t0, t1);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = t0;
  Register scratch2 = t4;
  Register scratch3 = t5;
  Register address = t1;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, scratch2, scratch3, address));

  if (check_map == kCheckMap) {
    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
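  // key is a smi, i.e. the element index shifted left by kSmiTagSize, so Lsa
  // scales it by kPointerSizeLog2 - kSmiTagSize (2 - 1 = 1 on MIPS32) to turn
  // the smi directly into a byte offset within the elements array.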
  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ lw(scratch, MemOperand(address));
  __ Branch(&holecheck_passed1, ne, scratch,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
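  // (In the grow case the key equals the old length, so key + 1 is the new
  // length; adding Smi::FromInt(1) to a smi key keeps the tag intact.)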
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ sw(value, MemOperand(address));
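  // Ret(USE_DELAY_SLOT) leaves the branch delay slot open, so the Move below
  // is emitted into that slot and executes before control returns.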
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2 - kSmiTagSize);
  __ sw(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so,
  // go to the runtime.
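  // The hole in a FixedDoubleArray is encoded as a NaN with a distinguished
  // bit pattern in its upper word, so comparing only the upper 32 bits
  // against kHoleNanUpper32 is sufficient.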
  __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
                                     kHoleNanUpper32Offset - kHeapObjectTag));
  __ Lsa(address, address, key, kPointerSizeLog2);
  __ lw(scratch, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
                                 scratch3, &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, scratch, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;  // Elements array of the receiver.
  // t0 and t1 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
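  // An unsigned (lo) comparison also sends negative smi keys to the slow
  // path, since a negative smi's tagged bit pattern is a large unsigned
  // value.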
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(t0));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns here.

  __ bind(&maybe_name_key);
  __ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(t0, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, t1,
                                                     t2, t4, t5);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // t0 still holds the array length from the bounds check above.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(t0));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(t0));
  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(t0));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
          StoreWithVectorDescriptor::VectorRegister(),
          StoreWithVectorDescriptor::ReceiverRegister(),
          StoreWithVectorDescriptor::NameRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}

void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = t1;
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(value.is(a0));
  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(a3));
  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(t0));

  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, t2, t5);
  __ Ret(USE_DELAY_SLOT);
  __ Move(v0, value);  // Ensure the stub returns correct value.

  __ bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, t2, t5);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi zero_reg, rx, #yyy
  // (a no-op that merely encodes the patch delta), nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi zero_reg, rx, #yyy
  // (the no-op marker), nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta to the start of the map check instruction and the
  // condition used at the patched jump.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
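  // The rs field of the marker andi extends the 16-bit delta: each unit in
  // rs contributes another kImm16Mask worth of instructions.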
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
           static_cast<void*>(address),
           static_cast<void*>(andi_instruction_address), delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
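  // delta counts instructions back from the marker andi to the inlined
  // smi-check andi that gets patched below.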
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  DCHECK(Assembler::IsBranch(branch_instr));

  uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
  // Currently only the 'eq' and 'ne' condition values are supported, for the
  // simple branch instructions and their r6 variants (with the opcode being
  // the branch type). There are some special cases (see Assembler::IsBranch())
  // so extending this would be tricky.
  DCHECK(opcode == BEQ ||    // BEQ
         opcode == BNE ||    // BNE
         opcode == POP10 ||  // BEQC
         opcode == POP30 ||  // BNEC
         opcode == POP66 ||  // BEQZC
         opcode == POP76);   // BNEZC
  switch (opcode) {
    case BEQ:
      opcode = BNE;  // change BEQ to BNE.
      break;
    case POP10:
      opcode = POP30;  // change BEQC to BNEC.
      break;
    case POP66:
      opcode = POP76;  // change BEQZC to BNEZC.
      break;
    case BNE:
      opcode = BEQ;  // change BNE to BEQ.
      break;
    case POP30:
      opcode = POP10;  // change BNEC to BEQC.
      break;
    case POP76:
      opcode = POP66;  // change BNEZC to BEQZC.
      break;
    default:
      UNIMPLEMENTED();
  }
  patcher.ChangeBranchCondition(branch_instr, opcode);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS