// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_MIPS

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)
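
// Note: '__' is V8's shorthand for emitting code through the current
// MacroAssembler; '__ lw(...)' below expands to 'masm->lw(...)'.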


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. It may be the same register as elements or
//           name, in which case that register is clobbered when the miss
//           label is not taken.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by NameDictionaryLookupStub::
// GeneratePositiveLookup() is used.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1,
         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ lw(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
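
// Note on the offsets used above: each NameDictionary entry is a
// (key, value, details) triple of pointer-sized slots, so with the entry's
// key address in scratch2 the value sits one word further on and the details
// word two words further on. The details word is stored as a smi, which is
// why the PropertyDetails::TypeField mask is shifted left by kSmiTagSize
// before the AND; a nonzero type field means the property is not a normal
// data property and the stub must miss.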


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by NameDictionaryLookupStub::
// GeneratePositiveLookup() is used.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index.
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY))
      << kSmiTagSize;
  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sw(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
                 kDontSaveFPRegs);
}
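
// Note: RecordWrite is allowed to clobber the register holding the written
// value, so the value is copied into scratch1 first; 'elements' names the
// object written into and scratch2 holds the exact slot address computed
// above, which lets the barrier record the (object, slot) pair.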


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}
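
// Note: the single 'lt' compare above suffices because the types this stub
// must reject (strings, JSValue wrappers, etc.) all sit below JS_OBJECT_TYPE
// in the instance-type enumeration; the DCHECK documents the one ordering
// fact the branch relies on.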


// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* slow,
                                  LanguageMode language_mode) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // elements - holds the elements of the receiver and its prototypes.
  //
  // scratch1 - used to hold elements length, bit fields, base addresses.
  //
  // scratch2 - used to hold maps, prototypes, and the loaded value.
  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(elements);

  // Check that the key (index) is within bounds.
  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&in_bounds, lo, key, Operand(scratch1));
  // Out-of-bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  // Negative keys can't take the fast OOB path.
  __ Branch(slow, lt, key, Operand(zero_reg));
  __ bind(&check_prototypes);
  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(&check_next_prototype);
  __ lw(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ LoadRoot(at, Heap::kNullValueRootIndex);
  __ Branch(&absent, eq, scratch2, Operand(at));
  __ lw(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
  __ lw(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  // elements: elements of current prototype
  // scratch2: map of current prototype
  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
  __ Branch(slow, lo, scratch1, Operand(JS_OBJECT_TYPE));
  __ lbu(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
  __ And(at, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                               (1 << Map::kHasIndexedInterceptor)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(slow, ne, elements, Operand(at));
  __ Branch(&check_next_prototype);

  __ bind(&absent);
  if (is_strong(language_mode)) {
    // Strong mode accesses must throw in this case, so call the runtime.
    __ Branch(slow);
  } else {
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ Branch(&done);
  }

  __ bind(&in_bounds);
  // Fast case: Do the load.
  __ Addu(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
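  // Note: a smi is the integer value shifted left by kSmiTagSize, so
  // shifting the smi key left by (kPointerSizeLog2 - kSmiTagSize) yields
  // key * kPointerSize, the byte offset of the element in the backing store.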
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(at, at, scratch1);
  __ lw(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to check the prototype chain.
  __ Branch(&check_prototypes, eq, scratch2, Operand(at));
  __ Move(result, scratch2);
  __ bind(&done);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));

  // Is the string an array index, with cached numeric value?
  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}
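
// Note: with kInternalizedTag == 0, a clear kIsNotInternalizedMask bit in
// the instance type marks an internalized string, so a nonzero AND result
// means the key is a non-internalized string and cannot be treated as a
// unique name.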


void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                    JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), v0, a3, t0);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm, language_mode);
}
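
// Note: "slow (but don't miss)" means the stub tail-calls the generic
// runtime property lookup instead of the miss handler; a miss would ask the
// runtime to update the IC's state, which would gain nothing here since the
// receiver already has slow (dictionary) properties.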


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return a3; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                        LanguageMode language_mode) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
                                              : Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(t0, t1, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                             LanguageMode language_mode) {
  // The return address is in ra.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
                                              : Runtime::kKeyedGetProperty);
}


void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
                                      LanguageMode language_mode) {
  // The return address is in ra.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(a2));
  DCHECK(receiver.is(a1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a0, a3, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, &slow,
                        language_mode);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));

  // Check whether the elements array is a number dictionary.
  // a3: elements map
  // t0: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ sra(a0, key, kSmiTagSize);
  __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
  __ Ret();

  // Slow case, key and receiver still in a2 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
                      a3);
  GenerateRuntimeGetProperty(masm, language_mode);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, t0, Operand(at));

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  Register slot = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, t0, t1, t2, t5));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, flags,
                                               receiver, key, t0, t1, t2, t5);
  // Cache miss.
  GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a3: elements
  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, t0,
                      a3);
  __ Ret();
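
  // Note: for string keys like "3" whose numeric value has been cached in
  // the name's hash field, IndexFromHash below decodes that cached array
  // index into a smi key so the load can continue on the smi path.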

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = t0;
  Register scratch2 = t4;
  Register scratch3 = t5;
  Register address = t1;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, scratch2, scratch3, address));

  if (check_map == kCheckMap) {
    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
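  // Note: overwriting the hole in place is only safe when no object on the
  // prototype chain can observe the element; a dictionary-mode prototype may
  // define an accessor for this index, so that case is routed to the runtime
  // via JumpIfDictionaryInPrototypeChain below.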
  Label holecheck_passed1;
  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
  __ addu(address, address, at);
  __ lw(scratch, MemOperand(address));
  __ Branch(&holecheck_passed1, ne, scratch,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch);
  __ sw(value, MemOperand(address));
  __ Ret();
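  // Note: no write barrier is emitted on this path because a smi is an
  // immediate, not a heap pointer, so the GC never needs to track the slot.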

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
  __ Addu(address, address, scratch);
  __ sw(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
                                     kHoleNanUpper32Offset - kHeapObjectTag));
  __ sll(at, key, kPointerSizeLog2);
  __ addu(address, address, at);
  __ lw(scratch, MemOperand(address));
  __ Branch(&fast_double_without_map_check, ne, scratch,
            Operand(kHoleNanUpper32));
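  // Note: the hole in a FixedDoubleArray is a NaN with a fixed bit pattern,
  // so comparing just the upper 32 bits against kHoleNanUpper32 detects it.
  // The smi key is shifted by the full kPointerSizeLog2 here because each
  // double element is 8 bytes: the smi tag already contributes a factor of
  // two, and the shift supplies the remaining factor of four.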
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
                                 scratch3, &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Addu(scratch, key, Operand(Smi::FromInt(1)));
    __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, scratch, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}
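
// Note: the transitions above only ever move "up" the elements-kind
// lattice: FAST_SMI_ELEMENTS generalizes to FAST_DOUBLE_ELEMENTS (when a
// heap number is stored) or to FAST_ELEMENTS (for any other object), and
// FAST_DOUBLE_ELEMENTS generalizes to FAST_ELEMENTS; no transition ever
// narrows an array's elements kind.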


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : key
  //  -- a2     : receiver
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = t2;
  Register elements = t3;  // Elements array of the receiver.
  // t0 and t1 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(t0, t0,
         Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ Branch(&slow, ne, t0, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JS object EXCEPT JS Value type. In
  // the case that the object is a value-wrapper object, we enter the runtime
  // system to make sure that indexing into string objects works as intended.
  STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
  __ Branch(&slow, lo, t0, Operand(JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(t0));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: key.
  // a2: receiver.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ lw(t0, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lb(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(t0, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, t1, t2, t4, t5));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ li(slot, Operand(Smi::FromInt(slot_index)));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, key, t1, t2, t4, t5);
  // Cache miss.
  __ Branch(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // The key and the JSArray length from the bounds check above are still in
  // their registers. Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(t0));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(t0));
  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(&check_if_double_array, ne, elements_map,
            Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(t0));

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);

  __ bind(&miss);
  GenerateMiss(masm);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(),
          VectorStoreICDescriptor::SlotRegister(),
          VectorStoreICDescriptor::VectorRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(StoreDescriptor::ValueRegister().is(a0));

  // The receiver and name are already in their descriptor registers; probe
  // the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, name, a3, t0, t1, t2);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = t1;
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(value.is(a0));
  DCHECK(VectorStoreICDescriptor::VectorRegister().is(a3));
  DCHECK(VectorStoreICDescriptor::SlotRegister().is(t0));

  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t2, t5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, t2, t5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, t2, t5);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // The delta back to the start of the inlined smi-check site is encoded in
  // the andi instruction at the patched jump.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
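  // Note: the 16-bit immediate holds the low part of the delta and the rs
  // field supplies multiples of kImm16Mask, so deltas wider than 16 bits can
  // still be encoded in a single andi instruction.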
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[  patching ic at %p, andi=%p, delta=%d\n", address,
           andi_instruction_address, delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0u, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK_EQ(check, DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  DCHECK(Assembler::IsBranch(branch_instr));

  uint32_t opcode = Assembler::GetOpcodeField(branch_instr);
  // Currently only the 'eq' and 'ne' condition values are supported, for the
  // simple branch instructions and their r6 variants (where the opcode
  // encodes the branch type). There are some special cases (see
  // Assembler::IsBranch()) so extending this would be tricky.
  DCHECK(opcode == BEQ ||    // BEQ
         opcode == BNE ||    // BNE
         opcode == POP10 ||  // BEQC
         opcode == POP30 ||  // BNEC
         opcode == POP66 ||  // BEQZC
         opcode == POP76);   // BNEZC
  switch (opcode) {
    case BEQ:
      opcode = BNE;  // change BEQ to BNE.
      break;
    case POP10:
      opcode = POP30;  // change BEQC to BNEC.
      break;
    case POP66:
      opcode = POP76;  // change BEQZC to BNEZC.
      break;
    case BNE:
      opcode = BEQ;  // change BNE to BEQ.
      break;
    case POP30:
      opcode = POP10;  // change BNEC to BEQC.
      break;
    case POP76:
      opcode = POP66;  // change BNEZC to BEQZC.
      break;
    default:
      UNIMPLEMENTED();
  }
  patcher.ChangeBranchCondition(branch_instr, opcode);
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS