// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_PPC

#include "src/codegen.h"
#include "src/ic/ic.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ beq(global_object);
  __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
  __ beq(global_object);
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. Can be the same as 'elements' or 'name', in
//           which case that register is clobbered when the miss label is not
//           taken.
// The two scratch registers need to be different from 'elements', 'name' and
// 'result'.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  //  scratch1: Used as temporary and to hold the capacity of the property
  //            dictionary.
  //  scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
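  // Stash the entry address in r0 while the mask literal is materialized in
  // scratch2, then restore it once the details word has been tested.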
  __ mr(r0, scratch2);
  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ and_(scratch2, scratch1, scratch2, SetRC);
  __ bne(miss, cr0);
  __ mr(scratch2, r0);

  // Get the value at the masked, scaled index and return.
  __ LoadP(result,
           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from 'elements', 'name' and
// 'value'.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  //  scratch1: Used as temporary and to hold the capacity of the property
  //            dictionary.
  //  scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
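  // Same r0 stash/restore of the entry address as in GenerateDictionaryLoad.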
  __ mr(r0, scratch2);
  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
  __ and_(scratch2, scratch1, scratch2, SetRC);
  __ bne(miss, cr0);
  __ mr(scratch2, r0);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ StoreP(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mr(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver, Register map,
                                           Register scratch,
                                           int interceptor_bit, Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
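  // The combined mask must fit in the 16-bit unsigned immediate field of the
  // 'andi.' instruction used below.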
  DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
  __ andi(r0, scratch,
          Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ bne(slow, cr0);
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ cmpi(scratch, Operand(JS_OBJECT_TYPE));
  __ blt(slow);
}


// Loads an indexed element from a fast case array.
static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
                                  Register key, Register elements,
                                  Register scratch1, Register scratch2,
                                  Register result, Label* slow,
                                  LanguageMode language_mode) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // elements - holds the elements of the receiver and its prototypes.
  //
  // scratch1 - used to hold elements length, bit fields, base addresses.
  //
  // scratch2 - used to hold maps, prototypes, and the loaded value.
  Label check_prototypes, check_next_prototype;
  Label done, in_bounds, absent;

  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ AssertFastElements(elements);

  // Check that the key (index) is within bounds.
  __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmpl(key, scratch1);
  __ blt(&in_bounds);
  // Out-of-bounds. Check the prototype chain to see if we can just return
  // 'undefined'.
  __ cmpi(key, Operand::Zero());
  __ blt(slow);  // Negative keys can't take the fast OOB path.
  __ bind(&check_prototypes);
  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ bind(&check_next_prototype);
  __ LoadP(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset));
  // scratch2: current prototype
  __ CompareRoot(scratch2, Heap::kNullValueRootIndex);
  __ beq(&absent);
  __ LoadP(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset));
  __ LoadP(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  // elements: elements of current prototype
  // scratch2: map of current prototype
  __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE);
  __ blt(slow);
  __ lbz(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset));
  __ andi(r0, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
                                (1 << Map::kHasIndexedInterceptor)));
  __ bne(slow, cr0);
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ bne(slow);
  __ jmp(&check_next_prototype);

  __ bind(&absent);
  if (is_strong(language_mode)) {
    // Strong mode accesses must throw in this case, so call the runtime.
    __ jmp(slow);
  } else {
    __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
    __ jmp(&done);
  }

  __ bind(&in_bounds);
  // Fast case: Do the load.
  __ addi(scratch1, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  __ SmiToPtrArrayOffset(scratch2, key);
  __ LoadPX(scratch2, MemOperand(scratch2, scratch1));
  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to check the prototype chain.
  __ beq(&check_prototypes);
  __ mr(result, scratch2);
  __ bind(&done);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
                                 Register map, Register hash,
                                 Label* index_string, Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
  __ bgt(not_unique);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ beq(&unique);

  // Is the string an array index, with cached numeric value?
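  // A name whose kContainsCachedArrayIndexMask bits are all clear caches its
  // numeric index in the hash field; it is extracted at the index_string
  // label via IndexFromHash.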
  __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask));
  __ and_(r0, hash, r8, SetRC);
  __ beq(index_string, cr0);

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ andi(r0, hash, Operand(kIsNotInternalizedMask));
  __ bne(not_unique, cr0);

  __ bind(&unique);
}


void LoadIC::GenerateNormal(MacroAssembler* masm, LanguageMode language_mode) {
  Register dictionary = r3;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                       JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), r3, r6, r7);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm, language_mode);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r6; }


static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

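  // The push order matches the argument order the IC miss handlers expect.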
  __ Push(receiver, name, slot, vector);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->load_miss(), 1, r7, r8);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                        LanguageMode language_mode) {
  // The return address is in lr.

  __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kGetPropertyStrong
                                              : Runtime::kGetProperty);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r7, r8);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm,
                                             LanguageMode language_mode) {
  // The return address is in lr.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(is_strong(language_mode) ? Runtime::kKeyedGetPropertyStrong
                                              : Runtime::kKeyedGetProperty);
}


void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm,
                                      LanguageMode language_mode) {
  // The return address is in lr.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = LoadDescriptor::NameRegister();
  Register receiver = LoadDescriptor::ReceiverRegister();
  DCHECK(key.is(r5));
  DCHECK(receiver.is(r4));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
                                 Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(r3, r6, &check_number_dictionary);

  GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, &slow,
                        language_mode);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset));

  // Check whether the elements array is a number dictionary.
  // r6: elements map
  // r7: elements
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r6, ip);
  __ bne(&slow);
  __ SmiUntag(r3, key);
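  // r3 holds the untagged key on entry to the dictionary probe and doubles
  // as the result register on exit.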
  __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8);
  __ Ret();

  // Slow case, key and receiver still in r5 and r4.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7,
                      r6);
  GenerateRuntimeGetProperty(masm, language_mode);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
                                 Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the stub cache. Otherwise
  // probe the dictionary.
  __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
  __ cmp(r7, ip);
  __ beq(&probe_dictionary);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = LoadWithVectorDescriptor::VectorRegister();
  Register slot = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r7, r8, r9, r10));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::KEYED_LOAD_IC, flags,
                                               receiver, key, r7, r8, r9, r10);
  // Cache miss.
  GenerateMiss(masm);

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // r6: elements
  __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
  // Load the property to r3.
  GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r7,
                      r6);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(r6, key);
  // Now jump to the place where smi keys are handled.
  __ b(&index_smi);
}


static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
          StoreDescriptor::ValueRegister(),
          VectorStoreICDescriptor::SlotRegister(),
          VectorStoreICDescriptor::VectorRegister());
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}


static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = r7;
  Register address = r8;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, address));

  if (check_map == kCheckMap) {
    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ mov(scratch, Operand(masm->isolate()->factory()->fixed_array_map()));
    __ cmp(elements_map, scratch);
    __ bne(fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element
  Label holecheck_passed1;
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ LoadPX(scratch, MemOperand(address, scratch));
  __ Cmpi(scratch, Operand(masm->isolate()->factory()->the_hole_value()), r0);
  __ bne(&holecheck_passed1);
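  // If any prototype has dictionary (slow) elements, the hole may shadow an
  // element defined there (possibly with a callback), so go to the runtime.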
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StorePX(value, MemOperand(address, scratch));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
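  // StorePUX (store with update) leaves 'address' pointing at the slot just
  // written, which is the precise address RecordWrite needs below.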
  __ StorePUX(value, MemOperand(address, scratch));
  // Update write barrier for the elements array address.
  __ mr(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ bne(slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ addi(address, elements,
          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag)));
  __ SmiToDoubleArrayOffset(scratch, key);
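  // Only the upper (exponent) word of each double is loaded; the hole NaN is
  // fully identified by its upper 32 bits (kHoleNanUpper32).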
  __ lwzx(scratch, MemOperand(address, scratch));
  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
  __ bne(&fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset), r0);
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  __ bne(&non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);
}


void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- r3     : value
  //  -- r4     : receiver
  //  -- r5     : key
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r4));
  DCHECK(key.is(r5));
  DCHECK(value.is(r3));
  Register receiver_map = r6;
  Register elements_map = r9;
  Register elements = r10;  // Elements array of the receiver.
  // r7 and r8 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not observed.
  // The generic stub does not perform map checks or handle observed objects.
  __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ andi(r0, ip,
          Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
  __ bne(&slow, cr0);
  // Check if the object is a JS array or not.
  __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ cmpi(r7, Operand(JS_ARRAY_TYPE));
  __ beq(&array);
  // Check that the object is some kind of JSObject.
  __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE));
  __ blt(&slow);

  // Object case: Check key against length in the elements array.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmpl(key, ip);
  __ blt(&fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r3: value.
  // r4: receiver.
  // r5: key.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
  __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r7, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = VectorStoreICDescriptor::VectorRegister();
  Register slot = VectorStoreICDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r8, r9, r10, r11));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));

  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, key, r8, r9, r10, r11);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ bne(&slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ cmpl(key, ip);
  __ bge(&slow);
  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ cmp(elements_map, ip);  // PPC - I think I can re-use ip here
  __ bne(&check_if_double_array);
  __ b(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ cmp(elements_map, ip);  // PPC - another ip re-use
  __ bne(&slow);
  __ b(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ cmpl(key, ip);
  __ bge(&extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);
  __ bind(&miss);
  GenerateMiss(masm);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  DCHECK(receiver.is(r4));
  DCHECK(name.is(r5));
  DCHECK(StoreDescriptor::ValueRegister().is(r3));

  // Get the receiver from the stack and probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));

  masm->isolate()->stub_cache()->GenerateProbe(masm, Code::STORE_IC, flags,
                                               receiver, name, r6, r7, r8, r9);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = r8;
  DCHECK(receiver.is(r4));
  DCHECK(name.is(r5));
  DCHECK(value.is(r3));
  DCHECK(VectorStoreICDescriptor::VectorRegister().is(r6));
  DCHECK(VectorStoreICDescriptor::SlotRegister().is(r7));

  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r9, r10);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, r9, r10);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, r9, r10);
  GenerateMiss(masm);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  return Assembler::IsCmpImmediate(instr);
}


//
// This code is paired with the JumpPatchSite class in full-codegen-ppc.cc
//
void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (!Assembler::IsCmpImmediate(instr)) {
    return;
  }

  // Compute the delta to the start of the map check instruction; the
  // condition code used at the patched jump is fixed up below.
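  // The delta is split across the cmp's 16-bit immediate field and its
  // register field, since it can exceed the immediate's range.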
  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff16Mask;
  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address,
           cmp_instruction_address, delta);
  }

  Address patch_address =
      cmp_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   cmp cr0, rx, rx
  // to
  //   rlwinm(r0, value, 0, 31, 31, SetRC);
  //   bc(label, BT/BF, 2)
  // and vice-versa to be disabled again.
  CodePatcher patcher(isolate, patch_address, 2);
  Register reg = Assembler::GetRA(instr_at_patch);
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
    DCHECK_EQ(Assembler::GetRA(instr_at_patch).code(),
              Assembler::GetRB(instr_at_patch).code());
    patcher.masm()->TestIfSmi(reg, r0);
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndi(instr_at_patch));
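    // Comparing a register with itself always sets cr0's EQ bit; together
    // with the inverted branch condition below, the smi check is disabled.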
    patcher.masm()->cmp(reg, reg, cr0);
  }
  DCHECK(Assembler::IsBranch(branch_instr));

  // Invert the logic of the branch
  if (Assembler::GetCondition(branch_instr) == eq) {
    patcher.EmitCondition(ne);
  } else {
    DCHECK(Assembler::GetCondition(branch_instr) == ne);
    patcher.EmitCondition(eq);
  }
}
}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_PPC