// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#if V8_TARGET_ARCH_S390

#include "src/ic/ic.h"
#include "src/codegen.h"
#include "src/ic/ic-compiler.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)

// Helper function used from LoadIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. It may be the same register as elements or
//           name, in which case that register is clobbered when the miss
//           label is not taken.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
                                   Register elements, Register name,
                                   Register result, Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
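  // Relative to scratch2 the entry's key lies at kElementsStartOffset, its
  // value one pointer after that and its details word two pointers after
  // that. scratch2 is stashed in r0 while it doubles as a mask register and
  // is restored before the result is loaded.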
  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ LoadRR(r0, scratch2);
  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ AndP(scratch2, scratch1);
  __ bne(miss);
  __ LoadRR(scratch2, r0);

  // Get the value at the masked, scaled index and return.
  __ LoadP(result,
           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}

// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
                                    Register elements, Register name,
                                    Register value, Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
                                                   name, scratch1, scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + 4 * index
  const int kElementsStartOffset =
      NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
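  // As in GenerateDictionaryLoad, scratch2 is stashed in r0 around the mask
  // test. Any set bit in the combined type/read-only mask means the property
  // is either not a normal data property or is read only, so the store is
  // sent to the miss label.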
  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ LoadRR(r0, scratch2);
  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
  __ AndP(scratch2, scratch1);
  __ bne(miss /*, cr0*/);
  __ LoadRR(scratch2, r0);

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ AddP(scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ StoreP(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ LoadRR(scratch1, value);
  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
                 kDontSaveFPRegs);
}

void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = r2;
  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));

  Label slow;

  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
                                       JSObject::kPropertiesOffset));
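  // r2 holds the dictionary and also serves as the result register; per the
  // contract of GenerateDictionaryLoad it is only clobbered when the miss
  // label is not taken.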
  GenerateDictionaryLoad(masm, &slow, dictionary,
                         LoadDescriptor::NameRegister(), r2, r5, r6);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}

// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return r5; }

static void LoadIC_PushArgs(MacroAssembler* masm) {
  Register receiver = LoadDescriptor::ReceiverRegister();
  Register name = LoadDescriptor::NameRegister();
  Register slot = LoadDescriptor::SlotRegister();
  Register vector = LoadWithVectorDescriptor::VectorRegister();

  __ Push(receiver, name, slot, vector);
}

void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_load_miss(), 1, r6, r7);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kLoadIC_Miss);
}

void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ LoadRR(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kGetProperty);
}

void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in lr.
  Isolate* isolate = masm->isolate();

  DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::SlotRegister(),
                     LoadWithVectorDescriptor::VectorRegister()));
  __ IncrementCounter(isolate->counters()->ic_keyed_load_miss(), 1, r6, r7);

  LoadIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kKeyedLoadIC_Miss);
}

void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in lr.

  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kKeyedGetProperty);
}

static void StoreIC_PushArgs(MacroAssembler* masm) {
  __ Push(StoreWithVectorDescriptor::ValueRegister(),
          StoreWithVectorDescriptor::SlotRegister(),
          StoreWithVectorDescriptor::VectorRegister(),
          StoreWithVectorDescriptor::ReceiverRegister(),
          StoreWithVectorDescriptor::NameRegister());
}

void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  __ TailCallRuntime(Runtime::kKeyedStoreIC_Miss);
}

void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
}

static void KeyedStoreGenerateMegamorphicHelper(
    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
    Register value, Register key, Register receiver, Register receiver_map,
    Register elements_map, Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch = r6;
  Register address = r7;
  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
                     scratch, address));

  if (check_map == kCheckMap) {
    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ CmpP(elements_map,
            Operand(masm->isolate()->factory()->fixed_array_map()));
    __ bne(fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element
  Label holecheck_passed1;
  // @TODO(joransiu) : Fold AddP into memref of LoadP
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
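  // The element lives at elements + header + index * kPointerSize;
  // SmiToPtrArrayOffset converts the smi key into that byte offset in scratch.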
  __ SmiToPtrArrayOffset(scratch, key);
  __ LoadP(scratch, MemOperand(address, scratch));
  __ CmpP(scratch, Operand(masm->isolate()->factory()->the_hole_value()));
  __ bne(&holecheck_passed1, Label::kNear);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StoreP(value, MemOperand(address, scratch));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ AddP(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(scratch, key);
  __ StoreP(value, MemOperand(address, scratch));
  __ la(address, MemOperand(address, scratch));
  // Update write barrier for the elements array address.
  __ LoadRR(scratch, value);  // Preserve the value which is returned.
  __ RecordWrite(elements, address, scratch, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
    __ bne(slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  // @TODO(joransiu) : Fold AddP Operand into LoadlW
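  // Only the upper (exponent) word of the element is loaded; finding
  // kHoleNanUpper32 there identifies the hole NaN.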
  __ AddP(address, elements,
          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
                   kHeapObjectTag)));
  __ SmiToDoubleArrayOffset(scratch, key);
  __ LoadlW(scratch, MemOperand(address, scratch));
  __ CmpP(scratch, Operand(kHoleNanUpper32));
  __ bne(&fast_double_without_map_check, Label::kNear);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value, key, elements, scratch, d0,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ AddSmiLiteral(scratch, key, Smi::FromInt(1), r0);
    __ StoreP(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
  __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
  __ bne(&non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(
      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
  AllocationSiteMode mode =
      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
                                                   receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
                                         receiver_map, scratch, slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ b(&finish_object_store);
}

void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
                                       LanguageMode language_mode) {
  // ---------- S t a t e --------------
  //  -- r2     : value
  //  -- r3     : receiver
  //  -- r4     : key
  //  -- lr     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array, maybe_name_key, miss;

  // Register usage.
  Register value = StoreDescriptor::ValueRegister();
  Register key = StoreDescriptor::NameRegister();
  Register receiver = StoreDescriptor::ReceiverRegister();
  DCHECK(receiver.is(r3));
  DCHECK(key.is(r4));
  DCHECK(value.is(r2));
  Register receiver_map = r5;
  Register elements_map = r8;
  Register elements = r9;  // Elements array of the receiver.
  // r6 and r7 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &maybe_name_key);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks.
  // The generic stub does not perform map checks.
  __ LoadlB(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ AndP(r0, ip, Operand(1 << Map::kIsAccessCheckNeeded));
  __ bne(&slow, Label::kNear);
  // Check if the object is a JS array or not.
  __ LoadlB(r6, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ CmpP(r6, Operand(JS_ARRAY_TYPE));
  __ beq(&array);
  // Check that the object is some kind of JSObject.
  __ CmpP(r6, Operand(FIRST_JS_OBJECT_TYPE));
  __ blt(&slow, Label::kNear);

  // Object case: Check key against length in the elements array.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ blt(&fast_object);

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // r2: value.
  // r3: receiver.
  // r4: key.
  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
  // Never returns to here.

  __ bind(&maybe_name_key);
  __ LoadP(r6, FieldMemOperand(key, HeapObject::kMapOffset));
  __ LoadlB(r6, FieldMemOperand(r6, Map::kInstanceTypeOffset));
  __ JumpIfNotUniqueNameInstanceType(r6, &slow);

  // The handlers in the stub cache expect a vector and slot. Since we won't
  // change the IC from any downstream misses, a dummy vector can be used.
  Register vector = StoreWithVectorDescriptor::VectorRegister();
  Register slot = StoreWithVectorDescriptor::SlotRegister();
  DCHECK(!AreAliased(vector, slot, r7, r8, r9, ip));
  Handle<TypeFeedbackVector> dummy_vector =
      TypeFeedbackVector::DummyVector(masm->isolate());
  int slot_index = dummy_vector->GetIndex(
      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
  __ LoadSmiLiteral(slot, Smi::FromInt(slot_index));

  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, r7,
                                                     r8, r9, ip);
  // Cache miss.
  __ b(&miss);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // Condition code from comparing key and array length is still available.
  __ bne(&slow);  // Only support writing to array[array.length].
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ CmpLogicalP(key, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ bge(&slow);
  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ CmpP(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ bne(&check_if_double_array, Label::kNear);
  __ b(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ CmpP(elements_map,
          Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ bne(&slow);
  __ b(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is, the length is always a smi.
  __ bind(&array);
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ CmpLogicalP(key, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ bge(&extra);

  KeyedStoreGenerateMegamorphicHelper(
      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
      value, key, receiver, receiver_map, elements_map, elements);
  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
                                      &fast_double_grow, &slow, kDontCheckMap,
                                      kIncrementLength, value, key, receiver,
                                      receiver_map, elements_map, elements);
  __ bind(&miss);
  GenerateMiss(masm);
}

void StoreIC::GenerateMiss(MacroAssembler* masm) {
  StoreIC_PushArgs(masm);

  // Perform tail call to the entry.
  __ TailCallRuntime(Runtime::kStoreIC_Miss);
}

void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = StoreDescriptor::ReceiverRegister();
  Register name = StoreDescriptor::NameRegister();
  Register value = StoreDescriptor::ValueRegister();
  Register dictionary = r7;
  DCHECK(receiver.is(r3));
  DCHECK(name.is(r4));
  DCHECK(value.is(r2));
  DCHECK(StoreWithVectorDescriptor::VectorRegister().is(r5));
  DCHECK(StoreWithVectorDescriptor::SlotRegister().is(r6));

  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->ic_store_normal_hit(), 1, r8, r9);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->ic_store_normal_miss(), 1, r8, r9);
  GenerateMiss(masm);
}

#undef __

Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}

bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a CHI, nothing
  // was inlined.
  return (Instruction::S390OpcodeValue(cmp_instruction_address) == CHI);
}

//
// This code is paired with the JumpPatchSite class in full-codegen-s390.cc
//
void PatchInlinedSmiCode(Isolate* isolate, Address address,
                         InlinedSmiCheck check) {
  Address cmp_instruction_address =
      Assembler::return_address_from_call_start(address);

  // If the instruction following the call is not a cmp rx, #yyy, nothing
  // was inlined.
  Instr instr = Assembler::instr_at(cmp_instruction_address);
  if (Instruction::S390OpcodeValue(cmp_instruction_address) != CHI) {
    return;
  }

  if (Instruction::S390OpcodeValue(address) != BRASL) {
    return;
  }
  // The delta back to the start of the inlined check sequence whose
  // condition code the patched jump uses.
  int delta = instr & 0x0000ffff;

  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
  // nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, cmp=%p, delta=%d\n",
           static_cast<void*>(address),
           static_cast<void*>(cmp_instruction_address), delta);
  }

  // Expected sequence to enable by changing the following
  //   CR/CGR  Rx, Rx   // 2 / 4 bytes
  //   LR  R0, R0       // 2 bytes // 31-bit only!
  //   BRC/BRCL         // 4 / 6 bytes
  // into
  //   TMLL    Rx, XXX  // 4 bytes
  //   BRC/BRCL         // 4 / 6 bytes
  // And vice versa to disable.

  // The patch area in front of the branch is four bytes: CR + LR on 31-bit,
  // or CGR alone on 64-bit.
  const int kPatchAreaSizeNoBranch = 4;
  Address patch_address = cmp_instruction_address - delta;
  Address branch_address = patch_address + kPatchAreaSizeNoBranch;
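  // patch_address points at the CR/CGR of the sequence above; the BRC/BRCL
  // follows immediately after the four-byte patch area.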

  Instr instr_at_patch = Assembler::instr_at(patch_address);
  SixByteInstr branch_instr = Assembler::instr_at(branch_address);

  // This is patching a conditional "jump if not smi/jump if smi" site.
  size_t patch_size = 0;
  if (Instruction::S390OpcodeValue(branch_address) == BRC) {
    patch_size = kPatchAreaSizeNoBranch + 4;
  } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
    patch_size = kPatchAreaSizeNoBranch + 6;
  } else {
    DCHECK(false);
  }
  CodePatcher patcher(isolate, patch_address, patch_size);
  Register reg;
  reg.reg_code = instr_at_patch & 0xf;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    patcher.masm()->TestIfSmi(reg);
  } else {
    // Restore the CR/CGR compare. On 31-bit, CR is only two bytes, so a NOP
    // is emitted as well to fill the four-byte patch area occupied by TMLL.
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    patcher.masm()->CmpP(reg, reg);
#ifndef V8_TARGET_ARCH_S390X
    patcher.masm()->nop();
#endif
  }

  Condition cc = al;
  if (Instruction::S390OpcodeValue(branch_address) == BRC) {
    cc = static_cast<Condition>((branch_instr & 0x00f00000) >> 20);
    DCHECK((cc == ne) || (cc == eq));
    cc = (cc == ne) ? eq : ne;
    patcher.masm()->brc(cc, Operand((branch_instr & 0xffff) << 1));
  } else if (Instruction::S390OpcodeValue(branch_address) == BRCL) {
    cc = static_cast<Condition>(
        (branch_instr & (static_cast<uint64_t>(0x00f0) << 32)) >> 36);
    DCHECK((cc == ne) || (cc == eq));
    cc = (cc == ne) ? eq : ne;
    patcher.masm()->brcl(cc, Operand((branch_instr & 0xffffffff) << 1));
  } else {
    DCHECK(false);
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_S390