// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/layout-descriptor.h"

#include <sstream>

#include "src/base/bits.h"
#include "src/handles-inl.h"
#include "src/objects-inl.h"

namespace v8 {
namespace internal {

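// Builds a layout descriptor for the given map and descriptor array. Returns
// the canonical fast pointer layout when double field unboxing is disabled or
// when none of the first |num_descriptors| descriptors introduces an unboxed
// double field.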
Handle<LayoutDescriptor> LayoutDescriptor::New(
    Isolate* isolate, Handle<Map> map, Handle<DescriptorArray> descriptors,
    int num_descriptors) {
  if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);

  int layout_descriptor_length =
      CalculateCapacity(*map, *descriptors, num_descriptors);

  if (layout_descriptor_length == 0) {
    // No double fields were found; use the fast pointer layout.
    return handle(FastPointerLayout(), isolate);
  }

  // Initially, the layout descriptor corresponds to an object with all fields
  // tagged.
  Handle<LayoutDescriptor> layout_descriptor_handle =
      LayoutDescriptor::New(isolate, layout_descriptor_length);

  LayoutDescriptor* layout_descriptor = Initialize(
      *layout_descriptor_handle, *map, *descriptors, num_descriptors);

  return handle(layout_descriptor, isolate);
}

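// Appends the property described by |details| to the map's own layout
// descriptor, growing it if necessary. Only in-object unboxed double fields
// change the descriptor; all other properties leave it untouched.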
Handle<LayoutDescriptor> LayoutDescriptor::ShareAppend(
    Isolate* isolate, Handle<Map> map, PropertyDetails details) {
  DCHECK(map->owns_descriptors());
  Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
                                             isolate);

  if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
    DCHECK(details.location() != kField ||
           layout_descriptor->IsTagged(details.field_index()));
    return layout_descriptor;
  }
  int field_index = details.field_index();
  layout_descriptor = LayoutDescriptor::EnsureCapacity(
      isolate, layout_descriptor,
      field_index + details.field_width_in_words());

  DisallowHeapAllocation no_allocation;
  LayoutDescriptor* layout_desc = *layout_descriptor;
  layout_desc = layout_desc->SetRawData(field_index);
  if (details.field_width_in_words() > 1) {
    layout_desc = layout_desc->SetRawData(field_index + 1);
  }
  return handle(layout_desc, isolate);
}

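// Appends the property to the map's current layout descriptor only if this can
// be done in place on a fast-mode descriptor with enough capacity; otherwise
// falls back to the precomputed |full_layout_descriptor|.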
Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
    Isolate* isolate, Handle<Map> map, PropertyDetails details,
    Handle<LayoutDescriptor> full_layout_descriptor) {
  DisallowHeapAllocation no_allocation;
  LayoutDescriptor* layout_descriptor = map->layout_descriptor();
  if (layout_descriptor->IsSlowLayout()) {
    return full_layout_descriptor;
  }
  if (!InobjectUnboxedField(map->GetInObjectProperties(), details)) {
    DCHECK(details.location() != kField ||
           layout_descriptor->IsTagged(details.field_index()));
    return handle(layout_descriptor, isolate);
  }
  int field_index = details.field_index();
  int new_capacity = field_index + details.field_width_in_words();
  if (new_capacity > layout_descriptor->capacity()) {
    // The current map's layout descriptor has run out of space, so use the
    // full layout descriptor.
    return full_layout_descriptor;
  }

  layout_descriptor = layout_descriptor->SetRawData(field_index);
  if (details.field_width_in_words() > 1) {
    layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
  }
  return handle(layout_descriptor, isolate);
}


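// Returns a layout descriptor with capacity for at least |new_capacity|
// fields, reusing |layout_descriptor| when it is already large enough and
// otherwise copying its contents into a newly allocated slow-mode descriptor.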
Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
    Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
    int new_capacity) {
  int old_capacity = layout_descriptor->capacity();
  if (new_capacity <= old_capacity) {
    return layout_descriptor;
  }
  Handle<LayoutDescriptor> new_layout_descriptor =
      LayoutDescriptor::New(isolate, new_capacity);
  DCHECK(new_layout_descriptor->IsSlowLayout());

  if (layout_descriptor->IsSlowLayout()) {
    memcpy(new_layout_descriptor->GetDataStartAddress(),
           layout_descriptor->GetDataStartAddress(),
           layout_descriptor->DataSize());
    return new_layout_descriptor;
  } else {
    // Fast layout.
    uint32_t value = static_cast<uint32_t>(Smi::ToInt(*layout_descriptor));
    new_layout_descriptor->set_layout_word(0, value);
    return new_layout_descriptor;
  }
}


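// Reports whether the field at |field_index| is tagged and computes the length
// of the contiguous run of fields, starting at |field_index|, that share its
// taggedness (capped at |max_sequence_length|).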
bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
                                int* out_sequence_length) {
  DCHECK_GT(max_sequence_length, 0);
  if (IsFastPointerLayout()) {
    *out_sequence_length = max_sequence_length;
    return true;
  }

  int layout_word_index;
  int layout_bit_index;

  if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
    // Out of bounds queries are considered tagged.
    *out_sequence_length = max_sequence_length;
    return true;
  }
  uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;

  uint32_t value = IsSlowLayout() ? get_layout_word(layout_word_index)
                                  : static_cast<uint32_t>(Smi::ToInt(this));

  bool is_tagged = (value & layout_mask) == 0;
  if (!is_tagged) value = ~value;  // Count set bits instead of cleared bits.
  value = value & ~(layout_mask - 1);  // Clear bits we are not interested in.
  int sequence_length;
  if (IsSlowLayout()) {
    sequence_length = base::bits::CountTrailingZeros(value) - layout_bit_index;

    if (layout_bit_index + sequence_length == kBitsPerLayoutWord) {
      // The contiguous sequence extends to the end of the current word;
      // continue counting in the subsequent words.
      ++layout_word_index;
      int num_words = number_of_layout_words();
      for (; layout_word_index < num_words; layout_word_index++) {
        value = get_layout_word(layout_word_index);
        bool cur_is_tagged = (value & 1) == 0;
        if (cur_is_tagged != is_tagged) break;
        if (!is_tagged) value = ~value;  // Count set bits instead.
        int cur_sequence_length = base::bits::CountTrailingZeros(value);
        sequence_length += cur_sequence_length;
        if (sequence_length >= max_sequence_length) break;
        if (cur_sequence_length != kBitsPerLayoutWord) break;
      }
      if (is_tagged && (field_index + sequence_length == capacity())) {
        // The contiguous sequence of tagged fields extends to the end of the
        // layout descriptor, which means that all fields starting from
        // field_index are tagged.
        sequence_length = std::numeric_limits<int>::max();
      }
    }
  } else {  // Fast layout.
    sequence_length = Min(base::bits::CountTrailingZeros(value),
                          static_cast<unsigned>(kBitsInSmiLayout)) -
                      layout_bit_index;
    if (is_tagged && (field_index + sequence_length == capacity())) {
      // The contiguous sequence of tagged fields extends to the end of the
      // layout descriptor, which means that all fields starting from
      // field_index are tagged.
      sequence_length = std::numeric_limits<int>::max();
    }
  }
  *out_sequence_length = Min(sequence_length, max_sequence_length);
  return is_tagged;
}


Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate,
                                                         int length) {
  return New(isolate, length);
}


LayoutDescriptor* LayoutDescriptor::SetTaggedForTesting(int field_index,
                                                        bool tagged) {
  return SetTagged(field_index, tagged);
}


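// Byte-offset flavour of LayoutDescriptor::IsTagged(): reports whether the
// field at |offset_in_bytes| is tagged and where the contiguous region of
// equally-tagged fields ends (not exceeding |end_offset|). The object header
// is always treated as tagged.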
bool LayoutDescriptorHelper::IsTagged(
    int offset_in_bytes, int end_offset,
    int* out_end_of_contiguous_region_offset) {
  DCHECK(IsAligned(offset_in_bytes, kPointerSize));
  DCHECK(IsAligned(end_offset, kPointerSize));
  DCHECK(offset_in_bytes < end_offset);
  if (all_fields_tagged_) {
    *out_end_of_contiguous_region_offset = end_offset;
    DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
    return true;
  }
  int max_sequence_length = (end_offset - offset_in_bytes) / kPointerSize;
  int field_index = Max(0, (offset_in_bytes - header_size_) / kPointerSize);
  int sequence_length;
  bool tagged = layout_descriptor_->IsTagged(field_index, max_sequence_length,
                                             &sequence_length);
  DCHECK_GT(sequence_length, 0);
  if (offset_in_bytes < header_size_) {
    // Object headers do not contain non-tagged fields. Check if the contiguous
    // region continues after the header.
    if (tagged) {
      // The first field is tagged, so calculate the end offset from there.
      *out_end_of_contiguous_region_offset =
          header_size_ + sequence_length * kPointerSize;
    } else {
      *out_end_of_contiguous_region_offset = header_size_;
    }
    DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
    return true;
  }
  *out_end_of_contiguous_region_offset =
      offset_in_bytes + sequence_length * kPointerSize;
  DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
  return tagged;
}


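// Shrinks a slow-mode layout descriptor to the capacity required by the first
// |num_descriptors| descriptors and reinitializes it from scratch. Fast-mode
// descriptors are returned unchanged.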
LayoutDescriptor* LayoutDescriptor::Trim(Heap* heap, Map* map,
                                         DescriptorArray* descriptors,
                                         int num_descriptors) {
  DisallowHeapAllocation no_allocation;
  // Fast mode descriptors are never shared and therefore always fully
  // correspond to their map.
  if (!IsSlowLayout()) return this;

  int layout_descriptor_length =
      CalculateCapacity(map, descriptors, num_descriptors);
  // It must not become a fast-mode descriptor here, because otherwise it would
  // have to be the fast pointer layout descriptor already, but it is in slow
  // mode now.
  DCHECK_LT(kBitsInSmiLayout, layout_descriptor_length);

  // Trim, clean and reinitialize this slow-mode layout descriptor.
  int new_backing_store_length =
      GetSlowModeBackingStoreLength(layout_descriptor_length);
  int backing_store_length = length();
  if (new_backing_store_length != backing_store_length) {
    DCHECK_LT(new_backing_store_length, backing_store_length);
    int delta = backing_store_length - new_backing_store_length;
    heap->RightTrimFixedArray(this, delta);
  }
  memset(GetDataStartAddress(), 0, DataSize());
  LayoutDescriptor* layout_descriptor =
      Initialize(this, map, descriptors, num_descriptors);
  DCHECK_EQ(this, layout_descriptor);
  return layout_descriptor;
}


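// Verification helper: checks that every own field descriptor of |map| is
// marked tagged/untagged as expected and, if |check_tail| is true, that all
// fields beyond the last descriptor are tagged.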
bool LayoutDescriptor::IsConsistentWithMap(Map* map, bool check_tail) {
  if (FLAG_unbox_double_fields) {
    DescriptorArray* descriptors = map->instance_descriptors();
    int nof_descriptors = map->NumberOfOwnDescriptors();
    int last_field_index = 0;
    for (int i = 0; i < nof_descriptors; i++) {
      PropertyDetails details = descriptors->GetDetails(i);
      if (details.location() != kField) continue;
      FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
      bool tagged_expected =
          !field_index.is_inobject() || !details.representation().IsDouble();
      for (int bit = 0; bit < details.field_width_in_words(); bit++) {
        bool tagged_actual = IsTagged(details.field_index() + bit);
        DCHECK_EQ(tagged_expected, tagged_actual);
        if (tagged_actual != tagged_expected) return false;
      }
      last_field_index =
          Max(last_field_index,
              details.field_index() + details.field_width_in_words());
    }
    if (check_tail) {
      int n = capacity();
      for (int i = last_field_index; i < n; i++) {
        DCHECK(IsTagged(i));
      }
    }
  }
  return true;
}
}  // namespace internal
}  // namespace v8