// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/snapshot/default-deserializer-allocator.h"

#include "src/heap/heap-inl.h"
#include "src/snapshot/builtin-deserializer.h"
#include "src/snapshot/deserializer.h"
#include "src/snapshot/startup-deserializer.h"

namespace v8 {
namespace internal {

DefaultDeserializerAllocator::DefaultDeserializerAllocator(
    Deserializer<DefaultDeserializerAllocator>* deserializer)
    : deserializer_(deserializer) {}

// We know the space requirements before deserialization and can
// pre-allocate that reserved space. During deserialization, all we need
// to do is to bump up the pointer for each space in the reserved
// space. This is also used for fixing back references.
// We may have to split up the pre-allocation into several chunks
// because it would not fit onto a single page. We do not have to keep
// track of when to move to the next chunk. An opcode will signal this.
// Since multiple large objects cannot be folded into one large object
// space allocation, we have to do an actual allocation when deserializing
// each large object. Instead of tracking offset for back references, we
// reference large objects by index.
Address DefaultDeserializerAllocator::AllocateRaw(AllocationSpace space,
                                                  int size) {
  if (space == LO_SPACE) {
    AlwaysAllocateScope scope(isolate());
    LargeObjectSpace* lo_space = isolate()->heap()->lo_space();
    // TODO(jgruber): May be cleaner to pass in executability as an argument.
    Executability exec =
        static_cast<Executability>(deserializer_->source()->Get());
    AllocationResult result = lo_space->AllocateRaw(size, exec);
    HeapObject* obj = result.ToObjectChecked();
    deserialized_large_objects_.push_back(obj);
    return obj->address();
  } else if (space == MAP_SPACE) {
    DCHECK_EQ(Map::kSize, size);
    return allocated_maps_[next_map_index_++];
  } else {
    DCHECK_LT(space, kNumberOfPreallocatedSpaces);
    Address address = high_water_[space];
    DCHECK_NE(address, kNullAddress);
    high_water_[space] += size;
#ifdef DEBUG
    // Assert that the current reserved chunk is still big enough.
    const Heap::Reservation& reservation = reservations_[space];
    int chunk_index = current_chunk_[space];
    DCHECK_LE(high_water_[space], reservation[chunk_index].end);
#endif
    if (space == CODE_SPACE) SkipList::Update(address, size);
    return address;
  }
}

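// Allocates |size| bytes in |space| via AllocateRaw. If a non-default
// alignment has been requested through next_alignment_, over-allocate by the
// maximum possible fill, align the object with filler, and reset the pending
// alignment back to kWordAligned.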
Address DefaultDeserializerAllocator::Allocate(AllocationSpace space,
                                               int size) {
  Address address;
  HeapObject* obj;

  if (next_alignment_ != kWordAligned) {
    const int reserved = size + Heap::GetMaximumFillToAlign(next_alignment_);
    address = AllocateRaw(space, reserved);
    obj = HeapObject::FromAddress(address);
    // If one of the following assertions fails, then we are deserializing an
    // aligned object when the filler maps have not been deserialized yet.
    // We require filler maps as padding to align the object.
    Heap* heap = isolate()->heap();
    DCHECK(ReadOnlyRoots(heap).free_space_map()->IsMap());
    DCHECK(ReadOnlyRoots(heap).one_pointer_filler_map()->IsMap());
    DCHECK(ReadOnlyRoots(heap).two_pointer_filler_map()->IsMap());
    obj = heap->AlignWithFiller(obj, size, reserved, next_alignment_);
    address = obj->address();
    next_alignment_ = kWordAligned;
    return address;
  } else {
    return AllocateRaw(space, size);
  }
}

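// Advances the allocation pointer of |space| to the start of its next
// reserved chunk. The deserializer calls this when an opcode signals that the
// current chunk has been exhausted.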
void DefaultDeserializerAllocator::MoveToNextChunk(AllocationSpace space) {
  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
  uint32_t chunk_index = current_chunk_[space];
  const Heap::Reservation& reservation = reservations_[space];
  // Make sure the current chunk is indeed exhausted.
  CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
  // Move to next reserved chunk.
  chunk_index = ++current_chunk_[space];
  CHECK_LT(chunk_index, reservation.size());
  high_water_[space] = reservation[chunk_index].start;
}

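// Resolves a back reference into the map space: maps are referenced by index
// into the list of pre-allocated map slots.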
HeapObject* DefaultDeserializerAllocator::GetMap(uint32_t index) {
  DCHECK_LT(index, next_map_index_);
  return HeapObject::FromAddress(allocated_maps_[index]);
}

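// Resolves a back reference to a large object by its index in the list of
// objects deserialized into the large object space so far.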
HeapObject* DefaultDeserializerAllocator::GetLargeObject(uint32_t index) {
  DCHECK_LT(index, deserialized_large_objects_.size());
  return deserialized_large_objects_[index];
}

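// Resolves a back reference into one of the pre-allocated spaces from its
// chunk index and offset within that chunk. If an alignment is pending, any
// filler preceding the object is skipped.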
HeapObject* DefaultDeserializerAllocator::GetObject(AllocationSpace space,
                                                    uint32_t chunk_index,
                                                    uint32_t chunk_offset) {
  DCHECK_LT(space, kNumberOfPreallocatedSpaces);
  DCHECK_LE(chunk_index, current_chunk_[space]);
  Address address = reservations_[space][chunk_index].start + chunk_offset;
  if (next_alignment_ != kWordAligned) {
    int padding = Heap::GetFillToAlign(address, next_alignment_);
    next_alignment_ = kWordAligned;
    DCHECK(padding == 0 || HeapObject::FromAddress(address)->IsFiller());
    address += padding;
  }
  return HeapObject::FromAddress(address);
}

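// Translates the flat list of reservations read from the serialized data into
// per-space chunk lists. Actual chunk addresses are filled in later by
// ReserveSpace.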
void DefaultDeserializerAllocator::DecodeReservation(
    std::vector<SerializedData::Reservation> res) {
  DCHECK_EQ(0, reservations_[FIRST_SPACE].size());
  int current_space = FIRST_SPACE;
  for (auto& r : res) {
    reservations_[current_space].push_back(
        {r.chunk_size(), kNullAddress, kNullAddress});
    if (r.is_last()) current_space++;
  }
  DCHECK_EQ(kNumberOfSpaces, current_space);
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
}

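// Asks the heap to reserve all previously decoded chunks (and to pre-allocate
// the map slots), then points each pre-allocated space's allocation pointer at
// the start of its first chunk. Returns false if the heap could not provide
// the space.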
bool DefaultDeserializerAllocator::ReserveSpace() {
#ifdef DEBUG
  for (int i = FIRST_SPACE; i < kNumberOfSpaces; ++i) {
    DCHECK_GT(reservations_[i].size(), 0);
  }
#endif  // DEBUG
  DCHECK(allocated_maps_.empty());
  if (!isolate()->heap()->ReserveSpace(reservations_, &allocated_maps_)) {
    return false;
  }
  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
    high_water_[i] = reservations_[i][0].start;
  }
  return true;
}

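// Variant used when the startup and builtin deserializers share a single heap
// reservation: the builtin code-space chunks are appended to the startup
// reservations, reserved in one go, and then handed back to the builtin
// deserializer.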
// static
bool DefaultDeserializerAllocator::ReserveSpace(
    StartupDeserializer* startup_deserializer,
    BuiltinDeserializer* builtin_deserializer) {
  Isolate* isolate = startup_deserializer->isolate();

  // Create a set of merged reservations to reserve space in one go.
  // The BuiltinDeserializer's reservations are ignored, since our actual
  // requirements vary based on whether lazy deserialization is enabled.
  // Instead, we manually determine the required code-space.

  Heap::Reservation merged_reservations[kNumberOfSpaces];
  for (int i = FIRST_SPACE; i < kNumberOfSpaces; i++) {
    merged_reservations[i] =
        startup_deserializer->allocator()->reservations_[i];
  }

  Heap::Reservation builtin_reservations =
      builtin_deserializer->allocator()
          ->CreateReservationsForEagerBuiltinsAndHandlers();
  DCHECK(!builtin_reservations.empty());

  for (const auto& c : builtin_reservations) {
    merged_reservations[CODE_SPACE].push_back(c);
  }

  if (!isolate->heap()->ReserveSpace(
          merged_reservations,
          &startup_deserializer->allocator()->allocated_maps_)) {
    return false;
  }

  DisallowHeapAllocation no_allocation;

  // Distribute the successful allocations between both deserializers.
  // There's nothing to be done here except for code space.

  {
    const int num_builtin_reservations =
        static_cast<int>(builtin_reservations.size());
    for (int i = num_builtin_reservations - 1; i >= 0; i--) {
      const auto& c = merged_reservations[CODE_SPACE].back();
      DCHECK_EQ(c.size, builtin_reservations[i].size);
      DCHECK_EQ(c.size, c.end - c.start);
      builtin_reservations[i].start = c.start;
      builtin_reservations[i].end = c.end;
      merged_reservations[CODE_SPACE].pop_back();
    }

    builtin_deserializer->allocator()->InitializeFromReservations(
        builtin_reservations);
  }

  // Write back startup reservations.

  for (int i = FIRST_SPACE; i < kNumberOfSpaces; i++) {
    startup_deserializer->allocator()->reservations_[i].swap(
        merged_reservations[i]);
  }

  for (int i = FIRST_SPACE; i < kNumberOfPreallocatedSpaces; i++) {
    startup_deserializer->allocator()->high_water_[i] =
        startup_deserializer->allocator()->reservations_[i][0].start;
  }

  return true;
}

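// Returns true iff every reserved chunk has been consumed up to its end and
// all pre-allocated map slots have been handed out.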
bool DefaultDeserializerAllocator::ReservationsAreFullyUsed() const {
  for (int space = 0; space < kNumberOfPreallocatedSpaces; space++) {
    const uint32_t chunk_index = current_chunk_[space];
    if (reservations_[space].size() != chunk_index + 1) {
      return false;
    }
    if (reservations_[space][chunk_index].end != high_water_[space]) {
      return false;
    }
  }
  return (allocated_maps_.size() == next_map_index_);
}

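// Forwards all deserialized regions (reservations, large objects, and maps)
// to the heap's black-allocation bookkeeping.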
void DefaultDeserializerAllocator::
    RegisterDeserializedObjectsForBlackAllocation() {
  isolate()->heap()->RegisterDeserializedObjectsForBlackAllocation(
      reservations_, deserialized_large_objects_, allocated_maps_);
}

Isolate* DefaultDeserializerAllocator::isolate() const {
  return deserializer_->isolate();
}

}  // namespace internal
}  // namespace v8