/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "transaction.h"

#include <android-base/logging.h>

#include "base/mutex-inl.h"
#include "base/stl_util.h"
#include "common_throws.h"
#include "dex/descriptors_names.h"
#include "gc/accounting/card_table-inl.h"
#include "gc/heap.h"
#include "gc_root-inl.h"
#include "intern_table.h"
#include "mirror/class-inl.h"
#include "mirror/dex_cache-inl.h"
#include "mirror/object-inl.h"
#include "mirror/object_array-inl.h"
#include "oat/aot_class_linker.h"
#include "obj_ptr-inl.h"
#include "runtime.h"

#include <list>

namespace art HIDDEN {

// TODO: Remove (only used for debugging purposes).
static constexpr bool kEnableTransactionStats = false;

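// The transaction records undo information for modifications made during AOT-time class
// initialization (see the `IsAotCompiler()` check below) so that they can be reverted with
// `Rollback()` if initialization needs to be abandoned. Exactly one of `arena_stack` and
// `arena_pool` must be non-null: either the caller supplies an existing `ArenaStack`, or the
// transaction creates its own from the given pool.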
Transaction::Transaction(bool strict,
                         mirror::Class* root,
                         ArenaStack* arena_stack,
                         ArenaPool* arena_pool)
    : arena_stack_(std::nullopt),
      allocator_(arena_stack != nullptr ? arena_stack : &arena_stack_.emplace(arena_pool)),
      object_logs_(std::less<mirror::Object*>(), allocator_.Adapter(kArenaAllocTransaction)),
      array_logs_(std::less<mirror::Array*>(), allocator_.Adapter(kArenaAllocTransaction)),
      intern_string_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      resolve_string_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      resolve_method_type_logs_(allocator_.Adapter(kArenaAllocTransaction)),
      aborted_(false),
      rolling_back_(false),
      heap_(Runtime::Current()->GetHeap()),
      strict_(strict),
      root_(root),
      last_allocated_object_(nullptr),
      assert_no_new_records_reason_(nullptr) {
  DCHECK(Runtime::Current()->IsAotCompiler());
  DCHECK_NE(arena_stack != nullptr, arena_pool != nullptr);
}

Transaction::~Transaction() {
  if (kEnableTransactionStats) {
    size_t objects_count = object_logs_.size();
    size_t field_values_count = 0;
    for (const auto& it : object_logs_) {
      field_values_count += it.second.Size();
    }
    size_t array_count = array_logs_.size();
    size_t array_values_count = 0;
    for (const auto& it : array_logs_) {
      array_values_count += it.second.Size();
    }
    size_t intern_string_count =
        std::distance(intern_string_logs_.begin(), intern_string_logs_.end());
    size_t resolve_string_count =
        std::distance(resolve_string_logs_.begin(), resolve_string_logs_.end());
    size_t resolve_method_type_count =
        std::distance(resolve_method_type_logs_.begin(), resolve_method_type_logs_.end());
    LOG(INFO) << "Transaction::~Transaction"
              << ": objects_count=" << objects_count
              << ", field_values_count=" << field_values_count
              << ", array_count=" << array_count
              << ", array_values_count=" << array_values_count
              << ", intern_string_count=" << intern_string_count
              << ", resolve_string_count=" << resolve_string_count
              << ", resolve_method_type_count=" << resolve_method_type_count;
  }
}

void Transaction::Abort(const std::string& abort_message) {
  // We may abort more than once if the exception thrown at the time of the
  // previous abort has been caught during execution of a class initializer.
  // We just keep the message of the first abort because it will cause the
  // transaction to be rolled back anyway.
  if (!aborted_) {
    aborted_ = true;
    abort_message_ = abort_message;
  }
}

void Transaction::ThrowAbortError(Thread* self, const std::string* abort_message) {
  const bool rethrow = (abort_message == nullptr);
  if (kIsDebugBuild && rethrow) {
    CHECK(IsAborted()) << "Rethrow " << DescriptorToDot(kTransactionAbortErrorDescriptor)
                       << " while transaction is not aborted";
  }
  if (rethrow) {
    // Rethrow an exception with the earlier abort message stored in the transaction.
    self->ThrowNewWrappedException(kTransactionAbortErrorDescriptor, GetAbortMessage().c_str());
  } else {
    // Throw an exception with the given abort message.
    self->ThrowNewWrappedException(kTransactionAbortErrorDescriptor, abort_message->c_str());
  }
}

const std::string& Transaction::GetAbortMessage() const {
  return abort_message_;
}

bool Transaction::WriteConstraint(ObjPtr<mirror::Object> obj) const {
  DCHECK(obj != nullptr);

  // Prevent changes in boot image spaces for app or boot image extension.
  // For the boot image itself there are no boot image spaces, so this condition
  // evaluates to false.
  if (heap_->ObjectIsInBootImageSpace(obj)) {
    return true;
  }

  // For apps, also prevent writing to other classes.
  return IsStrict() &&
         obj->IsClass() &&  // No constraint on updating instances or arrays.
         obj != root_;      // Modifying another class' static field fails.
}

bool Transaction::WriteValueConstraint(ObjPtr<mirror::Object> value) const {
  if (value == nullptr) {
    return false;  // We can always store null values.
  }
  if (IsStrict()) {
    // TODO: Should we restrict writes the same way as for boot image extension?
    return false;
  } else if (heap_->GetBootImageSpaces().empty()) {
    return false;  // No constraints for boot image.
  } else {
    // Boot image extension.
    ObjPtr<mirror::Class> klass = value->IsClass() ? value->AsClass() : value->GetClass();
    return !AotClassLinker::CanReferenceInBootImageExtensionOrAppImage(klass, heap_);
  }
}

bool Transaction::ReadConstraint(ObjPtr<mirror::Object> obj) const {
  // Read constraints are checked only for static field reads as there are
  // no constraints on reading instance fields and array elements.
  DCHECK(obj->IsClass());
  if (IsStrict()) {
    return obj != root_;  // Fail unless the class is reading its own static fields.
  } else {
    // For boot image and boot image extension, allow reading any field.
    return false;
  }
}

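// Objects and arrays allocated inside the transaction are recorded as "new": they have no
// pre-transaction state to restore, so later writes to them can skip undo records (see
// `ObjectNeedsTransactionRecords()` and `ArrayNeedsTransactionRecords()` below).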
void Transaction::RecordNewObject(ObjPtr<mirror::Object> obj) {
  last_allocated_object_ = obj.Ptr();
  ObjectLog log(&allocator_);
  log.MarkAsNewObject();
  object_logs_.Put(obj.Ptr(), std::move(log));
}

void Transaction::RecordNewArray(ObjPtr<mirror::Array> array) {
  if (array->IsObjectArray()) {
    // `ObjectArray<T>::SetWithoutChecks()` uses `SetFieldObject()` which records value
    // changes in `object_logs_`, so we need to record new object arrays as normal objects.
    RecordNewObject(array);
    return;
  }
  last_allocated_object_ = array.Ptr();
  ArrayLog log(&allocator_);
  log.MarkAsNewArray();
  array_logs_.Put(array.Ptr(), std::move(log));
}

bool Transaction::ObjectNeedsTransactionRecords(ObjPtr<mirror::Object> obj) {
  if (obj == last_allocated_object_) {
    return false;
  }
  auto it = object_logs_.find(obj.Ptr());
  return it == object_logs_.end() || !it->second.IsNewObject();
}

bool Transaction::ArrayNeedsTransactionRecords(ObjPtr<mirror::Array> array) {
  if (array == last_allocated_object_) {
    return false;
  }
  auto it = array_logs_.find(array.Ptr());
  return it == array_logs_.end() || !it->second.IsNewArray();
}

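// Returns the existing log for `obj`, creating a new arena-allocated one on the first
// recorded write to that object.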
inline Transaction::ObjectLog& Transaction::GetOrCreateObjectLog(mirror::Object* obj) {
  return object_logs_.GetOrCreate(obj, [&]() { return ObjectLog(&allocator_); });
}

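// The `RecordWriteField*` functions below log `value` for the given field so that `Rollback()`
// can write it back; callers are expected to pass the field's current (pre-write) value, and
// only the first record per field is kept (see `ObjectLog::LogValue()`). Writes to the most
// recently allocated object are skipped because it needs no undo records.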
void Transaction::RecordWriteFieldBoolean(mirror::Object* obj,
                                          MemberOffset field_offset,
                                          uint8_t value,
                                          bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogBooleanValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteFieldByte(mirror::Object* obj,
                                       MemberOffset field_offset,
                                       int8_t value,
                                       bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogByteValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteFieldChar(mirror::Object* obj,
                                       MemberOffset field_offset,
                                       uint16_t value,
                                       bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogCharValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteFieldShort(mirror::Object* obj,
                                        MemberOffset field_offset,
                                        int16_t value,
                                        bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogShortValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteField32(mirror::Object* obj,
                                     MemberOffset field_offset,
                                     uint32_t value,
                                     bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.Log32BitsValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteField64(mirror::Object* obj,
                                     MemberOffset field_offset,
                                     uint64_t value,
                                     bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.Log64BitsValue(field_offset, value, is_volatile);
  }
}

void Transaction::RecordWriteFieldReference(mirror::Object* obj,
                                            MemberOffset field_offset,
                                            mirror::Object* value,
                                            bool is_volatile) {
  DCHECK(obj != nullptr);
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  if (obj != last_allocated_object_) {
    ObjectLog& object_log = GetOrCreateObjectLog(obj);
    object_log.LogReferenceValue(field_offset, value, is_volatile);
  }
}

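// Records a write to a primitive array element. Writes to object arrays are recorded through
// the object log (see the comment in `RecordNewArray()`), hence the `!IsObjectArray()` check.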
void Transaction::RecordWriteArray(mirror::Array* array, size_t index, uint64_t value) {
  DCHECK(array != nullptr);
  DCHECK(array->IsArrayInstance());
  DCHECK(!array->IsObjectArray());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  if (array != last_allocated_object_) {
    ArrayLog& array_log = array_logs_.GetOrCreate(array, [&]() { return ArrayLog(&allocator_); });
    array_log.LogValue(index, value);
  }
}

void Transaction::RecordResolveString(ObjPtr<mirror::DexCache> dex_cache,
                                      dex::StringIndex string_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(string_idx.index_, dex_cache->GetDexFile()->NumStringIds());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  resolve_string_logs_.emplace_front(dex_cache, string_idx);
}

void Transaction::RecordResolveMethodType(ObjPtr<mirror::DexCache> dex_cache,
                                          dex::ProtoIndex proto_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(proto_idx.index_, dex_cache->GetDexFile()->NumProtoIds());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  resolve_method_type_logs_.emplace_front(dex_cache, proto_idx);
}

void Transaction::RecordStrongStringInsertion(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kInsert);
  LogInternedString(std::move(log));
}

void Transaction::RecordWeakStringInsertion(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kInsert);
  LogInternedString(std::move(log));
}

void Transaction::RecordStrongStringRemoval(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kStrongString, InternStringLog::kRemove);
  LogInternedString(std::move(log));
}

void Transaction::RecordWeakStringRemoval(ObjPtr<mirror::String> s) {
  InternStringLog log(s, InternStringLog::kWeakString, InternStringLog::kRemove);
  LogInternedString(std::move(log));
}

void Transaction::LogInternedString(InternStringLog&& log) {
  Locks::intern_table_lock_->AssertExclusiveHeld(Thread::Current());
  DCHECK(assert_no_new_records_reason_ == nullptr) << assert_no_new_records_reason_;
  intern_string_logs_.push_front(std::move(log));
}

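// Undoes all recorded modifications. This runs after the runtime has left transaction mode
// (see the CHECK below) and takes the intern table lock so that interned-string changes can
// be reverted safely.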
void Transaction::Rollback() {
  Thread* self = Thread::Current();
  self->AssertNoPendingException();
  MutexLock mu(self, *Locks::intern_table_lock_);
  rolling_back_ = true;
  CHECK(!Runtime::Current()->IsActiveTransaction());
  UndoObjectModifications();
  UndoArrayModifications();
  UndoInternStringTableModifications();
  UndoResolveStringModifications();
  UndoResolveMethodTypeModifications();
  rolling_back_ = false;
}

void Transaction::UndoObjectModifications() {
  // TODO: We may not need to restore objects allocated during this transaction, or we could
  // directly remove them from the heap.
  for (const auto& it : object_logs_) {
    it.second.Undo(it.first);
  }
  object_logs_.clear();
}

void Transaction::UndoArrayModifications() {
  // TODO: We may not need to restore arrays allocated during this transaction, or we could
  // directly remove them from the heap.
  for (const auto& it : array_logs_) {
    it.second.Undo(it.first);
  }
  array_logs_.clear();
}

void Transaction::UndoInternStringTableModifications() {
  InternTable* const intern_table = Runtime::Current()->GetInternTable();
  // We want to undo each operation from the most recent to the oldest. The list is filled with
  // the most recent operation at its front, so we simply iterate over it.
  for (const InternStringLog& string_log : intern_string_logs_) {
    string_log.Undo(intern_table);
  }
  intern_string_logs_.clear();
}

void Transaction::UndoResolveStringModifications() {
  for (ResolveStringLog& string_log : resolve_string_logs_) {
    string_log.Undo();
  }
  resolve_string_logs_.clear();
}

void Transaction::UndoResolveMethodTypeModifications() {
  for (ResolveMethodTypeLog& method_type_log : resolve_method_type_logs_) {
    method_type_log.Undo();
  }
  resolve_method_type_logs_.clear();
}

void Transaction::VisitRoots(RootVisitor* visitor) {
  // Transactions are used for single-threaded initialization.
  // This is the only function that should be called from a different thread,
  // namely the GC thread, and it is called with the mutator lock held exclusively,
  // so the data structures in the `Transaction` are protected from concurrent use.
  DCHECK(Locks::mutator_lock_->IsExclusiveHeld(Thread::Current()));

  visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&root_), RootInfo(kRootUnknown));
  visitor->VisitRoot(&last_allocated_object_, RootInfo(kRootUnknown));
  {
    // Create a separate `ArenaStack` for this thread.
    ArenaStack arena_stack(Runtime::Current()->GetArenaPool());
    VisitObjectLogs(visitor, &arena_stack);
    VisitArrayLogs(visitor, &arena_stack);
  }
  VisitInternStringLogs(visitor);
  VisitResolveStringLogs(visitor);
  VisitResolveMethodTypeLogs(visitor);
}

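// Re-keys `container` entries whose key objects were moved by the GC: for each (old, new)
// pair in `moving_roots`, the map node is extracted and re-inserted under the new address.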
template <typename MovingRoots, typename Container>
void UpdateKeys(const MovingRoots& moving_roots, Container& container) {
  for (const auto& pair : moving_roots) {
    auto* old_root = pair.first;
    auto* new_root = pair.second;
    auto node = container.extract(old_root);
    CHECK(!node.empty());
    node.key() = new_root;
    bool inserted = container.insert(std::move(node)).inserted;
    CHECK(inserted);
  }
}

void Transaction::VisitObjectLogs(RootVisitor* visitor, ArenaStack* arena_stack) {
  // List of moving roots.
  ScopedArenaAllocator allocator(arena_stack);
  using ObjectPair = std::pair<mirror::Object*, mirror::Object*>;
  ScopedArenaForwardList<ObjectPair> moving_roots(allocator.Adapter(kArenaAllocTransaction));

  // Visit roots.
  for (auto& it : object_logs_) {
    it.second.VisitRoots(visitor);
    mirror::Object* old_root = it.first;
    mirror::Object* new_root = old_root;
    visitor->VisitRoot(&new_root, RootInfo(kRootUnknown));
    if (new_root != old_root) {
      moving_roots.push_front(std::make_pair(old_root, new_root));
    }
  }

  // Update object logs with moving roots.
  UpdateKeys(moving_roots, object_logs_);
}

void Transaction::VisitArrayLogs(RootVisitor* visitor, ArenaStack* arena_stack) {
  // List of moving roots.
  ScopedArenaAllocator allocator(arena_stack);
  using ArrayPair = std::pair<mirror::Array*, mirror::Array*>;
  ScopedArenaForwardList<ArrayPair> moving_roots(allocator.Adapter(kArenaAllocTransaction));

  for (auto& it : array_logs_) {
    mirror::Array* old_root = it.first;
    mirror::Array* new_root = old_root;
    visitor->VisitRoot(reinterpret_cast<mirror::Object**>(&new_root), RootInfo(kRootUnknown));
    if (new_root != old_root) {
      moving_roots.push_front(std::make_pair(old_root, new_root));
    }
  }

  // Update array logs with moving roots.
  UpdateKeys(moving_roots, array_logs_);
}

void Transaction::VisitInternStringLogs(RootVisitor* visitor) {
  for (InternStringLog& log : intern_string_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::VisitResolveStringLogs(RootVisitor* visitor) {
  for (ResolveStringLog& log : resolve_string_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::VisitResolveMethodTypeLogs(RootVisitor* visitor) {
  for (ResolveMethodTypeLog& log : resolve_method_type_logs_) {
    log.VisitRoots(visitor);
  }
}

void Transaction::ObjectLog::LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile) {
  LogValue(ObjectLog::kBoolean, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogByteValue(MemberOffset offset, int8_t value, bool is_volatile) {
  LogValue(ObjectLog::kByte, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile) {
  LogValue(ObjectLog::kChar, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogShortValue(MemberOffset offset, int16_t value, bool is_volatile) {
  LogValue(ObjectLog::kShort, offset, value, is_volatile);
}

void Transaction::ObjectLog::Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile) {
  LogValue(ObjectLog::k32Bits, offset, value, is_volatile);
}

void Transaction::ObjectLog::Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile) {
  LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
}

void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset,
                                               mirror::Object* obj,
                                               bool is_volatile) {
  LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
}

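// Stores a record for the field at `offset` unless one already exists, so the log keeps the
// value the field held before its first modification in this transaction. Logs marked as new
// objects record nothing, since a new object has no prior state to restore.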
void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
                                      MemberOffset offset,
                                      uint64_t value,
                                      bool is_volatile) {
  if (is_new_object_) {
    return;
  }
  auto it = field_values_.find(offset.Uint32Value());
  if (it == field_values_.end()) {
    ObjectLog::FieldValue field_value;
    field_value.value = value;
    field_value.is_volatile = is_volatile;
    field_value.kind = kind;
    field_values_.emplace(offset.Uint32Value(), std::move(field_value));
  }
}

void Transaction::ObjectLog::Undo(mirror::Object* obj) const {
  for (auto& it : field_values_) {
    // The garbage collector needs to access the object's class and the array's length,
    // so we do not roll back these values.
    MemberOffset field_offset(it.first);
    if (field_offset.Uint32Value() == mirror::Class::ClassOffset().Uint32Value()) {
      // Skip the Object::class field.
      continue;
    }
    if (obj->IsArrayInstance() &&
        field_offset.Uint32Value() == mirror::Array::LengthOffset().Uint32Value()) {
      // Skip the Array::length field.
      continue;
    }
    const FieldValue& field_value = it.second;
    UndoFieldWrite(obj, field_offset, field_value);
  }
}

void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj,
                                            MemberOffset field_offset,
                                            const FieldValue& field_value) const {
  // TODO: We may want to abort a transaction while still in transaction mode; in that case we
  // would need to disable this check.
  constexpr bool kCheckTransaction = false;
  switch (field_value.kind) {
    case kBoolean:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldBooleanVolatile<false, kCheckTransaction>(
            field_offset,
            field_value.value);
      } else {
        obj->SetFieldBoolean<false, kCheckTransaction>(
            field_offset,
            field_value.value);
      }
      break;
    case kByte:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldByteVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<int8_t>(field_value.value));
      } else {
        obj->SetFieldByte<false, kCheckTransaction>(
            field_offset,
            static_cast<int8_t>(field_value.value));
      }
      break;
    case kChar:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldCharVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<uint16_t>(field_value.value));
      } else {
        obj->SetFieldChar<false, kCheckTransaction>(
            field_offset,
            static_cast<uint16_t>(field_value.value));
      }
      break;
    case kShort:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldShortVolatile<false, kCheckTransaction>(
            field_offset,
            static_cast<int16_t>(field_value.value));
      } else {
        obj->SetFieldShort<false, kCheckTransaction>(
            field_offset,
            static_cast<int16_t>(field_value.value));
      }
      break;
    case k32Bits:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetField32Volatile<false, kCheckTransaction>(
            field_offset,
            static_cast<uint32_t>(field_value.value));
      } else {
        obj->SetField32<false, kCheckTransaction>(
            field_offset,
            static_cast<uint32_t>(field_value.value));
      }
      break;
    case k64Bits:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetField64Volatile<false, kCheckTransaction>(field_offset, field_value.value);
      } else {
        obj->SetField64<false, kCheckTransaction>(field_offset, field_value.value);
      }
      break;
    case kReference:
      if (UNLIKELY(field_value.is_volatile)) {
        obj->SetFieldObjectVolatile<false, kCheckTransaction>(
            field_offset,
            reinterpret_cast<mirror::Object*>(field_value.value));
      } else {
        obj->SetFieldObject<false, kCheckTransaction>(
            field_offset,
            reinterpret_cast<mirror::Object*>(field_value.value));
      }
      break;
  }
}

void Transaction::ObjectLog::VisitRoots(RootVisitor* visitor) {
  for (auto& it : field_values_) {
    FieldValue& field_value = it.second;
    if (field_value.kind == ObjectLog::kReference) {
      visitor->VisitRootIfNonNull(reinterpret_cast<mirror::Object**>(&field_value.value),
                                  RootInfo(kRootUnknown));
    }
  }
}

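// Reverts an intern table operation: an insertion is undone by removing the string and a
// removal by re-inserting it, using the string kind (strong or weak) captured at log time.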
void Transaction::InternStringLog::Undo(InternTable* intern_table) const {
  DCHECK(!Runtime::Current()->IsActiveTransaction());
  DCHECK(intern_table != nullptr);
  ObjPtr<mirror::String> s = str_.Read();
  uint32_t hash = static_cast<uint32_t>(s->GetStoredHashCode());
  switch (string_op_) {
    case InternStringLog::kInsert: {
      switch (string_kind_) {
        case InternStringLog::kStrongString:
          intern_table->RemoveStrong(s, hash);
          break;
        case InternStringLog::kWeakString:
          intern_table->RemoveWeak(s, hash);
          break;
        default:
          LOG(FATAL) << "Unknown interned string kind";
          UNREACHABLE();
      }
      break;
    }
    case InternStringLog::kRemove: {
      switch (string_kind_) {
        case InternStringLog::kStrongString:
          intern_table->InsertStrong(s, hash);
          break;
        case InternStringLog::kWeakString:
          intern_table->InsertWeak(s, hash);
          break;
        default:
          LOG(FATAL) << "Unknown interned string kind";
          UNREACHABLE();
      }
      break;
    }
    default:
      LOG(FATAL) << "Unknown interned string op";
      UNREACHABLE();
  }
}

void Transaction::InternStringLog::VisitRoots(RootVisitor* visitor) {
  str_.VisitRoot(visitor, RootInfo(kRootInternedString));
}

void Transaction::ResolveStringLog::Undo() const {
  dex_cache_.Read()->ClearString(string_idx_);
}

Transaction::ResolveStringLog::ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache,
                                                dex::StringIndex string_idx)
    : dex_cache_(dex_cache),
      string_idx_(string_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(string_idx_.index_, dex_cache->GetDexFile()->NumStringIds());
}

void Transaction::ResolveStringLog::VisitRoots(RootVisitor* visitor) {
  dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
}

void Transaction::ResolveMethodTypeLog::Undo() const {
  dex_cache_.Read()->ClearMethodType(proto_idx_);
}

Transaction::ResolveMethodTypeLog::ResolveMethodTypeLog(ObjPtr<mirror::DexCache> dex_cache,
                                                        dex::ProtoIndex proto_idx)
    : dex_cache_(dex_cache),
      proto_idx_(proto_idx) {
  DCHECK(dex_cache != nullptr);
  DCHECK_LT(proto_idx_.index_, dex_cache->GetDexFile()->NumProtoIds());
}

void Transaction::ResolveMethodTypeLog::VisitRoots(RootVisitor* visitor) {
  dex_cache_.VisitRoot(visitor, RootInfo(kRootVMInternal));
}

Transaction::InternStringLog::InternStringLog(ObjPtr<mirror::String> s,
                                              StringKind kind,
                                              StringOp op)
    : str_(s),
      string_kind_(kind),
      string_op_(op) {
  DCHECK(s != nullptr);
}

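// Records the element value at `index` the first time that index is written (`FindOrAdd()`
// keeps the earliest record), so rollback restores the value from before the transaction.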
void Transaction::ArrayLog::LogValue(size_t index, uint64_t value) {
  if (is_new_array_) {
    return;
  }
  // Add a mapping if there is none yet.
  array_values_.FindOrAdd(index, value);
}

void Transaction::ArrayLog::Undo(mirror::Array* array) const {
  DCHECK(array != nullptr);
  DCHECK(array->IsArrayInstance());
  Primitive::Type type = array->GetClass()->GetComponentType()->GetPrimitiveType();
  for (auto it : array_values_) {
    UndoArrayWrite(array, type, it.first, it.second);
  }
}

void Transaction::ArrayLog::UndoArrayWrite(mirror::Array* array,
                                           Primitive::Type array_type,
                                           size_t index,
                                           uint64_t value) const {
  // TODO: We may want to abort a transaction while still in transaction mode; in that case we
  // would need to disable this check.
  constexpr bool kCheckTransaction = false;
  switch (array_type) {
    case Primitive::kPrimBoolean:
      array->AsBooleanArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<uint8_t>(value));
      break;
    case Primitive::kPrimByte:
      array->AsByteArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int8_t>(value));
      break;
    case Primitive::kPrimChar:
      array->AsCharArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<uint16_t>(value));
      break;
    case Primitive::kPrimShort:
      array->AsShortArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int16_t>(value));
      break;
    case Primitive::kPrimInt:
      array->AsIntArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int32_t>(value));
      break;
    case Primitive::kPrimFloat:
      array->AsFloatArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<float>(value));
      break;
    case Primitive::kPrimLong:
      array->AsLongArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<int64_t>(value));
      break;
    case Primitive::kPrimDouble:
      array->AsDoubleArray()->SetWithoutChecks<false, kCheckTransaction>(
          index, static_cast<double>(value));
      break;
    case Primitive::kPrimNot:
      LOG(FATAL) << "ObjectArray should be treated as Object";
      UNREACHABLE();
    default:
      LOG(FATAL) << "Unsupported type " << array_type;
      UNREACHABLE();
  }
}

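// Debug-only helper: while the scoped assertion is installed, any attempt to add a new record
// to the active transaction trips the `assert_no_new_records_reason_` DCHECKs above.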
Transaction* ScopedAssertNoNewTransactionRecords::InstallAssertion(const char* reason) {
  Transaction* transaction = nullptr;
  if (kIsDebugBuild && Runtime::Current()->IsActiveTransaction()) {
    AotClassLinker* class_linker = down_cast<AotClassLinker*>(Runtime::Current()->GetClassLinker());
    transaction = class_linker->GetTransaction();
    if (transaction != nullptr) {
      CHECK(transaction->assert_no_new_records_reason_ == nullptr)
          << "old: " << transaction->assert_no_new_records_reason_ << " new: " << reason;
      transaction->assert_no_new_records_reason_ = reason;
    }
  }
  return transaction;
}

void ScopedAssertNoNewTransactionRecords::RemoveAssertion(Transaction* transaction) {
  if (kIsDebugBuild) {
    AotClassLinker* class_linker = down_cast<AotClassLinker*>(Runtime::Current()->GetClassLinker());
    CHECK(class_linker->GetTransaction() == transaction);
    CHECK(transaction->assert_no_new_records_reason_ != nullptr);
    transaction->assert_no_new_records_reason_ = nullptr;
  }
}

}  // namespace art