/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_TRANSACTION_H_
#define ART_RUNTIME_TRANSACTION_H_

#include "base/macros.h"
#include "base/mutex.h"
#include "base/safe_map.h"
#include "base/scoped_arena_containers.h"
#include "base/value_object.h"
#include "dex/dex_file_types.h"
#include "dex/primitive.h"
#include "gc_root.h"
#include "offsets.h"

#include <functional>
#include <list>
#include <map>
#include <optional>
#include <string>

namespace art HIDDEN {
namespace gc {
class Heap;
}  // namespace gc
namespace mirror {
class Array;
class Class;
class DexCache;
class Object;
class String;
}  // namespace mirror
class InternTable;
template<class MirrorType> class ObjPtr;

class Transaction final {
 public:
  Transaction(bool strict, mirror::Class* root, ArenaStack* arena_stack, ArenaPool* arena_pool);
  ~Transaction();

  ArenaStack* GetArenaStack() {
    return allocator_.GetArenaStack();
  }

  void Abort(const std::string& abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void ThrowAbortError(Thread* self, const std::string* abort_message)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool IsAborted() const {
    return aborted_;
  }
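
  // A minimal caller-side sketch (hypothetical call site; the real ones live in the
  // transactional interpreter, not in this header): on a constraint violation the
  // transaction is aborted with a message that is later thrown as a Java error.
  //
  //   std::string msg = "<clinit> touched a static field of another class";
  //   transaction->Abort(msg);
  //   transaction->ThrowAbortError(Thread::Current(), &msg);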

  // Whether the transaction is rolling back. The flag is set when rollback starts, because
  // recording of new changes (including by nested transactions) must be disabled while the
  // rollback restores memory to its pre-transaction state.
  bool IsRollingBack() const {
    return rolling_back_;
  }

  // Whether the transaction is in strict mode. In strict mode, access to static fields is
  // constrained: one class's <clinit> is not allowed to read or modify another class's
  // static fields; violating this constraint aborts the transaction.
  bool IsStrict() const {
    return strict_;
  }

  // Record a newly allocated object/array.
  //
  // There is no reason to record old values for newly allocated objects because they become
  // unreachable when the transaction is rolled back, so their data does not need to be rolled back.
  //
  // Implementation details: We track all newly allocated objects/arrays by creating an
  // `ObjectLog`/`ArrayLog` flagged as a new object/array. We also cache the last allocated
  // object/array, which often helps avoid the search for the flagged `ObjectLog`/`ArrayLog`.
  void RecordNewObject(ObjPtr<mirror::Object> allocated_object)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void RecordNewArray(ObjPtr<mirror::Array> allocated_array)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool ObjectNeedsTransactionRecords(ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool ArrayNeedsTransactionRecords(ObjPtr<mirror::Array> array)
      REQUIRES_SHARED(Locks::mutator_lock_);
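
  // Implementation sketch (an assumption inferred from the comment above; the actual
  // definition lives in transaction.cc): `RecordNewObject()` presumably installs an
  // `ObjectLog` flagged as new and caches the allocation so later writes can skip logging.
  //
  //   void Transaction::RecordNewObject(ObjPtr<mirror::Object> obj) {
  //     ObjectLog log(&allocator_);
  //     log.MarkAsNewObject();
  //     object_logs_.Put(obj.Ptr(), std::move(log));
  //     last_allocated_object_ = obj.Ptr();
  //   }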

  // Record object field changes.
  void RecordWriteFieldBoolean(mirror::Object* obj,
                               MemberOffset field_offset,
                               uint8_t value,
                               bool is_volatile);
  void RecordWriteFieldByte(mirror::Object* obj,
                            MemberOffset field_offset,
                            int8_t value,
                            bool is_volatile);
  void RecordWriteFieldChar(mirror::Object* obj,
                            MemberOffset field_offset,
                            uint16_t value,
                            bool is_volatile);
  void RecordWriteFieldShort(mirror::Object* obj,
                             MemberOffset field_offset,
                             int16_t value,
                             bool is_volatile);
  void RecordWriteField32(mirror::Object* obj,
                          MemberOffset field_offset,
                          uint32_t value,
                          bool is_volatile);
  void RecordWriteField64(mirror::Object* obj,
                          MemberOffset field_offset,
                          uint64_t value,
                          bool is_volatile);
  void RecordWriteFieldReference(mirror::Object* obj,
                                 MemberOffset field_offset,
                                 mirror::Object* value,
                                 bool is_volatile);
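
  // Caller-side sketch (hypothetical; the real call sites are the transactional setters in
  // mirror::Object): the `value` argument is the field's *current* value, captured before
  // the store, so that `Rollback()` can write it back.
  //
  //   uint32_t old_value = obj->GetField32(field_offset);
  //   transaction->RecordWriteField32(obj.Ptr(), field_offset, old_value, /*is_volatile=*/ false);
  //   // ... only then is the new value actually stored into the field ...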

  // Record an array element change.
  void RecordWriteArray(mirror::Array* array, size_t index, uint64_t value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record intern string table changes.
  void RecordStrongStringInsertion(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringInsertion(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordStrongStringRemoval(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);
  void RecordWeakStringRemoval(ObjPtr<mirror::String> s)
      REQUIRES(Locks::intern_table_lock_);

  // Record a string resolution.
  void RecordResolveString(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Record a method type resolution.
  void RecordResolveMethodType(ObjPtr<mirror::DexCache> dex_cache, dex::ProtoIndex proto_idx)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Abort the transaction by undoing all recorded changes.
  void Rollback()
      REQUIRES_SHARED(Locks::mutator_lock_);
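
  // Likely shape of `Rollback()` (a sketch inferred from the `Undo*Modifications()` helpers
  // declared below, not the verbatim definition): set `rolling_back_` so no new records are
  // created, then undo every category of logged change, taking the intern table lock where
  // the annotations below require it.
  //
  //   rolling_back_ = true;
  //   {
  //     MutexLock mu(Thread::Current(), *Locks::intern_table_lock_);
  //     UndoInternStringTableModifications();
  //   }
  //   UndoObjectModifications();
  //   UndoArrayModifications();
  //   UndoResolveStringModifications();
  //   UndoResolveMethodTypeModifications();
  //   rolling_back_ = false;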

  void VisitRoots(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool ReadConstraint(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool WriteConstraint(ObjPtr<mirror::Object> obj) const
      REQUIRES_SHARED(Locks::mutator_lock_);

  bool WriteValueConstraint(ObjPtr<mirror::Object> value) const
      REQUIRES_SHARED(Locks::mutator_lock_);
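
  // Sketch of how strict mode plausibly feeds these queries (an assumption; the real logic
  // is in transaction.cc): each returns true when the access must be rejected, e.g. a write
  // to the statics of any class other than the one being initialized (`root_`).
  //
  //   bool Transaction::WriteConstraint(ObjPtr<mirror::Object> obj) const {
  //     return IsStrict() && obj->IsClass() && obj.Ptr() != root_;
  //   }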

 private:
  class ObjectLog : public ValueObject {
   public:
    void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
    void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
    void LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile);
    void LogShortValue(MemberOffset offset, int16_t value, bool is_volatile);
    void Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile);
    void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
    void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);

    void Undo(mirror::Object* obj) const REQUIRES_SHARED(Locks::mutator_lock_);
    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

    size_t Size() const {
      return field_values_.size();
    }

    void MarkAsNewObject() {
      DCHECK(field_values_.empty());
      is_new_object_ = true;
    }

    bool IsNewObject() const {
      return is_new_object_;
    }

    explicit ObjectLog(ScopedArenaAllocator* allocator)
        : is_new_object_(false),
          field_values_(std::less<uint32_t>(), allocator->Adapter(kArenaAllocTransaction)) {}
    ObjectLog(ObjectLog&& log) = default;

   private:
    enum FieldValueKind {
      kBoolean,
      kByte,
      kChar,
      kShort,
      k32Bits,
      k64Bits,
      kReference
    };
    struct FieldValue : public ValueObject {
      // TODO: Use JValue instead?
      uint64_t value;
      FieldValueKind kind;
      bool is_volatile;

      FieldValue() : value(0), kind(FieldValueKind::kBoolean), is_volatile(false) {}
      FieldValue(FieldValue&& log) = default;

     private:
      DISALLOW_COPY_AND_ASSIGN(FieldValue);
    };
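
    // Note on storage (follows from the struct above): every field kind, including
    // references, is widened into the single `uint64_t value` slot, and `kind` tells
    // `UndoFieldWrite()` how to narrow it back. A logged reference, for example, is
    // plausibly stored along these lines (a sketch, not the actual code):
    //
    //   FieldValue v;
    //   v.value = reinterpret_cast<uintptr_t>(old_ref);  // widen the pointer to 64 bits
    //   v.kind = FieldValueKind::kReference;
    //   v.is_volatile = is_volatile;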

    void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
    void UndoFieldWrite(mirror::Object* obj,
                        MemberOffset field_offset,
                        const FieldValue& field_value) const REQUIRES_SHARED(Locks::mutator_lock_);

    // Whether this is a new object. We do not need to keep transaction records for objects
    // created inside a transaction because they become unreachable on rollback.
    bool is_new_object_;

    // Maps a field's offset to its old value.
    ScopedArenaSafeMap<uint32_t, FieldValue> field_values_;

    DISALLOW_COPY_AND_ASSIGN(ObjectLog);
  };

  class ArrayLog : public ValueObject {
   public:
    void LogValue(size_t index, uint64_t value);

    void Undo(mirror::Array* array) const REQUIRES_SHARED(Locks::mutator_lock_);

    size_t Size() const {
      return array_values_.size();
    }

    void MarkAsNewArray() {
      DCHECK(array_values_.empty());
      is_new_array_ = true;
    }

    bool IsNewArray() const {
      return is_new_array_;
    }

    explicit ArrayLog(ScopedArenaAllocator* allocator)
        : is_new_array_(false),
          array_values_(std::less<size_t>(), allocator->Adapter(kArenaAllocTransaction)) {}

    ArrayLog(ArrayLog&& log) = default;

   private:
    void UndoArrayWrite(mirror::Array* array,
                        Primitive::Type array_type,
                        size_t index,
                        uint64_t value) const REQUIRES_SHARED(Locks::mutator_lock_);

    // Whether this is a new array. We do not need to keep transaction records for arrays
    // created inside a transaction because they become unreachable on rollback.
    bool is_new_array_;

    // Maps an element's index to its old value.
    // TODO: Use JValue instead?
    ScopedArenaSafeMap<size_t, uint64_t> array_values_;

    DISALLOW_COPY_AND_ASSIGN(ArrayLog);
  };

  class InternStringLog : public ValueObject {
   public:
    enum StringKind {
      kStrongString,
      kWeakString
    };
    enum StringOp {
      kInsert,
      kRemove
    };
    InternStringLog(ObjPtr<mirror::String> s, StringKind kind, StringOp op);

    void Undo(InternTable* intern_table) const
        REQUIRES_SHARED(Locks::mutator_lock_)
        REQUIRES(Locks::intern_table_lock_);
    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

    // Only the move constructor is supported.
    InternStringLog() = delete;
    InternStringLog(const InternStringLog& log) = delete;
    InternStringLog& operator=(const InternStringLog& log) = delete;
    InternStringLog(InternStringLog&& log) = default;
    InternStringLog& operator=(InternStringLog&& log) = delete;

   private:
    mutable GcRoot<mirror::String> str_;
    const StringKind string_kind_;
    const StringOp string_op_;
  };
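
  // Likely undo semantics (inferred from the kind/op pairs above, not the verbatim
  // definition): `Undo()` applies the inverse operation to the intern table, so a logged
  // insertion is removed and a logged removal is re-inserted, strong or weak as recorded.
  //
  //   if (string_op_ == kInsert) {
  //     if (string_kind_ == kStrongString) {
  //       intern_table->RemoveStrong(str_.Read());
  //     } else {
  //       intern_table->RemoveWeak(str_.Read());
  //     }
  //   } else {  // kRemove: put the string back.
  //     if (string_kind_ == kStrongString) {
  //       intern_table->InsertStrong(str_.Read());
  //     } else {
  //       intern_table->InsertWeak(str_.Read());
  //     }
  //   }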

  class ResolveStringLog : public ValueObject {
   public:
    ResolveStringLog(ObjPtr<mirror::DexCache> dex_cache, dex::StringIndex string_idx);

    void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);

    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

   private:
    GcRoot<mirror::DexCache> dex_cache_;
    const dex::StringIndex string_idx_;

    DISALLOW_COPY_AND_ASSIGN(ResolveStringLog);
  };

  class ResolveMethodTypeLog : public ValueObject {
   public:
    ResolveMethodTypeLog(ObjPtr<mirror::DexCache> dex_cache, dex::ProtoIndex proto_idx);

    void Undo() const REQUIRES_SHARED(Locks::mutator_lock_);

    void VisitRoots(RootVisitor* visitor) REQUIRES_SHARED(Locks::mutator_lock_);

   private:
    GcRoot<mirror::DexCache> dex_cache_;
    const dex::ProtoIndex proto_idx_;

    DISALLOW_COPY_AND_ASSIGN(ResolveMethodTypeLog);
  };

  void LogInternedString(InternStringLog&& log)
      REQUIRES(Locks::intern_table_lock_);

  void UndoObjectModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoArrayModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoInternStringTableModifications()
      REQUIRES(Locks::intern_table_lock_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoResolveStringModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);
  void UndoResolveMethodTypeModifications()
      REQUIRES_SHARED(Locks::mutator_lock_);

  void VisitObjectLogs(RootVisitor* visitor, ArenaStack* arena_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitArrayLogs(RootVisitor* visitor, ArenaStack* arena_stack)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitInternStringLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitResolveStringLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void VisitResolveMethodTypeLogs(RootVisitor* visitor)
      REQUIRES_SHARED(Locks::mutator_lock_);

  const std::string& GetAbortMessage() const;

  ObjectLog& GetOrCreateObjectLog(mirror::Object* obj);

  // The top-level transaction creates an `ArenaStack` which is then
  // passed down to nested transactions.
  std::optional<ArenaStack> arena_stack_;
  // The allocator uses the `ArenaStack` from the top-level transaction.
  ScopedArenaAllocator allocator_;
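
  // A plausible construction pattern implied by the two members above (a sketch; the real
  // wiring is in the constructor definition, and `pool` is a hypothetical `ArenaPool*`):
  // the outermost transaction owns the stack and nested transactions borrow it through
  // the constructor's `arena_stack` parameter.
  //
  //   Transaction outer(/*strict=*/ false, /*root=*/ nullptr, /*arena_stack=*/ nullptr, pool);
  //   Transaction nested(/*strict=*/ false, /*root=*/ nullptr, outer.GetArenaStack(), pool);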

  ScopedArenaSafeMap<mirror::Object*, ObjectLog> object_logs_;
  ScopedArenaSafeMap<mirror::Array*, ArrayLog> array_logs_;
  ScopedArenaForwardList<InternStringLog> intern_string_logs_;
  ScopedArenaForwardList<ResolveStringLog> resolve_string_logs_;
  ScopedArenaForwardList<ResolveMethodTypeLog> resolve_method_type_logs_;
  bool aborted_;
  bool rolling_back_;  // Single thread, no race.
  gc::Heap* const heap_;
  const bool strict_;
  std::string abort_message_;
  mirror::Class* root_;
  mirror::Object* last_allocated_object_;
  const char* assert_no_new_records_reason_;

  friend class ScopedAssertNoNewTransactionRecords;

  DISALLOW_COPY_AND_ASSIGN(Transaction);
};

class ScopedAssertNoNewTransactionRecords {
 public:
  explicit ScopedAssertNoNewTransactionRecords(const char* reason)
    : transaction_(kIsDebugBuild ? InstallAssertion(reason) : nullptr) {}

  ~ScopedAssertNoNewTransactionRecords() {
    if (kIsDebugBuild && transaction_ != nullptr) {
      RemoveAssertion(transaction_);
    }
  }

 private:
  static Transaction* InstallAssertion(const char* reason);
  static void RemoveAssertion(Transaction* transaction);

  Transaction* transaction_;
};
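
// Usage sketch (hypothetical call site): this is a debug-only RAII guard, so it is meant to
// wrap a region that must not create transaction records; in debug builds any Record*() call
// on the active transaction is then expected to trip a DCHECK naming `reason`
// (via `assert_no_new_records_reason_`).
//
//   {
//     ScopedAssertNoNewTransactionRecords sanntr("visiting transaction roots");
//     // ... code that must not log new transaction records ...
//   }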

}  // namespace art

#endif  // ART_RUNTIME_TRANSACTION_H_