/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_OBJECT_H_
#define ART_RUNTIME_MIRROR_OBJECT_H_

#include "atomic.h"
#include "base/casts.h"
#include "base/enums.h"
#include "globals.h"
#include "obj_ptr.h"
#include "object_reference.h"
#include "offsets.h"
#include "verify_object.h"

namespace art {

class ArtField;
class ArtMethod;
class ImageWriter;
class LockWord;
class Monitor;
struct ObjectOffsets;
class Thread;
class VoidFunctor;

namespace mirror {

class Array;
class Class;
class ClassLoader;
class DexCache;
class FinalizerReference;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<uint8_t> BooleanArray;
typedef PrimitiveArray<int8_t> ByteArray;
typedef PrimitiveArray<uint16_t> CharArray;
typedef PrimitiveArray<double> DoubleArray;
typedef PrimitiveArray<float> FloatArray;
typedef PrimitiveArray<int32_t> IntArray;
typedef PrimitiveArray<int64_t> LongArray;
typedef PrimitiveArray<int16_t> ShortArray;
class Reference;
class String;
class Throwable;

// Fields within mirror objects aren't accessed directly so that the appropriate amount of
// handshaking is done with GC (for example, read and write barriers). This macro is used to
// compute an offset for the Set/Get methods defined in Object that can safely access fields.
#define OFFSET_OF_OBJECT_MEMBER(type, field) \
    MemberOffset(OFFSETOF_MEMBER(type, field))

// Checks that we don't do field assignments which violate the typing system.
static constexpr bool kCheckFieldAssignments = false;

// Size of Object.
static constexpr uint32_t kObjectHeaderSize = kUseBrooksReadBarrier ? 16 : 8;

// C++ mirror of java.lang.Object
class MANAGED LOCKABLE Object {
 public:
  // The number of vtable entries in java.lang.Object.
  static constexpr size_t kVTableLength = 11;

  // The size of the java.lang.Class representing a java.lang.Object.
  static uint32_t ClassSize(PointerSize pointer_size);

  // Size of an instance of java.lang.Object.
  static constexpr uint32_t InstanceSize() {
    return sizeof(Object);
  }

  static MemberOffset ClassOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE Class* GetClass() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetClass(ObjPtr<Class> new_klass) REQUIRES_SHARED(Locks::mutator_lock_);
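
  // Illustrative sketch (not part of the original header): a mirror subclass is expected to
  // expose raw offsets via OFFSET_OF_OBJECT_MEMBER and route all field accesses through the
  // Set/Get helpers declared below. The `Example` class and its `length_` field here are
  // hypothetical.
  //
  //   class MANAGED Example : public Object {
  //    public:
  //     static MemberOffset LengthOffset() {
  //       return OFFSET_OF_OBJECT_MEMBER(Example, length_);
  //     }
  //     int32_t GetLength() REQUIRES_SHARED(Locks::mutator_lock_) {
  //       return GetField32(LengthOffset());
  //     }
  //    private:
  //     int32_t length_;
  //   };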

  // Get the read barrier state with a fake address dependency.
  // '*fake_address_dependency' will be set to 0.
  ALWAYS_INLINE uint32_t GetReadBarrierState(uintptr_t* fake_address_dependency)
      REQUIRES_SHARED(Locks::mutator_lock_);
  // This version does not offer any special mechanism to prevent load-load reordering.
  ALWAYS_INLINE uint32_t GetReadBarrierState() REQUIRES_SHARED(Locks::mutator_lock_);
  // Get the read barrier state with a load-acquire.
  ALWAYS_INLINE uint32_t GetReadBarrierStateAcquire() REQUIRES_SHARED(Locks::mutator_lock_);

#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  ALWAYS_INLINE void SetReadBarrierState(uint32_t rb_state) REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kCasRelease = false>
  ALWAYS_INLINE bool AtomicSetReadBarrierState(uint32_t expected_rb_state, uint32_t rb_state)
      REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE uint32_t GetMarkBit() REQUIRES_SHARED(Locks::mutator_lock_);

  ALWAYS_INLINE bool AtomicSetMarkBit(uint32_t expected_mark_bit, uint32_t mark_bit)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Assert that the read barrier state is in the default (white) state.
  ALWAYS_INLINE void AssertReadBarrierState() const REQUIRES_SHARED(Locks::mutator_lock_);

  // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
  // invoke-interface to detect incompatible interface types.
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool VerifierInstanceOf(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE bool InstanceOf(ObjPtr<Class> klass) REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  size_t SizeOf() REQUIRES_SHARED(Locks::mutator_lock_);

  Object* Clone(Thread* self)
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Roles::uninterruptible_);

  int32_t IdentityHashCode()
      REQUIRES_SHARED(Locks::mutator_lock_)
      REQUIRES(!Locks::thread_list_lock_, !Locks::thread_suspend_count_lock_);

  static MemberOffset MonitorOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
  }

  // The `as_volatile` parameter can be false if the mutators are suspended. This is an
  // optimization since it avoids the barriers.
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  LockWord GetLockWord(bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetLockWord(LockWord new_val, bool as_volatile) REQUIRES_SHARED(Locks::mutator_lock_);
  bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool CasLockWordWeakAcquire(LockWord old_val, LockWord new_val)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool CasLockWordWeakRelease(LockWord old_val, LockWord new_val)
      REQUIRES_SHARED(Locks::mutator_lock_);
  uint32_t GetLockOwnerThreadId();
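
  // Sketch of how the lock word CAS helpers above are typically used (illustrative only; the
  // real locking protocol lives in Monitor and LockWord, and the `desired` value here is
  // hypothetical):
  //
  //   LockWord expected = GetLockWord(/* as_volatile= */ true);
  //   LockWord desired = ...;  // e.g. a thin lock owned by the calling thread.
  //   if (!CasLockWordWeakAcquire(expected, desired)) {
  //     // Lost the race, or the lock word is inflated: fall back to MonitorEnter().
  //   }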

  // Try to enter the monitor; returns non-null if we succeeded.
  mirror::Object* MonitorTryEnter(Thread* self)
      EXCLUSIVE_LOCK_FUNCTION()
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  mirror::Object* MonitorEnter(Thread* self)
      EXCLUSIVE_LOCK_FUNCTION()
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_);
  bool MonitorExit(Thread* self)
      REQUIRES(!Roles::uninterruptible_)
      REQUIRES_SHARED(Locks::mutator_lock_)
      UNLOCK_FUNCTION();
  void Notify(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void NotifyAll(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void Wait(Thread* self) REQUIRES_SHARED(Locks::mutator_lock_);
  void Wait(Thread* self, int64_t timeout, int32_t nanos) REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  bool IsClass() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  Class* AsClass() REQUIRES_SHARED(Locks::mutator_lock_);

  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  bool IsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ObjectArray<T>* AsObjectArray() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  bool IsClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ClassLoader* AsClassLoader() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  bool IsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  DexCache* AsDexCache() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  bool IsArrayInstance() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  Array* AsArray() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  BooleanArray* AsBooleanArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ByteArray* AsByteArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ByteArray* AsByteSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  CharArray* AsCharArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ShortArray* AsShortArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ShortArray* AsShortSizedArray() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  IntArray* AsIntArray() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  LongArray* AsLongArray() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  FloatArray* AsFloatArray() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsDoubleArray() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  DoubleArray* AsDoubleArray() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  bool IsString() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  String* AsString() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  Throwable* AsThrowable() REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  Reference* AsReference() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsWeakReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsSoftReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsFinalizerReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  FinalizerReference* AsFinalizerReference() REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsPhantomReferenceInstance() REQUIRES_SHARED(Locks::mutator_lock_);
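
  // Checked down-cast sketch (illustrative): callers are expected to test with an Is*() helper
  // before calling the matching As*() helper, e.g.
  //
  //   if (obj->IsString()) {
  //     mirror::String* s = obj->AsString();
  //     // ... use s ...
  //   }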

  // Accessors for Java type fields.
  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false>
  ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                       ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset,
                                                ObjPtr<Object> old_value,
                                                ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                                   ObjPtr<Object> old_value,
                                                                   ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset,
                                                  ObjPtr<Object> old_value,
                                                  ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                                     ObjPtr<Object> old_value,
                                                                     ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                    ObjPtr<Object> old_value,
                                                    ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                    ObjPtr<Object> old_value,
                                                    ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongRelaxedObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                      ObjPtr<Object> old_value,
                                                      ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongReleaseObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                      ObjPtr<Object> old_value,
                                                      ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
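
  // Note on the *WithoutWriteBarrier variants above (sketch): after a successful reference
  // store or CAS through one of them, the caller is responsible for performing the card-marking
  // write barrier itself. The helper name below is hypothetical; the real entry point lives in
  // gc::Heap.
  //
  //   if (obj->CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier<false>(
  //           offset, expected, desired)) {
  //     WriteBarrierField(obj, offset, desired);  // Hypothetical helper.
  //   }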

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kVerifyFlags & kVerifyThis) {
      VerifyObject(this);
    }
    return GetField<int32_t, kIsVolatile>(field_offset);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField32<kVerifyFlags, true>(field_offset);
  }

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
                                                          int32_t old_value, int32_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value, int32_t new_value)
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakAcquire32(MemberOffset field_offset, int32_t old_value, int32_t new_value)
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakRelease32(MemberOffset field_offset, int32_t old_value, int32_t new_value)
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset,
                                              int32_t old_value, int32_t new_value)
      ALWAYS_INLINE REQUIRES_SHARED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kVerifyFlags & kVerifyThis) {
      VerifyObject(this);
    }
    return GetField<int64_t, kIsVolatile>(field_offset);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetField64<kVerifyFlags, true>(field_offset);
  }

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset,
                                            int64_t old_value, int64_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset,
                                              int64_t old_value, int64_t new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
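
  // Typical call pattern for the transactional setters above (sketch): the kTransactionActive
  // template argument is usually chosen from the runtime's transaction state, e.g.
  //
  //   if (Runtime::Current()->IsActiveTransaction()) {
  //     obj->SetField32<true>(offset, value);
  //   } else {
  //     obj->SetField32<false>(offset, value);
  //   }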

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
  void SetFieldPtr(MemberOffset field_offset, T new_value)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
        field_offset, new_value, kRuntimePointerSize);
  }
  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
  void SetFieldPtr64(MemberOffset field_offset, T new_value)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
        field_offset, new_value, 8u);
  }

  template<bool kTransactionActive, bool kCheckTransaction = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
  ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
                                         PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (pointer_size == PointerSize::k32) {
      uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value);
      DCHECK_EQ(static_cast<uint32_t>(ptr), ptr);  // Check that we don't lose any non-zero bits.
      SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
          field_offset, static_cast<int32_t>(static_cast<uint32_t>(ptr)));
    } else {
      SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
          field_offset, reinterpret_cast64<int64_t>(new_value));
    }
  }

  // TODO: Fix thread safety analysis broken by the use of template. This should be
  // REQUIRES_SHARED(Locks::mutator_lock_).
  template<bool kVisitNativeRoots = true,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
           typename Visitor,
           typename JavaLangRefVisitor = VoidFunctor>
  void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  ArtField* FindFieldByOffset(MemberOffset offset) REQUIRES_SHARED(Locks::mutator_lock_);

  // Used by object_test.
  static void SetHashCodeSeed(uint32_t new_seed);
  // Generate an identity hash code. Public for object test.
  static uint32_t GenerateIdentityHashCode();

  // Returns a human-readable form of the name of the *class* of the given object.
  // So given an instance of java.lang.String, the output would be "java.lang.String".
  // Given an array of int, the output would be "int[]".
  // Given String.class, the output would be "java.lang.Class".
  static std::string PrettyTypeOf(ObjPtr<mirror::Object> obj)
      REQUIRES_SHARED(Locks::mutator_lock_);
  std::string PrettyTypeOf()
      REQUIRES_SHARED(Locks::mutator_lock_);

 protected:
  // Accessors for non-Java type fields.
  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  T GetFieldPtr(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, kRuntimePointerSize);
  }
  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  T GetFieldPtr64(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, PointerSize::k64);
  }

  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, PointerSize pointer_size)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (pointer_size == PointerSize::k32) {
      uint64_t address = static_cast<uint32_t>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
      return reinterpret_cast<T>(static_cast<uintptr_t>(address));
    } else {
      int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
      return reinterpret_cast64<T>(v);
    }
  }

  // TODO: Fix this when annotalysis works with visitors.
  template<bool kIsStatic,
           VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
           typename Visitor>
  void VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor) HOT_ATTR
      NO_THREAD_SAFETY_ANALYSIS;
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
           typename Visitor>
  void VisitInstanceFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) HOT_ATTR
      REQUIRES_SHARED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
           ReadBarrierOption kReadBarrierOption = kWithReadBarrier,
           typename Visitor>
  void VisitStaticFieldsReferences(ObjPtr<Class> klass, const Visitor& visitor) HOT_ATTR
      REQUIRES_SHARED(Locks::mutator_lock_);
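
  // Minimal field-visitor sketch for the Visit*References helpers above (illustrative; the exact
  // functor signature expected by the inline implementation may differ between ART versions):
  //
  //   class CountReferencesVisitor {
  //    public:
  //     void operator()(ObjPtr<Object> obj, MemberOffset offset, bool is_static) const
  //         REQUIRES_SHARED(Locks::mutator_lock_) {
  //       ++count_;
  //     }
  //     mutable size_t count_ = 0;
  //   };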

 private:
  template<typename kSize, bool kIsVolatile>
  ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    uint8_t* raw_addr = reinterpret_cast<uint8_t*>(this) + field_offset.Int32Value();
    kSize* addr = reinterpret_cast<kSize*>(raw_addr);
    if (kIsVolatile) {
      reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
    } else {
      reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
    }
  }

  template<typename kSize, bool kIsVolatile>
  ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    const uint8_t* raw_addr = reinterpret_cast<const uint8_t*>(this) + field_offset.Int32Value();
    const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
    if (kIsVolatile) {
      return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
    } else {
      return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
    }
  }

  // Get a field with acquire semantics.
  template<typename kSize>
  ALWAYS_INLINE kSize GetFieldAcquire(MemberOffset field_offset)
      REQUIRES_SHARED(Locks::mutator_lock_);

  // Verify the type correctness of stores to fields.
  // TODO: This can cause thread suspension and isn't moving GC safe.
  void CheckFieldAssignmentImpl(MemberOffset field_offset, ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_);
  void CheckFieldAssignment(MemberOffset field_offset, ObjPtr<Object> new_value)
      REQUIRES_SHARED(Locks::mutator_lock_) {
    if (kCheckFieldAssignments) {
      CheckFieldAssignmentImpl(field_offset, new_value);
    }
  }

  // A utility function that copies an object in a read barrier and write barrier-aware way.
  // This is internally used by Clone() and Class::CopyOf(). If the object is finalizable,
  // it is the caller's job to call Heap::AddFinalizerReference.
  static Object* CopyObject(ObjPtr<mirror::Object> dest,
                            ObjPtr<mirror::Object> src,
                            size_t num_bytes)
      REQUIRES_SHARED(Locks::mutator_lock_);

  static Atomic<uint32_t> hash_code_seed;

  // The Class representing the type of the object.
  HeapReference<Class> klass_;
  // Monitor and hash code information.
  uint32_t monitor_;

#ifdef USE_BROOKS_READ_BARRIER
  // Note names use a 'x' prefix and the x_rb_ptr_ is of type int
  // instead of Object to go with the alphabetical/by-type field order
  // on the Java side.
  uint32_t x_rb_ptr_;      // For the Brooks pointer.
  uint32_t x_xpadding_;    // For 8-byte alignment. TODO: get rid of this.
#endif

  friend class art::ImageWriter;
  friend class art::Monitor;
  friend struct art::ObjectOffsets;  // for verifying offset information
  friend class CopyObjectVisitor;  // for CopyObject().
  friend class CopyClassVisitor;   // for CopyObject().
  DISALLOW_ALLOCATION();
  DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_OBJECT_H_