/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_MIRROR_OBJECT_H_
#define ART_RUNTIME_MIRROR_OBJECT_H_

#include "globals.h"
#include "object_reference.h"
#include "offsets.h"
#include "verify_object.h"

namespace art {

class ArtField;
class ArtMethod;
class ImageWriter;
class LockWord;
class Monitor;
struct ObjectOffsets;
class Thread;
class VoidFunctor;

namespace mirror {

class Array;
class Class;
class FinalizerReference;
template<class T> class ObjectArray;
template<class T> class PrimitiveArray;
typedef PrimitiveArray<uint8_t> BooleanArray;
typedef PrimitiveArray<int8_t> ByteArray;
typedef PrimitiveArray<uint16_t> CharArray;
typedef PrimitiveArray<double> DoubleArray;
typedef PrimitiveArray<float> FloatArray;
typedef PrimitiveArray<int32_t> IntArray;
typedef PrimitiveArray<int64_t> LongArray;
typedef PrimitiveArray<int16_t> ShortArray;
class Reference;
class String;
class Throwable;

// Fields within mirror objects aren't accessed directly so that the appropriate amount of
// handshaking is done with GC (for example, read and write barriers). This macro is used to
// compute an offset for the Set/Get methods defined in Object that can safely access fields.
#define OFFSET_OF_OBJECT_MEMBER(type, field) \
    MemberOffset(OFFSETOF_MEMBER(type, field))

// Checks that we don't do field assignments which violate the typing system.
static constexpr bool kCheckFieldAssignments = false;

// Size of Object.
static constexpr uint32_t kObjectHeaderSize = kUseBrooksReadBarrier ? 16 : 8;

// C++ mirror of java.lang.Object
class MANAGED LOCKABLE Object {
 public:
  // The number of vtable entries in java.lang.Object.
  static constexpr size_t kVTableLength = 11;

  // The size of the java.lang.Class representing a java.lang.Object.
  static uint32_t ClassSize(size_t pointer_size);

  // Size of an instance of java.lang.Object.
  static constexpr uint32_t InstanceSize() {
    return sizeof(Object);
  }

  static MemberOffset ClassOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Object, klass_);
  }

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
      ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE Class* GetClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  void SetClass(Class* new_klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Object* GetReadBarrierPointer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  void SetReadBarrierPointer(Object* rb_ptr) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

#ifndef USE_BAKER_OR_BROOKS_READ_BARRIER
  NO_RETURN
#endif
  bool AtomicSetReadBarrierPointer(Object* expected_rb_ptr, Object* rb_ptr)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void AssertReadBarrierPointer() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
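  // Illustrative usage sketch for the type checks and down-casts declared below. Here
  // "string_class" is a hypothetical Class* for java.lang.String, and the caller is assumed to
  // already hold the mutator lock:
  //
  //   if (obj->InstanceOf(string_class)) {
  //     mirror::String* s = obj->AsString();
  //   }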
  // The verifier treats all interfaces as java.lang.Object and relies on runtime checks in
  // invoke-interface to detect incompatible interface types.
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool VerifierInstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE bool InstanceOf(Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
      ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  size_t SizeOf() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  Object* Clone(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  int32_t IdentityHashCode() const
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      LOCKS_EXCLUDED(Locks::thread_list_lock_, Locks::thread_suspend_count_lock_);

  static MemberOffset MonitorOffset() {
    return OFFSET_OF_OBJECT_MEMBER(Object, monitor_);
  }

  // As_volatile can be false if the mutators are suspended. This is an optimization since it
  // avoids the barriers.
  LockWord GetLockWord(bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SetLockWord(LockWord new_val, bool as_volatile) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool CasLockWordWeakSequentiallyConsistent(LockWord old_val, LockWord new_val)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool CasLockWordWeakRelaxed(LockWord old_val, LockWord new_val)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  uint32_t GetLockOwnerThreadId();

  mirror::Object* MonitorEnter(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      EXCLUSIVE_LOCK_FUNCTION();
  bool MonitorExit(Thread* self)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
      UNLOCK_FUNCTION();
  void Notify(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void NotifyAll(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Wait(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void Wait(Thread* self, int64_t timeout, int32_t nanos)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
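  // Illustrative sketch of the monitor API above, mirroring a Java "synchronized" block plus
  // Object.wait()/notify(). It assumes "self" is the current Thread and the mutator lock is
  // held; Wait() releases the monitor while the thread is waiting:
  //
  //   obj->MonitorEnter(self);                 // lock
  //   while (!condition) {
  //     obj->Wait(self);                       // wait until another thread calls Notify()
  //   }
  //   obj->MonitorExit(self);                  // unlock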
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  Class* AsClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ObjectArray<T>* AsObjectArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsArrayInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  Array* AsArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  BooleanArray* AsBooleanArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ByteArray* AsByteArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ByteArray* AsByteSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  CharArray* AsCharArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ShortArray* AsShortArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ShortArray* AsShortSizedArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  IntArray* AsIntArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  LongArray* AsLongArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  FloatArray* AsFloatArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  DoubleArray* AsDoubleArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  String* AsString() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  Throwable* AsThrowable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  Reference* AsReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsWeakReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsSoftReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsFinalizerReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  FinalizerReference* AsFinalizerReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool IsPhantomReferenceInstance() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Accessor for Java type fields.
  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
      ReadBarrierOption kReadBarrierOption = kWithReadBarrier, bool kIsVolatile = false>
  ALWAYS_INLINE T* GetFieldObject(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
      ReadBarrierOption kReadBarrierOption = kWithReadBarrier>
  ALWAYS_INLINE T* GetFieldObjectVolatile(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                       Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldObject(MemberOffset field_offset, Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldObjectVolatile(MemberOffset field_offset, Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
                                                Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                                   Object* old_value,
                                                                   Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongSequentiallyConsistentObject(MemberOffset field_offset, Object* old_value,
                                                  Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongSequentiallyConsistentObjectWithoutWriteBarrier(MemberOffset field_offset,
                                                                     Object* old_value,
                                                                     Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
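  // Illustrative sketch for the reference-field accessors above. "Foo" and its field "bar_"
  // are hypothetical; the caller is assumed to hold the mutator lock. Going through
  // SetFieldObject (rather than SetFieldObjectWithoutWriteBarrier or a raw store) lets the
  // runtime apply the GC write barrier:
  //
  //   Object* bar = GetFieldObject<Object>(OFFSET_OF_OBJECT_MEMBER(Foo, bar_));
  //   SetFieldObject<false>(OFFSET_OF_OBJECT_MEMBER(Foo, bar_), new_bar);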
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE int32_t GetField32Volatile(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetField32(MemberOffset field_offset, int32_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetField32Volatile(MemberOffset field_offset, int32_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE bool CasFieldWeakSequentiallyConsistent32(MemberOffset field_offset,
                                                          int32_t old_value, int32_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakRelaxed32(MemberOffset field_offset, int32_t old_value, int32_t new_value)
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongSequentiallyConsistent32(MemberOffset field_offset, int32_t old_value,
                                              int32_t new_value)
      ALWAYS_INLINE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE int64_t GetField64(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE int64_t GetField64Volatile(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE void SetField64(MemberOffset field_offset, int64_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  ALWAYS_INLINE void SetField64Volatile(MemberOffset field_offset, int64_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
                                            int64_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
  bool CasFieldStrongSequentiallyConsistent64(MemberOffset field_offset, int64_t old_value,
                                              int64_t new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
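  // Illustrative sketch for the 32-bit CAS helpers above, assuming "offset" is the
  // MemberOffset of a hypothetical 32-bit counter field and the caller holds the mutator
  // lock. The weak variant may fail spuriously, so it is retried in a loop:
  //
  //   int32_t old_value;
  //   do {
  //     old_value = GetField32(offset);
  //   } while (!CasFieldWeakSequentiallyConsistent32<false>(offset, old_value, old_value + 1));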
  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
  void SetFieldPtr(MemberOffset field_offset, T new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
        field_offset, new_value, sizeof(void*));
  }

  template<bool kTransactionActive, bool kCheckTransaction = true,
      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
  ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
                                         size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
    if (pointer_size == 4) {
      intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
      DCHECK_EQ(static_cast<int32_t>(ptr), ptr);  // Check that we don't lose any non-zero bits.
      SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
          field_offset, static_cast<int32_t>(ptr));
    } else {
      SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
          field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value)));
    }
  }

  // TODO fix thread safety analysis broken by the use of template. This should be
  // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
  template<bool kVisitClass, bool kIsStatic, typename Visitor,
      typename JavaLangRefVisitor = VoidFunctor>
  void VisitReferences(const Visitor& visitor, const JavaLangRefVisitor& ref_visitor)
      NO_THREAD_SAFETY_ANALYSIS;

  ArtField* FindFieldByOffset(MemberOffset offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Used by object_test.
  static void SetHashCodeSeed(uint32_t new_seed);
  // Generate an identity hash code. Public for object test.
  static uint32_t GenerateIdentityHashCode();

 protected:
  // Accessors for non-Java type fields.
  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  T GetFieldPtr(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetFieldPtrWithSize<T, kVerifyFlags, kIsVolatile>(field_offset, sizeof(void*));
  }

  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
  ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
    if (pointer_size == 4) {
      return reinterpret_cast<T>(GetField32<kVerifyFlags, kIsVolatile>(field_offset));
    } else {
      int64_t v = GetField64<kVerifyFlags, kIsVolatile>(field_offset);
      // Check that we don't lose any non-zero bits.
      DCHECK_EQ(static_cast<int64_t>(static_cast<uintptr_t>(v)), v);
      return reinterpret_cast<T>(static_cast<uintptr_t>(v));
    }
  }

  // TODO: Fixme when annotalysis works with visitors.
  template<bool kVisitClass, bool kIsStatic, typename Visitor>
  void VisitFieldsReferences(uint32_t ref_offsets, const Visitor& visitor)
      HOT_ATTR NO_THREAD_SAFETY_ANALYSIS;
  template<bool kVisitClass, typename Visitor>
  void VisitInstanceFieldsReferences(mirror::Class* klass, const Visitor& visitor)
      HOT_ATTR SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<bool kVisitClass, typename Visitor>
  void VisitStaticFieldsReferences(mirror::Class* klass, const Visitor& visitor)
      HOT_ATTR SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 private:
  // Generic accessors for fields of primitive size kSize.
  template<class kSize, bool kIsVolatile>
  ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  template<class kSize, bool kIsVolatile>
  ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Verify the type correctness of stores to fields.
  // TODO: This can cause thread suspension and isn't moving GC safe.
  void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void CheckFieldAssignment(MemberOffset field_offset, Object* new_value)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    if (kCheckFieldAssignments) {
      CheckFieldAssignmentImpl(field_offset, new_value);
    }
  }

  // A utility function that copies an object in a read barrier and
  // write barrier-aware way. This is internally used by Clone() and
  // Class::CopyOf().
  static Object* CopyObject(Thread* self, mirror::Object* dest, mirror::Object* src,
                            size_t num_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static Atomic<uint32_t> hash_code_seed;

  // The Class representing the type of the object.
  HeapReference<Class> klass_;
  // Monitor and hash code information.
  uint32_t monitor_;

#ifdef USE_BROOKS_READ_BARRIER
  // Note names use an 'x' prefix and the x_rb_ptr_ is of type int
  // instead of Object to go with the alphabetical/by-type field order
  // on the Java side.
  uint32_t x_rb_ptr_;      // For the Brooks pointer.
  uint32_t x_xpadding_;    // For 8-byte alignment. TODO: get rid of this.
#endif

  friend class art::ImageWriter;
  friend class art::Monitor;
  friend struct art::ObjectOffsets;  // for verifying offset information
  friend class CopyObjectVisitor;  // for CopyObject().
  friend class CopyClassVisitor;   // for CopyObject().
  DISALLOW_ALLOCATION();
  DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
};

}  // namespace mirror
}  // namespace art

#endif  // ART_RUNTIME_MIRROR_OBJECT_H_