/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_ART_METHOD_H_
#define ART_RUNTIME_ART_METHOD_H_

#include "dex_file.h"
#include "gc_root.h"
#include "invoke_type.h"
#include "method_reference.h"
#include "modifiers.h"
#include "mirror/object.h"
#include "object_callbacks.h"
#include "quick/quick_method_frame_info.h"
#include "read_barrier_option.h"
#include "stack.h"
#include "stack_map.h"
#include "utils.h"

namespace art {

union JValue;
class ScopedObjectAccessAlreadyRunnable;
class StringPiece;
class ShadowFrame;

namespace mirror {
class Array;
class Class;
class PointerArray;
}  // namespace mirror

typedef void (EntryPointFromInterpreter)(Thread* self, const DexFile::CodeItem* code_item,
                                         ShadowFrame* shadow_frame, JValue* result);
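
// For illustration only: a function matching this entry point signature might
// look like the hypothetical sketch below (the real implementations live in
// the interpreter; ExampleInterpreterBridge is not a real ART function).
//
//   void ExampleInterpreterBridge(Thread* self, const DexFile::CodeItem* code_item,
//                                 ShadowFrame* shadow_frame, JValue* result) {
//     // Interpret the method's bytecode against shadow_frame and store the
//     // return value in *result.
//   }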

class ArtMethod FINAL {
 public:
  ArtMethod() : access_flags_(0), dex_code_item_offset_(0), dex_method_index_(0),
      method_index_(0) { }

  ArtMethod(const ArtMethod& src, size_t image_pointer_size) {
    CopyFrom(&src, image_pointer_size);
  }

  static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa,
                                        jobject jlr_method)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE mirror::Class* GetDeclaringClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE mirror::Class* GetDeclaringClassNoBarrier()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE mirror::Class* GetDeclaringClassUnchecked()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetDeclaringClass(mirror::Class* new_declaring_class)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static MemberOffset DeclaringClassOffset() {
    return MemberOffset(OFFSETOF_MEMBER(ArtMethod, declaring_class_));
  }

  ALWAYS_INLINE uint32_t GetAccessFlags() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetAccessFlags(uint32_t new_access_flags) {
    // Not called within a transaction.
    access_flags_ = new_access_flags;
  }

  // Approximate what kind of method call would be used for this method.
  InvokeType GetInvokeType() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if the method is declared public.
  bool IsPublic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccPublic) != 0;
  }

  // Returns true if the method is declared private.
  bool IsPrivate() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccPrivate) != 0;
  }

  // Returns true if the method is declared static.
  bool IsStatic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccStatic) != 0;
  }

  // Returns true if the method is a constructor.
  bool IsConstructor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccConstructor) != 0;
  }

  // Returns true if the method is a class initializer.
  bool IsClassInitializer() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return IsConstructor() && IsStatic();
  }

  // Returns true if the method is static, private, or a constructor.
  bool IsDirect() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return IsDirect(GetAccessFlags());
  }

  static bool IsDirect(uint32_t access_flags) {
    return (access_flags & (kAccStatic | kAccPrivate | kAccConstructor)) != 0;
  }

  // Returns true if the method is declared synchronized.
  bool IsSynchronized() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint32_t synchronized = kAccSynchronized | kAccDeclaredSynchronized;
    return (GetAccessFlags() & synchronized) != 0;
  }

  bool IsFinal() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccFinal) != 0;
  }

  bool IsMiranda() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccMiranda) != 0;
  }

  bool IsNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccNative) != 0;
  }

  bool ShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccDontInline) != 0;
  }

  void SetShouldNotInline() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetAccessFlags(GetAccessFlags() | kAccDontInline);
  }

  bool IsFastNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint32_t mask = kAccFastNative | kAccNative;
    return (GetAccessFlags() & mask) == mask;
  }

  bool IsAbstract() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccAbstract) != 0;
  }

  bool IsSynthetic() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccSynthetic) != 0;
  }

  bool IsProxyMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return (GetAccessFlags() & kAccPreverified) != 0;
  }

  void SetPreverified() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK(!IsPreverified());
    SetAccessFlags(GetAccessFlags() | kAccPreverified);
  }

  bool IsOptimized(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Temporary solution for detecting if a method has been optimized: the compiler
    // does not create a GC map. Instead, the vmap table contains the stack map
    // (as in stack_map.h).
    return !IsNative()
        && GetEntryPointFromQuickCompiledCodePtrSize(pointer_size) != nullptr
        && GetQuickOatCodePointer(pointer_size) != nullptr
        && GetNativeGcMap(pointer_size) == nullptr;
  }

  bool CheckIncompatibleClassChange(InvokeType type) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint16_t GetMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Doesn't do erroneous / unresolved class checks.
  uint16_t GetMethodIndexDuringLinking() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  size_t GetVtableIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetMethodIndex();
  }

  void SetMethodIndex(uint16_t new_method_index) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    // Not called within a transaction.
    method_index_ = new_method_index;
  }

  static MemberOffset DexMethodIndexOffset() {
    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_method_index_);
  }

  static MemberOffset MethodIndexOffset() {
    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_);
  }

  uint32_t GetCodeItemOffset() {
    return dex_code_item_offset_;
  }

  void SetCodeItemOffset(uint32_t new_code_off) {
    // Not called within a transaction.
    dex_code_item_offset_ = new_code_off;
  }

  // Number of 32-bit registers that would be required to hold all the arguments.
  static size_t NumArgRegisters(const StringPiece& shorty);
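  // For example (illustrative sketch of the rule above): in a shorty the
  // leading character is the return type and is ignored; 'J' (long) and 'D'
  // (double) each need two 32-bit registers, everything else one. So for
  // shorty "VJI", i.e. (long, int) -> void, NumArgRegisters returns 2 + 1 = 3.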

  ALWAYS_INLINE uint32_t GetDexMethodIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void SetDexMethodIndex(uint32_t new_idx) {
    // Not called within a transaction.
    dex_method_index_ = new_idx;
  }

  static MemberOffset DexCacheResolvedMethodsOffset() {
    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_methods_);
  }

  static MemberOffset DexCacheResolvedTypesOffset() {
    return OFFSET_OF_OBJECT_MEMBER(ArtMethod, dex_cache_resolved_types_);
  }

  ALWAYS_INLINE mirror::PointerArray* GetDexCacheResolvedMethods()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  ALWAYS_INLINE ArtMethod* GetDexCacheResolvedMethod(uint16_t method_idx, size_t ptr_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  ALWAYS_INLINE void SetDexCacheResolvedMethod(uint16_t method_idx, ArtMethod* new_method,
                                               size_t ptr_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  ALWAYS_INLINE void SetDexCacheResolvedMethods(mirror::PointerArray* new_dex_cache_methods)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool HasDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool HasSameDexCacheResolvedMethods(ArtMethod* other)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool HasSameDexCacheResolvedMethods(mirror::PointerArray* other_cache)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template <bool kWithCheck = true>
  mirror::Class* GetDexCacheResolvedType(uint32_t type_idx)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  void SetDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* new_dex_cache_types)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool HasDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool HasSameDexCacheResolvedTypes(ArtMethod* other) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  bool HasSameDexCacheResolvedTypes(mirror::ObjectArray<mirror::Class>* other_cache)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Get the Class* from the type index into this method's dex cache.
  mirror::Class* GetClassFromTypeIndex(uint16_t type_idx, bool resolve)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Find the method that this method overrides.
  ArtMethod* FindOverriddenMethod(size_t pointer_size) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Find the method index for this method within other_dexfile. If this method isn't present then
  // return DexFile::kDexNoIndex. The name_and_signature_idx MUST refer to a MethodId with the same
  // name and signature in the other_dexfile, such as the method index used to resolve this method
  // in the other_dexfile.
  uint32_t FindDexMethodIndexInOtherDexFile(const DexFile& other_dexfile,
                                            uint32_t name_and_signature_idx)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result, const char* shorty)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  EntryPointFromInterpreter* GetEntryPointFromInterpreter() {
    return GetEntryPointFromInterpreterPtrSize(sizeof(void*));
  }
  EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size) {
    return GetEntryPoint<EntryPointFromInterpreter*>(
        EntryPointFromInterpreterOffset(pointer_size), pointer_size);
  }

  void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) {
    SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*));
  }
  void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter,
                                           size_t pointer_size) {
    SetEntryPoint(EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter,
                  pointer_size);
  }

  const void* GetEntryPointFromQuickCompiledCode() {
    return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*));
  }
  ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size) {
    return GetEntryPoint<const void*>(
        EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size);
  }

  void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) {
    SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code,
                                              sizeof(void*));
  }
  ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize(
      const void* entry_point_from_quick_compiled_code, size_t pointer_size) {
    SetEntryPoint(EntryPointFromQuickCompiledCodeOffset(pointer_size),
                  entry_point_from_quick_compiled_code, pointer_size);
  }

  uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Check whether the given PC is within the quick compiled code associated with this method's
  // quick entrypoint. This code isn't robust with respect to instrumentation, etc., and is only
  // used for debugging purposes.
  bool PcIsWithinQuickCode(uintptr_t pc) {
    return PcIsWithinQuickCode(
        reinterpret_cast<uintptr_t>(GetEntryPointFromQuickCompiledCode()), pc);
  }

  void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Returns true if the entrypoint points to the interpreter, as
  // opposed to the compiled code; that is, this method will be
  // interpreted on invocation.
  bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint32_t GetQuickOatCodeOffset();
  void SetQuickOatCodeOffset(uint32_t code_offset);

  ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) {
    uintptr_t code = reinterpret_cast<uintptr_t>(entry_point);
    // TODO: Make this Thumb2 specific. It is benign on other architectures as code is always at
    //       least 2-byte aligned.
    code &= ~0x1;
    return reinterpret_cast<const void*>(code);
  }
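
  // Worked example (values assumed for illustration): on Thumb2, an entry
  // point such as 0x70001235 carries the Thumb state in bit 0, so masking
  // with ~0x1 yields the real start of the code:
  //
  //   const void* entry = reinterpret_cast<const void*>(0x70001235);
  //   const void* code = ArtMethod::EntryPointToCodePointer(entry);  // 0x70001234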

  // Actual entry point pointer to compiled oat code or null.
  const void* GetQuickOatEntryPoint(size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  // Actual pointer to compiled oat code or null.
  const void* GetQuickOatCodePointer(size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return EntryPointToCodePointer(GetQuickOatEntryPoint(pointer_size));
  }

  // Callers should wrap the uint8_t* in a MappingTable instance for convenient access.
  const uint8_t* GetMappingTable(size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  const uint8_t* GetMappingTable(const void* code_pointer, size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Callers should wrap the uint8_t* in a VmapTable instance for convenient access.
  const uint8_t* GetVmapTable(size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  const uint8_t* GetVmapTable(const void* code_pointer, size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  CodeInfo GetOptimizedCodeInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Callers should wrap the uint8_t* in a GcMap instance for convenient access.
  const uint8_t* GetNativeGcMap(size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  const uint8_t* GetNativeGcMap(const void* code_pointer, size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  template <bool kCheckFrameSize = true>
  uint32_t GetFrameSizeInBytes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint32_t result = GetQuickFrameInfo().FrameSizeInBytes();
    if (kCheckFrameSize) {
      DCHECK_LE(static_cast<size_t>(kStackAlignment), result);
    }
    return result;
  }

  QuickMethodFrameInfo GetQuickFrameInfo() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
  QuickMethodFrameInfo GetQuickFrameInfo(const void* code_pointer)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  FrameOffset GetReturnPcOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return GetReturnPcOffset(GetFrameSizeInBytes());
  }

  FrameOffset GetReturnPcOffset(uint32_t frame_size_in_bytes)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    DCHECK_EQ(frame_size_in_bytes, GetFrameSizeInBytes());
    return FrameOffset(frame_size_in_bytes - sizeof(void*));
  }

  FrameOffset GetHandleScopeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    constexpr size_t handle_scope_offset = sizeof(ArtMethod*);
    DCHECK_LT(handle_scope_offset, GetFrameSizeInBytes());
    return FrameOffset(handle_scope_offset);
  }
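
  // Frame-layout sketch implied by the two offsets above (offsets are from SP;
  // the slots in between depend on the calling convention and frame info, and
  // the method pointer at SP is an inference from handle_scope_offset, not a
  // guarantee made here):
  //
  //   SP + frame_size - sizeof(void*)  return PC       (GetReturnPcOffset)
  //   ...                              spills / callee saves / outs
  //   SP + sizeof(ArtMethod*)          handle scope    (GetHandleScopeOffset)
  //   SP                               ArtMethod* of the current method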

  void RegisterNative(const void* native_method, bool is_fast)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  void UnregisterNative() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  static MemberOffset EntryPointFromInterpreterOffset(size_t pointer_size) {
    return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
        PtrSizedFields, entry_point_from_interpreter_) / sizeof(void*) * pointer_size);
  }

  static MemberOffset EntryPointFromJniOffset(size_t pointer_size) {
    return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
        PtrSizedFields, entry_point_from_jni_) / sizeof(void*) * pointer_size);
  }

  static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) {
    return MemberOffset(PtrSizedFieldsOffset(pointer_size) + OFFSETOF_MEMBER(
        PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size);
  }
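
  // Worked example: OFFSETOF_MEMBER above is evaluated with the *host* pointer
  // size, so dividing by sizeof(void*) recovers the field's index within
  // PtrSizedFields (0, 1 or 2), and multiplying by pointer_size rescales it to
  // the *target*. E.g. entry_point_from_jni_ is field 1, so its offset is
  // PtrSizedFieldsOffset(4) + 1 * 4 in a 32-bit image and
  // PtrSizedFieldsOffset(8) + 1 * 8 in a 64-bit image. This is what keeps the
  // offsets correct when a 64-bit compiler lays out a 32-bit image.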

  void* GetEntryPointFromJni() {
    return GetEntryPointFromJniPtrSize(sizeof(void*));
  }
  ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) {
    return GetEntryPoint<void*>(EntryPointFromJniOffset(pointer_size), pointer_size);
  }

  void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*));
  }
  ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) {
    SetEntryPoint(EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size);
  }

  // Is this a CalleeSaveMethod or ResolutionMethod, and therefore one that doesn't adhere to the
  // normal conventions for a method of managed code? Returns false for Proxy methods.
  ALWAYS_INLINE bool IsRuntimeMethod();

  // Is this a hand-crafted method used for something like describing callee saves?
  bool IsCalleeSaveMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsResolutionMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsImtConflictMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsImtUnimplementedMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uintptr_t NativeQuickPcOffset(const uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
#ifdef NDEBUG
  uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return pc - reinterpret_cast<uintptr_t>(quick_entry_point);
  }
#else
  uintptr_t NativeQuickPcOffset(const uintptr_t pc, const void* quick_entry_point)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
#endif

  // Converts a native PC to a dex PC.
  uint32_t ToDexPc(const uintptr_t pc, bool abort_on_failure = true)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Converts a dex PC to a native PC.
  uintptr_t ToNativeQuickPc(const uint32_t dex_pc, bool abort_on_failure = true)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  MethodReference ToMethodReference() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    return MethodReference(GetDexFile(), GetDexMethodIndex());
  }

  // Find the catch block for the given exception type and dex_pc. When a catch block is found,
  // also indicates (via has_no_move_exception) whether the found catch block is responsible for
  // clearing the exception or whether a move-exception instruction is present.
  uint32_t FindCatchBlock(Handle<mirror::Class> exception_type, uint32_t dex_pc,
                          bool* has_no_move_exception)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
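
  // Hypothetical usage sketch (the local names are invented for illustration;
  // DexFile::kDexNoIndex is assumed to be the not-found sentinel):
  //
  //   bool has_no_move_exception = false;
  //   uint32_t catch_dex_pc = method->FindCatchBlock(h_exception_class, throw_dex_pc,
  //                                                  &has_no_move_exception);
  //   if (catch_dex_pc != DexFile::kDexNoIndex && has_no_move_exception) {
  //     // No move-exception instruction at the handler: the runtime clears the
  //     // exception itself rather than handing it to the handler.
  //   }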

  template<typename RootVisitorType>
  void VisitRoots(RootVisitorType& visitor) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const DexFile* GetDexFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const char* GetDeclaringClassDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
    uint32_t unused_length;
    return GetShorty(&unused_length);
  }

  const char* GetShorty(uint32_t* out_length) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const Signature GetSignature() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE const char* GetName() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const DexFile::CodeItem* GetCodeItem() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  bool IsResolvedTypeIdx(uint16_t type_idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  int32_t GetLineNumFromDexPC(uint32_t dex_pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const DexFile::ProtoId& GetPrototype() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const DexFile::TypeList* GetParameterTypeList() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const char* GetDeclaringClassSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  uint16_t GetClassDefIndex() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const DexFile::ClassDef& GetClassDef() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const char* GetReturnTypeDescriptor() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  const char* GetTypeDescriptorFromTypeIdx(uint16_t type_idx)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // May cause thread suspension due to GetClassFromTypeIdx calling ResolveType; this has caused
  // a large number of bugs at call sites.
  mirror::Class* GetReturnType(bool resolve = true) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::ClassLoader* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  mirror::DexCache* GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy(size_t pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // May cause thread suspension due to class resolution.
  bool EqualParameters(Handle<mirror::ObjectArray<mirror::Class>> params)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  // Size of an instance of this object.
  static size_t ObjectSize(size_t pointer_size) {
    return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size) +
        (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size;
  }
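
  // Worked example: PtrSizedFields holds three pointer-sized entry points, so
  // sizeof(PtrSizedFields) / sizeof(void*) == 3 on the host, and an instance
  // occupies RoundUp(offset of ptr_sized_fields_, 4) + 3 * 4 bytes in a 32-bit
  // image versus RoundUp(offset of ptr_sized_fields_, 8) + 3 * 8 bytes in a
  // 64-bit image, independent of the compiler's own pointer size.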

  void CopyFrom(const ArtMethod* src, size_t image_pointer_size)
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

  ALWAYS_INLINE mirror::ObjectArray<mirror::Class>* GetDexCacheResolvedTypes()
      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);

 protected:
  // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses".
  // The class we are a part of.
  GcRoot<mirror::Class> declaring_class_;

  // Shortcut to declaring_class_->dex_cache_'s resolved methods, for fast compiled code access.
  GcRoot<mirror::PointerArray> dex_cache_resolved_methods_;

  // Shortcut to declaring_class_->dex_cache_'s resolved types, for fast compiled code access.
  GcRoot<mirror::ObjectArray<mirror::Class>> dex_cache_resolved_types_;

  // Access flags; low 16 bits are defined by spec.
  uint32_t access_flags_;

  /* Dex file fields. The defining dex file is available via declaring_class_->dex_cache_ */

  // Offset to the CodeItem.
  uint32_t dex_code_item_offset_;

  // Index into method_ids of the dex file associated with this method.
  uint32_t dex_method_index_;

  /* End of dex file fields. */

  // Entry within a dispatch table for this method. For static/direct methods the index is into
  // the declaringClass.directMethods, for virtual methods into the vtable, and for interface
  // methods into the ifTable.
  uint32_t method_index_;

  // Fake padding field gets inserted here.

  // Must be the last fields in the method.
  // PACKED(4) is necessary for the correctness of
  // RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size).
  struct PACKED(4) PtrSizedFields {
    // Method dispatch from the interpreter invokes this pointer which may cause a bridge into
    // compiled code.
    void* entry_point_from_interpreter_;

    // Pointer to JNI function registered to this method, or a function to resolve the JNI function.
    void* entry_point_from_jni_;

    // Method dispatch from quick compiled code invokes this pointer which may cause bridging into
    // the interpreter.
    void* entry_point_from_quick_compiled_code_;
  } ptr_sized_fields_;

 private:
  static size_t PtrSizedFieldsOffset(size_t pointer_size) {
    // Round up to pointer size for padding field.
    return RoundUp(OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_), pointer_size);
  }

  template<typename T>
  ALWAYS_INLINE T GetEntryPoint(MemberOffset offset, size_t pointer_size) const {
    DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
    const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
    if (pointer_size == sizeof(uint32_t)) {
      return reinterpret_cast<T>(*reinterpret_cast<const uint32_t*>(addr));
    } else {
      auto v = *reinterpret_cast<const uint64_t*>(addr);
      DCHECK_EQ(reinterpret_cast<uint64_t>(reinterpret_cast<T>(v)), v) << "Conversion lost bits";
      return reinterpret_cast<T>(v);
    }
  }
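
  // Illustration (an assumed cross-pointer-size scenario, e.g. a compiler or
  // tool reading an image whose pointer size differs from its own): with
  // pointer_size == 4 the uint32_t branch above reads the stored 32-bit value
  // and widens it into T; with pointer_size == 8 the uint64_t branch reads the
  // full value and the DCHECK verifies that converting it to T loses no bits.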

  template<typename T>
  ALWAYS_INLINE void SetEntryPoint(MemberOffset offset, T new_value, size_t pointer_size) {
    DCHECK(ValidPointerSize(pointer_size)) << pointer_size;
    const auto addr = reinterpret_cast<uintptr_t>(this) + offset.Uint32Value();
    if (pointer_size == sizeof(uint32_t)) {
      uintptr_t ptr = reinterpret_cast<uintptr_t>(new_value);
      DCHECK_EQ(static_cast<uint32_t>(ptr), ptr) << "Conversion lost bits";
      *reinterpret_cast<uint32_t*>(addr) = static_cast<uint32_t>(ptr);
    } else {
      *reinterpret_cast<uint64_t*>(addr) = reinterpret_cast<uintptr_t>(new_value);
    }
  }

  // Code points to the start of the quick code.
  static uint32_t GetCodeSize(const void* code);

  static bool PcIsWithinQuickCode(uintptr_t code, uintptr_t pc) {
    if (code == 0) {
      return pc == 0;
    }
    /*
     * During a stack walk, a return PC may point past-the-end of the code
     * in the case that the last instruction is a call that isn't expected to
     * return.  Thus, we check <= code + GetCodeSize().
     *
     * NOTE: For Thumb both pc and code are offset by 1 indicating the Thumb state.
     */
    return code <= pc && pc <= code + GetCodeSize(
        EntryPointToCodePointer(reinterpret_cast<const void*>(code)));
  }
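
  // Example (values assumed for illustration): for quick code occupying
  // [0x1000, 0x1040), a return PC of exactly 0x1040 is still accepted, since a
  // call as the final instruction leaves a past-the-end return address. For
  // Thumb, code and pc both carry the state bit, e.g. 0x1001 <= pc <= 0x1041,
  // so the bounds check still lines up.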

  DISALLOW_COPY_AND_ASSIGN(ArtMethod);  // Need to use CopyFrom to deal with 32 vs 64 bits.
};

}  // namespace art

#endif  // ART_RUNTIME_ART_METHOD_H_