/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_RUNTIME_GC_ACCOUNTING_READ_BARRIER_TABLE_H_
#define ART_RUNTIME_GC_ACCOUNTING_READ_BARRIER_TABLE_H_

#include "base/bit_utils.h"
#include "base/mutex.h"
#include "gc/space/space.h"
#include "globals.h"
#include "mem_map.h"

namespace art {
namespace gc {
namespace accounting {

// Used to decide whether to take the read barrier fast/slow paths for
// kUseTableLookupReadBarrier. If an entry is set, take the read
// barrier slow path. There's an entry per region.
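//
// Illustrative usage sketch (not part of this header; names such as obj and
// region_space are hypothetical):
//
//   ReadBarrierTable rb_table;
//   rb_table.SetAll();                     // every region now takes the slow path
//   if (rb_table.IsSet(obj)) {             // obj: a heap reference in the low 4GB
//     // take the read barrier slow path
//   }
//   rb_table.ClearForSpace(region_space);  // fast path again for that space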
class ReadBarrierTable {
 public:
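  // Maps an anonymous table with one byte of state per kRegionSize region of the low 4GB heap.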
  ReadBarrierTable() {
    size_t capacity = static_cast<size_t>(kHeapCapacity / kRegionSize);
    DCHECK_EQ(kHeapCapacity / kRegionSize,
              static_cast<uint64_t>(static_cast<size_t>(kHeapCapacity / kRegionSize)));
    std::string error_msg;
    MemMap* mem_map = MemMap::MapAnonymous("read barrier table", nullptr, capacity,
                                           PROT_READ | PROT_WRITE, false, false, &error_msg);
    CHECK(mem_map != nullptr && mem_map->Begin() != nullptr)
        << "couldn't allocate read barrier table: " << error_msg;
    mem_map_.reset(mem_map);
  }
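  // Zeroes the entries covering [space->Begin(), space->Limit()), i.e. the whole space takes
  // the read barrier fast path again.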
  void ClearForSpace(space::ContinuousSpace* space) {
    uint8_t* entry_start = EntryFromAddr(space->Begin());
    uint8_t* entry_end = EntryFromAddr(space->Limit());
    memset(reinterpret_cast<void*>(entry_start), 0, entry_end - entry_start);
  }
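  // Zeroes the entries covering the region-aligned address range [start_addr, end_addr).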
  void Clear(uint8_t* start_addr, uint8_t* end_addr) {
    DCHECK(IsValidHeapAddr(start_addr)) << start_addr;
    DCHECK(IsValidHeapAddr(end_addr)) << end_addr;
    DCHECK_ALIGNED(start_addr, kRegionSize);
    DCHECK_ALIGNED(end_addr, kRegionSize);
    uint8_t* entry_start = EntryFromAddr(start_addr);
    uint8_t* entry_end = EntryFromAddr(end_addr);
    memset(reinterpret_cast<void*>(entry_start), 0, entry_end - entry_start);
  }
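  // Returns true if the entry covering heap_addr is set, i.e. the slow path must be taken.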
  bool IsSet(const void* heap_addr) const {
    DCHECK(IsValidHeapAddr(heap_addr)) << heap_addr;
    uint8_t entry_value = *EntryFromAddr(heap_addr);
    DCHECK(entry_value == 0 || entry_value == kSetEntryValue);
    return entry_value == kSetEntryValue;
  }
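  // Zeroes the whole table; madvise lets the kernel reclaim the backing pages.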
  void ClearAll() {
    mem_map_->MadviseDontNeedAndZero();
  }
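  // Sets every entry, forcing the slow path for all regions.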
  void SetAll() {
    memset(mem_map_->Begin(), kSetEntryValue, mem_map_->Size());
  }
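  // Returns true if no entry is set, scanning the table a word at a time.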
  bool IsAllCleared() const {
    for (uint32_t* p = reinterpret_cast<uint32_t*>(mem_map_->Begin());
         p < reinterpret_cast<uint32_t*>(mem_map_->End()); ++p) {
      if (*p != 0) {
        return false;
      }
    }
    return true;
  }

  // This should match RegionSpace::kRegionSize. static_assert'ed in concurrent_copying.h.
  static constexpr size_t kRegionSize = 1 * MB;

 private:
  static constexpr uint64_t kHeapCapacity = 4ULL * GB;  // low 4gb.
  static constexpr uint8_t kSetEntryValue = 0x01;

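  // Returns the address of the one-byte entry covering heap_addr. For example, with
  // kRegionSize == 1 MB, heap address 0x12345678 maps to entry index 0x123
  // (0x12345678 / 0x100000).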
  uint8_t* EntryFromAddr(const void* heap_addr) const {
    DCHECK(IsValidHeapAddr(heap_addr)) << heap_addr;
    uint8_t* entry_addr = mem_map_->Begin() + reinterpret_cast<uintptr_t>(heap_addr) / kRegionSize;
    DCHECK(IsValidEntry(entry_addr)) << "heap_addr: " << heap_addr
                                     << " entry_addr: " << reinterpret_cast<void*>(entry_addr);
    return entry_addr;
  }

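  // On 64-bit targets, checks that heap_addr lies within the low 4GB covered by the table;
  // 32-bit addresses always qualify.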
  bool IsValidHeapAddr(const void* heap_addr) const {
#ifdef __LP64__
    return reinterpret_cast<uint64_t>(heap_addr) < kHeapCapacity;
#else
    UNUSED(heap_addr);
    return true;
#endif
  }

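  // Checks that entry_addr points into the table's mapping.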
  bool IsValidEntry(const uint8_t* entry_addr) const {
    uint8_t* begin = mem_map_->Begin();
    uint8_t* end = mem_map_->End();
    return entry_addr >= begin && entry_addr < end;
  }

  std::unique_ptr<MemMap> mem_map_;
};

}  // namespace accounting
}  // namespace gc
}  // namespace art

#endif  // ART_RUNTIME_GC_ACCOUNTING_READ_BARRIER_TABLE_H_