/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_
#define ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_

#include <memory>

#include "base/arena_object.h"
#include "base/logging.h"
#include "dex_instruction_utils.h"
#include "global_value_numbering.h"

namespace art {

class DexFile;

// Enable/disable tracking values stored in the FILLED_NEW_ARRAY result.
static constexpr bool kLocalValueNumberingEnableFilledNewArrayTracking = true;

class LocalValueNumbering : public DeletableArenaObject<kArenaAllocMisc> {
 private:
  static constexpr uint16_t kNoValue = GlobalValueNumbering::kNoValue;

 public:
  LocalValueNumbering(GlobalValueNumbering* gvn, BasicBlockId id, ScopedArenaAllocator* allocator);

  BasicBlockId Id() const {
    return id_;
  }

  bool Equals(const LocalValueNumbering& other) const;

  bool IsValueNullChecked(uint16_t value_name) const {
    return null_checked_.find(value_name) != null_checked_.end();
  }

  bool IsValueDivZeroChecked(uint16_t value_name) const {
    return div_zero_checked_.find(value_name) != div_zero_checked_.end();
  }

  uint16_t GetSregValue(uint16_t s_reg) const {
    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
    return GetSregValueImpl(s_reg, &sreg_value_map_);
  }

  uint16_t GetSregValueWide(uint16_t s_reg) const {
    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
    return GetSregValueImpl(s_reg, &sreg_wide_value_map_);
  }

  // Get the starting value number for a given dalvik register.
  uint16_t GetStartingVregValueNumber(int v_reg) const {
    return GetStartingVregValueNumberImpl(v_reg, false);
  }

  // Get the starting value number for a given wide dalvik register.
  uint16_t GetStartingVregValueNumberWide(int v_reg) const {
    return GetStartingVregValueNumberImpl(v_reg, true);
  }

  enum MergeType {
    kNormalMerge,  // Regular merge of the predecessors' LVN state.
    kCatchMerge,   // Merge into a catch handler; see PruneNonAliasingRefsForCatch().
    kReturnMerge,  // RETURN or PHI+RETURN. Merge only sreg maps.
  };

  void MergeOne(const LocalValueNumbering& other, MergeType merge_type);
  void Merge(MergeType merge_type);  // Merge gvn_->merge_lvns_.
  void PrepareEntryBlock();

  uint16_t GetValueNumber(MIR* mir);

 private:
  // A set of value names.
  typedef GlobalValueNumbering::ValueNameSet ValueNameSet;

  // Key is s_reg, value is value name.
  typedef ScopedArenaSafeMap<uint16_t, uint16_t> SregValueMap;

  uint16_t GetEndingVregValueNumberImpl(int v_reg, bool wide) const;
  uint16_t GetStartingVregValueNumberImpl(int v_reg, bool wide) const;

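  // The two lookup flavors below differ in how they fall back to the GVN: GetSregValueImpl()
  // uses gvn_->FindValue() and so yields kNoValue when no value name is recorded for s_reg,
  // while GetOperandValueImpl() uses gvn_->LookupValue() to obtain a value name for an input
  // register that has not been written in this block (see the comment inside it).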
  uint16_t GetSregValueImpl(int s_reg, const SregValueMap* map) const {
    uint16_t res = kNoValue;
    auto lb = map->find(s_reg);
    if (lb != map->end()) {
      res = lb->second;
    } else {
      res = gvn_->FindValue(kNoValue, s_reg, kNoValue, kNoValue);
    }
    return res;
  }

  void SetOperandValueImpl(uint16_t s_reg, uint16_t value, SregValueMap* map) {
    DCHECK_EQ(map->count(s_reg), 0u);
    map->Put(s_reg, value);
  }

  uint16_t GetOperandValueImpl(int s_reg, const SregValueMap* map) const {
    uint16_t res = kNoValue;
    auto lb = map->find(s_reg);
    if (lb != map->end()) {
      res = lb->second;
    } else {
      // Using the original value; s_reg refers to an input reg.
      res = gvn_->LookupValue(kNoValue, s_reg, kNoValue, kNoValue);
    }
    return res;
  }

  void SetOperandValue(uint16_t s_reg, uint16_t value) {
    DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
    SetOperandValueImpl(s_reg, value, &sreg_value_map_);
  }

  uint16_t GetOperandValue(int s_reg) const {
    DCHECK_EQ(sreg_wide_value_map_.count(s_reg), 0u);
    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
    return GetOperandValueImpl(s_reg, &sreg_value_map_);
  }

  void SetOperandValueWide(uint16_t s_reg, uint16_t value) {
    DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).high_word);
    SetOperandValueImpl(s_reg, value, &sreg_wide_value_map_);
  }

  uint16_t GetOperandValueWide(int s_reg) const {
    DCHECK_EQ(sreg_value_map_.count(s_reg), 0u);
    DCHECK(gvn_->GetMirGraph()->GetRegLocation(s_reg).wide);
    DCHECK(!gvn_->GetMirGraph()->GetRegLocation(s_reg).high_word);
    return GetOperandValueImpl(s_reg, &sreg_wide_value_map_);
  }

  struct RangeCheckKey {
    uint16_t array;
    uint16_t index;

    // NOTE: Can't define this at namespace scope for a private struct.
    bool operator==(const RangeCheckKey& other) const {
      return array == other.array && index == other.index;
    }
  };

  struct RangeCheckKeyComparator {
    bool operator()(const RangeCheckKey& lhs, const RangeCheckKey& rhs) const {
      if (lhs.array != rhs.array) {
        return lhs.array < rhs.array;
      }
      return lhs.index < rhs.index;
    }
  };

  typedef ScopedArenaSet<RangeCheckKey, RangeCheckKeyComparator> RangeCheckSet;

  // Maps instance field "location" (derived from base, field_id and type) to value name.
  typedef ScopedArenaSafeMap<uint16_t, uint16_t> IFieldLocToValueMap;

  // Maps static field id to value name.
  typedef ScopedArenaSafeMap<uint16_t, uint16_t> SFieldToValueMap;

  struct EscapedIFieldClobberKey {
    uint16_t base;      // Or array.
    uint16_t type;
    uint16_t field_id;  // None (kNoValue) for arrays and unresolved instance field stores.

    // NOTE: Can't define this at namespace scope for a private struct.
    bool operator==(const EscapedIFieldClobberKey& other) const {
      return base == other.base && type == other.type && field_id == other.field_id;
    }
  };

  struct EscapedIFieldClobberKeyComparator {
    bool operator()(const EscapedIFieldClobberKey& lhs, const EscapedIFieldClobberKey& rhs) const {
      // Compare base first. This makes sequential iteration respect the order of base.
      if (lhs.base != rhs.base) {
        return lhs.base < rhs.base;
      }
      // Compare type second. This makes the type-clobber entries (field_id == kNoValue) last
      // for given base and type and makes it easy to prune unnecessary entries when merging
      // escaped_ifield_clobber_set_ from multiple LVNs.
      if (lhs.type != rhs.type) {
        return lhs.type < rhs.type;
      }
      return lhs.field_id < rhs.field_id;
    }
  };

  typedef ScopedArenaSet<EscapedIFieldClobberKey, EscapedIFieldClobberKeyComparator>
      EscapedIFieldClobberSet;

  struct EscapedArrayClobberKey {
    uint16_t base;
    uint16_t type;

    // NOTE: Can't define this at namespace scope for a private struct.
    bool operator==(const EscapedArrayClobberKey& other) const {
      return base == other.base && type == other.type;
    }
  };

  struct EscapedArrayClobberKeyComparator {
    bool operator()(const EscapedArrayClobberKey& lhs, const EscapedArrayClobberKey& rhs) const {
      // Compare base first. This makes sequential iteration respect the order of base.
      if (lhs.base != rhs.base) {
        return lhs.base < rhs.base;
      }
      return lhs.type < rhs.type;
    }
  };

  // Clobber set for previously non-aliasing array refs that escaped.
  typedef ScopedArenaSet<EscapedArrayClobberKey, EscapedArrayClobberKeyComparator>
      EscapedArrayClobberSet;

  // Known location values for an aliasing set. The set can be tied to one of:
  //   1. Instance field. The locations are aliasing references used to access the field.
  //   2. Non-aliasing array reference. The locations are indexes to the array.
  //   3. Aliasing array type. The locations are (reference, index) pair ids assigned by GVN.
  // In each case we keep track of the last stored value, if any, and the set of locations
  // where it was stored. We also keep track of all values known for the current write state
  // (load_value_map), which can be known either because they have been loaded since the last
  // store or because they contained the last_stored_value before the store and thus could not
  // have changed as a result.
  struct AliasingValues {
    explicit AliasingValues(LocalValueNumbering* lvn)
        : memory_version_before_stores(kNoValue),
          last_stored_value(kNoValue),
          store_loc_set(std::less<uint16_t>(), lvn->null_checked_.get_allocator()),
          last_load_memory_version(kNoValue),
          load_value_map(std::less<uint16_t>(), lvn->null_checked_.get_allocator()) {
    }

    uint16_t memory_version_before_stores;  // kNoValue if start version for the field.
    uint16_t last_stored_value;             // Last stored value name, kNoValue if none.
    ValueNameSet store_loc_set;             // Where was last_stored_value stored.

    // Maps locations (other than those in store_loc_set) to currently known values for this
    // field or array. On write, anything that differs from the written value is removed as it
    // may be overwritten.
    uint16_t last_load_memory_version;    // kNoValue if not known.
    ScopedArenaSafeMap<uint16_t, uint16_t> load_value_map;

    // NOTE: Can't define this at namespace scope for a private struct.
    bool operator==(const AliasingValues& other) const {
      return memory_version_before_stores == other.memory_version_before_stores &&
          last_load_memory_version == other.last_load_memory_version &&
          last_stored_value == other.last_stored_value &&
          store_loc_set == other.store_loc_set &&
          load_value_map == other.load_value_map;
    }
  };
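
  // Example of the bookkeeping above for a non-aliasing array (hypothetical value names:
  // the array ref is A, the loaded/stored values are V0/V1); a sketch of the intended
  // behavior, not a normative description:
  //   x = a[1];   // In the AliasingValues for A: load_value_map = { 1 -> V0 } and
  //               // last_load_memory_version is recorded.
  //   a[2] = y;   // last_stored_value = V1, store_loc_set = { 2 }; load_value_map entries
  //               // whose value differs from V1 are dropped (they may have been overwritten).
  //   z = a[2];   // Location 2 is in store_loc_set, so V1 is reused without a new load value.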

  // Maps instance field id to AliasingValues, locations are object refs.
  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> AliasingIFieldValuesMap;

  // Maps non-aliasing array reference to AliasingValues, locations are array indexes.
  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> NonAliasingArrayValuesMap;

  // Maps aliasing array type to AliasingValues, locations are (array, index) pair ids.
  typedef ScopedArenaSafeMap<uint16_t, AliasingValues> AliasingArrayValuesMap;

  // Helper classes defining versions for updating and merging the AliasingValues maps above.
  class AliasingIFieldVersions;
  class NonAliasingArrayVersions;
  class AliasingArrayVersions;

  template <typename Map>
  AliasingValues* GetAliasingValues(Map* map, const typename Map::key_type& key);

  template <typename Versions, typename KeyType>
  void UpdateAliasingValuesLoadVersion(const KeyType& key, AliasingValues* values);

  template <typename Versions, typename Map>
  static uint16_t AliasingValuesMergeGet(GlobalValueNumbering* gvn,
                                         const LocalValueNumbering* lvn,
                                         Map* map, const typename Map::key_type& key,
                                         uint16_t location);

  template <typename Versions, typename Map>
  uint16_t HandleAliasingValuesGet(Map* map, const typename Map::key_type& key,
                                   uint16_t location);

  template <typename Versions, typename Map>
  bool HandleAliasingValuesPut(Map* map, const typename Map::key_type& key,
                               uint16_t location, uint16_t value);

  template <typename K>
  void CopyAliasingValuesMap(ScopedArenaSafeMap<K, AliasingValues>* dest,
                             const ScopedArenaSafeMap<K, AliasingValues>& src);

  uint16_t MarkNonAliasingNonNull(MIR* mir);
  bool IsNonAliasing(uint16_t reg) const;
  bool IsNonAliasingIField(uint16_t reg, uint16_t field_id, uint16_t type) const;
  bool IsNonAliasingArray(uint16_t reg, uint16_t type) const;
  void HandleNullCheck(MIR* mir, uint16_t reg);
  void HandleRangeCheck(MIR* mir, uint16_t array, uint16_t index);
  void HandleDivZeroCheck(MIR* mir, uint16_t reg);
  void HandlePutObject(MIR* mir);
  void HandleEscapingRef(uint16_t base);
  void HandleInvokeArgs(const MIR* mir, const LocalValueNumbering* mir_lvn);
  uint16_t HandlePhi(MIR* mir);
  uint16_t HandleConst(MIR* mir, uint32_t value);
  uint16_t HandleConstWide(MIR* mir, uint64_t value);
  uint16_t HandleAGet(MIR* mir, uint16_t opcode);
  void HandleAPut(MIR* mir, uint16_t opcode);
  uint16_t HandleIGet(MIR* mir, uint16_t opcode);
  void HandleIPut(MIR* mir, uint16_t opcode);
  uint16_t HandleSGet(MIR* mir, uint16_t opcode);
  void HandleSPut(MIR* mir, uint16_t opcode);
  void RemoveSFieldsForType(uint16_t type);
  void HandleInvokeOrClInitOrAcquireOp(MIR* mir);

  bool SameMemoryVersion(const LocalValueNumbering& other) const;

  uint16_t NewMemoryVersion(uint16_t* new_version);
  void MergeMemoryVersions(bool clobbered_catch);

  void PruneNonAliasingRefsForCatch();

  template <typename Set, Set LocalValueNumbering::* set_ptr>
  void IntersectSets();

  void CopyLiveSregValues(SregValueMap* dest, const SregValueMap& src);

  // Intersect SSA reg value maps as sets, ignore dead regs.
  template <SregValueMap LocalValueNumbering::* map_ptr>
  void IntersectSregValueMaps();

  // Intersect maps as sets. The value type must be equality-comparable.
  template <typename Map>
  static void InPlaceIntersectMaps(Map* work_map, const Map& other_map);

  template <typename Set, Set LocalValueNumbering::*set_ptr, void (LocalValueNumbering::*MergeFn)(
      const typename Set::value_type& entry, typename Set::iterator hint)>
  void MergeSets();

  void IntersectAliasingValueLocations(AliasingValues* work_values, const AliasingValues* values);

  void MergeEscapedRefs(const ValueNameSet::value_type& entry, ValueNameSet::iterator hint);
  void MergeEscapedIFieldTypeClobberSets(const EscapedIFieldClobberSet::value_type& entry,
                                         EscapedIFieldClobberSet::iterator hint);
  void MergeEscapedIFieldClobberSets(const EscapedIFieldClobberSet::value_type& entry,
                                     EscapedIFieldClobberSet::iterator hint);
  void MergeEscapedArrayClobberSets(const EscapedArrayClobberSet::value_type& entry,
                                    EscapedArrayClobberSet::iterator hint);
  void MergeSFieldValues(const SFieldToValueMap::value_type& entry,
                         SFieldToValueMap::iterator hint);
  void MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
                                    IFieldLocToValueMap::iterator hint);
  void MergeNullChecked();
  void MergeDivZeroChecked();

  template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
  void MergeAliasingValues(const typename Map::value_type& entry, typename Map::iterator hint);

  GlobalValueNumbering* gvn_;

  // We're using the block id as a 16-bit operand value for some lookups.
  static_assert(sizeof(BasicBlockId) == sizeof(uint16_t), "BasicBlockId must be 16 bit");
  BasicBlockId id_;

  SregValueMap sreg_value_map_;
  SregValueMap sreg_wide_value_map_;

  SFieldToValueMap sfield_value_map_;
  IFieldLocToValueMap non_aliasing_ifield_value_map_;
  AliasingIFieldValuesMap aliasing_ifield_value_map_;
  NonAliasingArrayValuesMap non_aliasing_array_value_map_;
  AliasingArrayValuesMap aliasing_array_value_map_;

  // Data for dealing with memory clobbering and store/load aliasing.
  uint16_t global_memory_version_;
  uint16_t unresolved_sfield_version_[kDexMemAccessTypeCount];
  uint16_t unresolved_ifield_version_[kDexMemAccessTypeCount];
  // Value names of references to objects that cannot be reached through a different value name.
  ValueNameSet non_aliasing_refs_;
  // Previously non-aliasing refs that escaped but can still be used for non-aliasing AGET/IGET.
  ValueNameSet escaped_refs_;
  // Blacklists for cases where escaped_refs_ can't be used.
  EscapedIFieldClobberSet escaped_ifield_clobber_set_;
  EscapedArrayClobberSet escaped_array_clobber_set_;

  // Range check and null check elimination.
  RangeCheckSet range_checked_;
  ValueNameSet null_checked_;
  ValueNameSet div_zero_checked_;

  // Reuse one vector for all merges to avoid leaking too much memory on the ArenaStack.
  mutable ScopedArenaVector<uint16_t> merge_names_;
  // Map to identify when different locations merge the same values.
  ScopedArenaSafeMap<ScopedArenaVector<uint16_t>, uint16_t> merge_map_;
  // New memory version for merge, kNoValue if all memory versions matched.
  uint16_t merge_new_memory_version_;

  DISALLOW_COPY_AND_ASSIGN(LocalValueNumbering);
};
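
// Usage sketch (an assumption based on the interface above; the real driver lives in the
// GVN code and may differ in detail): an LVN is created per basic block, seeded either via
// PrepareEntryBlock() or by merging the predecessors' LVNs, and then queried once per MIR:
//
//   LocalValueNumbering lvn(gvn, bb_id, allocator);
//   if (is_entry_block) {
//     lvn.PrepareEntryBlock();
//   } else {
//     lvn.Merge(LocalValueNumbering::kNormalMerge);  // Merges gvn->merge_lvns_.
//   }
//   for (MIR* mir = first_mir; mir != nullptr; mir = mir->next) {
//     uint16_t value_name = lvn.GetValueNumber(mir);
//   }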

}  // namespace art

#endif  // ART_COMPILER_DEX_LOCAL_VALUE_NUMBERING_H_