1 // Copyright 2020 The Pigweed Authors
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License"); you may not
4 // use this file except in compliance with the License. You may obtain a copy of
5 // the License at
6 //
7 //     https://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 // WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 // License for the specific language governing permissions and limitations under
13 // the License.
14 #pragma once
15 
16 #include <climits>
17 #include <cstddef>
18 #include <cstdint>
19 #include <span>
20 
21 #include "pw_containers/vector.h"
22 #include "pw_kvs/flash_memory.h"
23 
24 namespace pw {
25 namespace kvs {
26 namespace internal {
27 
28 // Tracks the available and used space in each sector used by the KVS.
class SectorDescriptor {
 public:
  // The number of bytes that may still be written to this sector. If the
  // sector is marked as corrupt, it reports zero writable bytes.
  size_t writable_bytes() const {
    if (tail_free_bytes_ == kCorruptSector) {
      return 0;
    }
    return tail_free_bytes_;
  }

  void set_writable_bytes(uint16_t writable_bytes) {
    tail_free_bytes_ = writable_bytes;
  }

  // Flags this sector as corrupt; a corrupt sector exposes no writable space.
  void mark_corrupt() { tail_free_bytes_ = kCorruptSector; }
  bool corrupt() const { return tail_free_bytes_ == kCorruptSector; }

  // The number of bytes occupied by valid entries in this sector.
  size_t valid_bytes() const { return valid_bytes_; }

  // Increases the valid-byte count; the writable-byte count is untouched.
  void AddValidBytes(uint16_t bytes) { valid_bytes_ += bytes; }

  // Decreases the valid-byte count; the writable-byte count is untouched.
  // Clamps at zero rather than underflowing.
  void RemoveValidBytes(uint16_t bytes) {
    if (bytes <= valid_bytes()) {
      valid_bytes_ -= bytes;
    } else {
      // TODO: use a DCHECK instead -- this is a programming error
      valid_bytes_ = 0;
    }
  }

  // Decreases the writable-byte count; the valid-byte count is untouched.
  // Clamps at zero rather than underflowing.
  void RemoveWritableBytes(uint16_t bytes) {
    if (bytes <= writable_bytes()) {
      tail_free_bytes_ -= bytes;
    } else {
      // TODO: use a DCHECK instead -- this is a programming error
      tail_free_bytes_ = 0;
    }
  }

  // True if at least required_space bytes can still be written here.
  bool HasSpace(size_t required_space) const {
    return writable_bytes() >= required_space;
  }

  // True if nothing has been written to this sector yet (the entire sector
  // is still writable).
  bool Empty(size_t sector_size_bytes) const {
    return writable_bytes() == sector_size_bytes;
  }

  // The number of bytes that garbage collecting this sector would reclaim:
  // everything that is neither valid data nor still writable.
  size_t RecoverableBytes(size_t sector_size_bytes) const {
    return sector_size_bytes - valid_bytes_ - writable_bytes();
  }

  static constexpr size_t max_sector_size() { return kMaxSectorSize; }

 private:
  friend class Sectors;

  // Sentinel stored in tail_free_bytes_ to mark a corrupt sector.
  static constexpr uint16_t kCorruptSector = UINT16_MAX;
  // Largest supported sector size; one less than the corrupt-sector sentinel
  // so the sentinel can never be a legitimate free-byte count.
  static constexpr size_t kMaxSectorSize = UINT16_MAX - 1;

  explicit constexpr SectorDescriptor(uint16_t sector_size_bytes)
      : tail_free_bytes_(sector_size_bytes), valid_bytes_(0) {}

  uint16_t tail_free_bytes_;  // writable bytes at the end of the sector
  uint16_t valid_bytes_;      // sum of sizes of valid entries
};
98 
99 // Represents a list of sectors usable by the KVS.
100 class Sectors {
101  public:
102   using Address = FlashPartition::Address;
103 
Sectors(Vector<SectorDescriptor> & sectors,FlashPartition & partition,const SectorDescriptor ** temp_sectors_to_skip)104   constexpr Sectors(Vector<SectorDescriptor>& sectors,
105                     FlashPartition& partition,
106                     const SectorDescriptor** temp_sectors_to_skip)
107       : descriptors_(sectors),
108         partition_(partition),
109         last_new_(nullptr),
110         temp_sectors_to_skip_(temp_sectors_to_skip) {}
111 
112   // Resets the Sectors list. Must be called before using the object.
Reset()113   void Reset() {
114     last_new_ = descriptors_.begin();
115     descriptors_.assign(partition_.sector_count(),
116                         SectorDescriptor(partition_.sector_size_bytes()));
117   }
118 
119   // The last sector that was selected as the "new empty sector" to write to.
120   // This last new sector is used as the starting point for the next "find a new
121   // empty sector to write to" operation. By using the last new sector as the
122   // start point we will cycle which empty sector is selected next, spreading
123   // the wear across all the empty sectors, rather than putting more wear on the
124   // lower number sectors.
125   //
126   // Use SectorDescriptor* for the persistent storage rather than sector index
127   // because SectorDescriptor* is the standard way to identify a sector.
last_new()128   SectorDescriptor* last_new() const { return last_new_; }
129 
130   // Sets the last new sector from the provided address.
set_last_new_sector(Address address)131   void set_last_new_sector(Address address) {
132     last_new_ = &FromAddress(address);
133   }
134 
135   // Checks if an address is in the particular sector.
AddressInSector(const SectorDescriptor & sector,Address address)136   bool AddressInSector(const SectorDescriptor& sector, Address address) const {
137     const Address sector_base = BaseAddress(sector);
138     const Address sector_end = sector_base + partition_.sector_size_bytes();
139 
140     return ((address >= sector_base) && (address < sector_end));
141   }
142 
143   // Returns the first address in the provided sector.
BaseAddress(const SectorDescriptor & sector)144   Address BaseAddress(const SectorDescriptor& sector) const {
145     return Index(sector) * partition_.sector_size_bytes();
146   }
147 
FromAddress(Address address)148   SectorDescriptor& FromAddress(Address address) const {
149     // TODO: Add boundary checking once asserts are supported.
150     // DCHECK_LT(index, sector_map_size_);`
151     return descriptors_[address / partition_.sector_size_bytes()];
152   }
153 
NextWritableAddress(const SectorDescriptor & sector)154   Address NextWritableAddress(const SectorDescriptor& sector) const {
155     return BaseAddress(sector) + partition_.sector_size_bytes() -
156            sector.writable_bytes();
157   }
158 
159   // Finds either an existing sector with enough space that is not the sector to
160   // skip, or an empty sector. Maintains the invariant that there is always at
161   // least 1 empty sector. Addresses in reserved_addresses are avoided.
FindSpace(SectorDescriptor ** found_sector,size_t size,std::span<const Address> reserved_addresses)162   Status FindSpace(SectorDescriptor** found_sector,
163                    size_t size,
164                    std::span<const Address> reserved_addresses) {
165     return Find(kAppendEntry, found_sector, size, {}, reserved_addresses);
166   }
167 
168   // Same as FindSpace, except that the 1 empty sector invariant is ignored.
169   // Both addresses_to_skip and reserved_addresses are avoided.
FindSpaceDuringGarbageCollection(SectorDescriptor ** found_sector,size_t size,std::span<const Address> addresses_to_skip,std::span<const Address> reserved_addresses)170   Status FindSpaceDuringGarbageCollection(
171       SectorDescriptor** found_sector,
172       size_t size,
173       std::span<const Address> addresses_to_skip,
174       std::span<const Address> reserved_addresses) {
175     return Find(kGarbageCollect,
176                 found_sector,
177                 size,
178                 addresses_to_skip,
179                 reserved_addresses);
180   }
181 
182   // Finds a sector that is ready to be garbage collected. Returns nullptr if no
183   // sectors can / need to be garbage collected.
184   SectorDescriptor* FindSectorToGarbageCollect(
185       std::span<const Address> addresses_to_avoid) const;
186 
187   // The number of sectors in use.
size()188   size_t size() const { return descriptors_.size(); }
189 
190   // The maximum number of sectors supported.
max_size()191   size_t max_size() const { return descriptors_.max_size(); }
192 
193   // Returns the index of the provided sector. Used for logging.
Index(const SectorDescriptor & sector)194   unsigned Index(const SectorDescriptor& sector) const {
195     return &sector - descriptors_.begin();
196   }
Index(const SectorDescriptor * s)197   unsigned Index(const SectorDescriptor* s) const { return Index(*s); }
Index(Address address)198   unsigned Index(Address address) const { return Index(FromAddress(address)); }
199 
200   // Iterators for iterating over all sectors.
201   using iterator = Vector<SectorDescriptor>::iterator;
202   using const_iterator = Vector<SectorDescriptor>::const_iterator;
203 
begin()204   iterator begin() { return descriptors_.begin(); }
begin()205   const_iterator begin() const { return descriptors_.begin(); }
end()206   iterator end() { return descriptors_.end(); }
end()207   const_iterator end() const { return descriptors_.end(); }
208 
209  private:
210   enum FindMode { kAppendEntry, kGarbageCollect };
211 
212   Status Find(FindMode find_mode,
213               SectorDescriptor** found_sector,
214               size_t size,
215               std::span<const Address> addresses_to_skip,
216               std::span<const Address> reserved_addresses);
217 
218   SectorDescriptor& WearLeveledSectorFromIndex(size_t idx) const;
219 
220   Vector<SectorDescriptor>& descriptors_;
221   FlashPartition& partition_;
222 
223   SectorDescriptor* last_new_;
224 
225   // Temp buffer with space for redundancy * 2 - 1 sector pointers. This list is
226   // used to track sectors that should be excluded from Find functions.
227   const SectorDescriptor** const temp_sectors_to_skip_;
228 };
229 
230 }  // namespace internal
231 }  // namespace kvs
232 }  // namespace pw
233