// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_histogram_allocator.h"

#include <limits>
#include <memory>

#include "base/atomicops.h"
#include "base/files/file_path.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/files/memory_mapped_file.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
namespace base {

namespace {

// Type identifiers used when storing in persistent memory so they can be
// identified during extraction; the first 4 bytes of the SHA1 of the name
// are used as a unique integer. A "version number" is added to the base
// so that, if the structure of that object changes, stored older versions
// will be safely ignored.
enum : uint32_t {
  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
};
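
// For example, kTypeIdRangesArray starts from 0xBCEA225A, the first 4 bytes
// of SHA1("RangesArray"), with 1 added for the current version. A sketch of
// that derivation, assuming a SHA1() helper yielding the 20-byte digest and
// big-endian packing (both assumptions; this is not the code that generated
// the constants above):
//
//   uint8_t digest[20];
//   SHA1("RangesArray", digest);  // hypothetical helper
//   uint32_t base = (uint32_t{digest[0]} << 24) | (uint32_t{digest[1]} << 16) |
//                   (uint32_t{digest[2]} << 8) | digest[3];
//   uint32_t type_id = base + 1;  // "+ 1" is the version number.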

// The current globally-active persistent allocator for all new histograms.
// The object held here will obviously not be destructed at process exit
// but that's best since PersistentMemoryAllocator objects (that underlie
// GlobalHistogramAllocator objects) are explicitly forbidden from doing
// anything essential at exit anyway because they depend on data managed
// elsewhere that could be destructed first. An AtomicWord is used instead
// of std::atomic because the latter can create global ctors and dtors.
subtle::AtomicWord g_histogram_allocator = 0;

// Take an array of range boundaries and create a proper BucketRanges object
// which is returned to the caller. A return of nullptr indicates that the
// passed boundaries are invalid.
std::unique_ptr<BucketRanges> CreateRangesFromData(
    HistogramBase::Sample* ranges_data,
    uint32_t ranges_checksum,
    size_t count) {
  // To avoid racy destruction at shutdown, the following may be leaked.
  std::unique_ptr<BucketRanges> ranges(new BucketRanges(count));
  DCHECK_EQ(count, ranges->size());
  for (size_t i = 0; i < count; ++i) {
    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
      return nullptr;
    ranges->set_range(i, ranges_data[i]);
  }

  ranges->ResetChecksum();
  if (ranges->checksum() != ranges_checksum)
    return nullptr;

  return ranges;
}

// Calculate the number of bytes required to store all of a histogram's
// "counts". This will return zero (0) if |bucket_count| is not valid.
size_t CalculateRequiredCountsBytes(size_t bucket_count) {
  // 2 because each "sample count" also requires a backup "logged count"
  // used for calculating the delta during snapshot operations.
  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);

  // If the |bucket_count| is such that it would overflow the return type,
  // perhaps as the result of a malicious actor, then return zero to
  // indicate the problem to the caller.
  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
    return 0;

  return bucket_count * kBytesPerBucket;
}
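
// Worked example: HistogramBase::AtomicCount is a 32-bit value, so a
// histogram with 100 buckets needs 2 * 4 * 100 = 800 bytes -- 400 for the
// live counts plus 400 for the backup "logged" counts:
//
//   size_t bytes = CalculateRequiredCountsBytes(100);  // == 800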

}  // namespace

const Feature kPersistentHistogramsFeature{
  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
};


PersistentSparseHistogramDataManager::PersistentSparseHistogramDataManager(
    PersistentMemoryAllocator* allocator)
    : allocator_(allocator), record_iterator_(allocator) {}

PersistentSparseHistogramDataManager::~PersistentSparseHistogramDataManager() =
    default;

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::UseSampleMapRecords(uint64_t id,
                                                          const void* user) {
  base::AutoLock auto_lock(lock_);
  return GetSampleMapRecordsWhileLocked(id)->Acquire(user);
}

PersistentSampleMapRecords*
PersistentSparseHistogramDataManager::GetSampleMapRecordsWhileLocked(
    uint64_t id) {
  lock_.AssertAcquired();

  auto found = sample_records_.find(id);
  if (found != sample_records_.end())
    return found->second.get();

  std::unique_ptr<PersistentSampleMapRecords>& samples = sample_records_[id];
  samples = std::make_unique<PersistentSampleMapRecords>(this, id);
  return samples.get();
}

bool PersistentSparseHistogramDataManager::LoadRecords(
    PersistentSampleMapRecords* sample_map_records) {
  // DataManager must be locked in order to access the found_ field of any
  // PersistentSampleMapRecords object.
  base::AutoLock auto_lock(lock_);
  bool found = false;

  // If there are already "found" entries for the passed object, move them.
  if (!sample_map_records->found_.empty()) {
    sample_map_records->records_.reserve(sample_map_records->records_.size() +
                                         sample_map_records->found_.size());
    sample_map_records->records_.insert(sample_map_records->records_.end(),
                                        sample_map_records->found_.begin(),
                                        sample_map_records->found_.end());
    sample_map_records->found_.clear();
    found = true;
  }

  // Acquiring a lock is a semi-expensive operation so load some records with
  // each call. More than this number may be loaded if it takes longer to
  // find at least one matching record for the passed object.
  const int kMinimumNumberToLoad = 10;
  const uint64_t match_id = sample_map_records->sample_map_id_;

  // Loop while no entry is found OR we haven't yet loaded the minimum number.
  // This will continue reading even after a match is found.
  for (int count = 0; !found || count < kMinimumNumberToLoad; ++count) {
    // Get the next sample-record. The iterator will always resume from where
    // it left off even if it previously had nothing further to return.
    uint64_t found_id;
    PersistentMemoryAllocator::Reference ref =
        PersistentSampleMap::GetNextPersistentRecord(record_iterator_,
                                                     &found_id);

    // Stop immediately if there are none.
    if (!ref)
      break;

    // The sample-record could be for any sparse histogram. Add the reference
    // to the appropriate collection for later use.
    if (found_id == match_id) {
      sample_map_records->records_.push_back(ref);
      found = true;
    } else {
      PersistentSampleMapRecords* samples =
          GetSampleMapRecordsWhileLocked(found_id);
      DCHECK(samples);
      samples->found_.push_back(ref);
    }
  }

  return found;
}


PersistentSampleMapRecords::PersistentSampleMapRecords(
    PersistentSparseHistogramDataManager* data_manager,
    uint64_t sample_map_id)
    : data_manager_(data_manager), sample_map_id_(sample_map_id) {}

PersistentSampleMapRecords::~PersistentSampleMapRecords() = default;

PersistentSampleMapRecords* PersistentSampleMapRecords::Acquire(
    const void* user) {
  DCHECK(!user_);
  user_ = user;
  seen_ = 0;
  return this;
}

void PersistentSampleMapRecords::Release(const void* user) {
  DCHECK_EQ(user_, user);
  user_ = nullptr;
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::GetNext() {
  DCHECK(user_);

  // If there are no unseen records, lock and swap in all the found ones.
  if (records_.size() == seen_) {
    if (!data_manager_->LoadRecords(this))
      return 0;
  }

  // Return the next record. Records *must* be returned in the same order
  // they are found in the persistent memory in order to ensure that all
  // objects using this data always have the same state. Race conditions
  // can cause duplicate records so using the "first found" is the only
  // guarantee that all objects always access the same one.
  DCHECK_LT(seen_, records_.size());
  return records_[seen_++];
}

PersistentMemoryAllocator::Reference PersistentSampleMapRecords::CreateNew(
    HistogramBase::Sample value) {
  return PersistentSampleMap::CreatePersistentRecord(data_manager_->allocator_,
                                                     sample_map_id_, value);
}
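
// Illustrative use of the above, assuming a |manager| and the |id| of some
// sparse histogram; |this| serves as the opaque |user| token identifying the
// borrower:
//
//   PersistentSampleMapRecords* records =
//       manager->UseSampleMapRecords(id, this);
//   PersistentMemoryAllocator::Reference ref;
//   while ((ref = records->GetNext()) != 0) {
//     // Resolve |ref| against the underlying allocator...
//   }
//   records->Release(this);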


// This data will be held in persistent memory in order for processes to
// locate and use histograms created elsewhere.
struct PersistentHistogramAllocator::PersistentHistogramData {
  // SHA1(Histogram): Increment this if structure changes!
  static constexpr uint32_t kPersistentTypeId = 0xF1645910 + 3;

  // Expected size for 32/64-bit check.
  static constexpr size_t kExpectedInstanceSize =
      40 + 2 * HistogramSamples::Metadata::kExpectedInstanceSize;

  int32_t histogram_type;
  int32_t flags;
  int32_t minimum;
  int32_t maximum;
  uint32_t bucket_count;
  PersistentMemoryAllocator::Reference ranges_ref;
  uint32_t ranges_checksum;
  subtle::Atomic32 counts_ref;  // PersistentMemoryAllocator::Reference
  HistogramSamples::Metadata samples_metadata;
  HistogramSamples::Metadata logged_metadata;

  // Space for the histogram name will be added during the actual allocation
  // request. This must be the last field of the structure. A zero-size array
  // or a "flexible" array would be preferred but is not (yet) valid C++.
  char name[sizeof(uint64_t)];  // Force 64-bit alignment on 32-bit builds.
};
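
// The fixed "40" above can be verified from the fields: eight 4-byte values
// (histogram_type through counts_ref) plus the 8-byte name placeholder; the
// two Metadata blocks account for the rest. A compile-time sketch (assumes
// Reference is a 32-bit type, as in the allocator's header):
//
//   static_assert(4 * sizeof(int32_t) + 2 * sizeof(uint32_t) +
//                     sizeof(PersistentMemoryAllocator::Reference) +
//                     sizeof(subtle::Atomic32) + sizeof(uint64_t) == 40,
//                 "unexpected fixed size of PersistentHistogramData");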

PersistentHistogramAllocator::Iterator::Iterator(
    PersistentHistogramAllocator* allocator)
    : allocator_(allocator), memory_iter_(allocator->memory_allocator()) {}

std::unique_ptr<HistogramBase>
PersistentHistogramAllocator::Iterator::GetNextWithIgnore(Reference ignore) {
  PersistentMemoryAllocator::Reference ref;
  while ((ref = memory_iter_.GetNextOfType<PersistentHistogramData>()) != 0) {
    if (ref != ignore)
      return allocator_->GetHistogram(ref);
  }
  return nullptr;
}
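
// Illustrative iteration over every histogram in an allocator, assuming the
// GetNext() convenience (equivalent to GetNextWithIgnore(0)) declared in the
// header:
//
//   PersistentHistogramAllocator::Iterator iter(allocator);
//   std::unique_ptr<HistogramBase> histogram;
//   while ((histogram = iter.GetNext()) != nullptr)
//     StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());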


PersistentHistogramAllocator::PersistentHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : memory_allocator_(std::move(memory)),
      sparse_histogram_data_manager_(memory_allocator_.get()) {}

PersistentHistogramAllocator::~PersistentHistogramAllocator() = default;

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
    Reference ref) {
  // Unfortunately, the histogram "pickle" methods cannot be used as part of
  // the persistence because the deserialization methods always create local
  // count data (while these must reference the persistent counts) and always
  // add it to the local list of known histograms (while these may be simple
  // references to histograms in other processes).
  PersistentHistogramData* data =
      memory_allocator_->GetAsObject<PersistentHistogramData>(ref);
  const size_t length = memory_allocator_->GetAllocSize(ref);

  // Check that metadata is reasonable: name is null-terminated and non-empty,
  // ID fields have been loaded with a hash of the name (0 is considered
  // unset/invalid).
  if (!data || data->name[0] == '\0' ||
      reinterpret_cast<char*>(data)[length - 1] != '\0' ||
      data->samples_metadata.id == 0 || data->logged_metadata.id == 0 ||
      // Note: Sparse histograms use |id + 1| in |logged_metadata|.
      (data->logged_metadata.id != data->samples_metadata.id &&
       data->logged_metadata.id != data->samples_metadata.id + 1) ||
      // Most non-matching values happen due to truncated names. Ideally, we
      // could just verify the name length based on the overall alloc length,
      // but that doesn't work because the allocated block may have been
      // aligned to the next boundary value.
      HashMetricName(data->name) != data->samples_metadata.id) {
    return nullptr;
  }
  return CreateHistogram(data);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
    HistogramType histogram_type,
    const std::string& name,
    int minimum,
    int maximum,
    const BucketRanges* bucket_ranges,
    int32_t flags,
    Reference* ref_ptr) {
  // If the allocator is corrupt, don't waste time trying anything else.
  // This also allows differentiating on the dashboard between allocations
  // failed due to a corrupt allocator and the number of process instances
  // with one, the latter being indicated by "newly corrupt", below.
  if (memory_allocator_->IsCorrupt())
    return nullptr;

  // Create the metadata necessary for a persistent sparse histogram. This
  // is done first because it is a small subset of what is required for
  // other histograms. The type is "under construction" so that a crash
  // during the datafill doesn't leave a bad record around that could cause
  // confusion by another process trying to read it. It will be corrected
  // once histogram construction is complete.
  PersistentHistogramData* histogram_data =
      memory_allocator_->New<PersistentHistogramData>(
          offsetof(PersistentHistogramData, name) + name.length() + 1);
  if (histogram_data) {
    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
    histogram_data->histogram_type = histogram_type;
    histogram_data->flags = flags | HistogramBase::kIsPersistent;
  }

  // Create the remaining metadata necessary for regular histograms.
  if (histogram_type != SPARSE_HISTOGRAM) {
    size_t bucket_count = bucket_ranges->bucket_count();
    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
    if (counts_bytes == 0) {
      // |bucket_count| was out-of-range.
      return nullptr;
    }

    // Since the StatisticsRecorder keeps a global collection of BucketRanges
    // objects for re-use, it would be dangerous for one to hold a reference
    // from a persistent allocator that is not the global one (which is
    // permanent once set). If this stops being the case, this check can
    // become an "if" condition beside "!ranges_ref" below and before
    // set_persistent_reference() farther down.
    DCHECK_EQ(this, GlobalHistogramAllocator::Get());

    // Re-use an existing BucketRanges persistent allocation if one is known;
    // otherwise, create one.
    PersistentMemoryAllocator::Reference ranges_ref =
        bucket_ranges->persistent_reference();
    if (!ranges_ref) {
      size_t ranges_count = bucket_count + 1;
      size_t ranges_bytes = ranges_count * sizeof(HistogramBase::Sample);
      ranges_ref =
          memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
      if (ranges_ref) {
        HistogramBase::Sample* ranges_data =
            memory_allocator_->GetAsArray<HistogramBase::Sample>(
                ranges_ref, kTypeIdRangesArray, ranges_count);
        if (ranges_data) {
          for (size_t i = 0; i < bucket_ranges->size(); ++i)
            ranges_data[i] = bucket_ranges->range(i);
          bucket_ranges->set_persistent_reference(ranges_ref);
        } else {
          // This should never happen but be tolerant if it does.
          ranges_ref = PersistentMemoryAllocator::kReferenceNull;
        }
      }
    } else {
      DCHECK_EQ(kTypeIdRangesArray, memory_allocator_->GetType(ranges_ref));
    }

    // Only continue here if all allocations were successful. If they weren't,
    // there is no way to free the space but that's not really a problem since
    // the allocations only fail because the space is full or corrupt and so
    // any future attempts will also fail.
    if (ranges_ref && histogram_data) {
      histogram_data->minimum = minimum;
      histogram_data->maximum = maximum;
      // |bucket_count| must fit within 32-bits or the allocation of the counts
      // array would have failed for being too large; the allocator supports
      // less than 4GB total size.
      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
      histogram_data->ranges_ref = ranges_ref;
      histogram_data->ranges_checksum = bucket_ranges->checksum();
    } else {
      histogram_data = nullptr;  // Clear this for proper handling below.
    }
  }

  if (histogram_data) {
    // Create the histogram using resources in persistent memory. This ends up
    // resolving the "ref" values stored in histogram_data instead of just
    // using what is already known above but avoids duplicating the switch
    // statement here and serves as a double-check that everything is
    // correct before committing the new histogram to persistent space.
    std::unique_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
    DCHECK(histogram);
    DCHECK_NE(0U, histogram_data->samples_metadata.id);
    DCHECK_NE(0U, histogram_data->logged_metadata.id);

    PersistentMemoryAllocator::Reference histogram_ref =
        memory_allocator_->GetAsReference(histogram_data);
    if (ref_ptr != nullptr)
      *ref_ptr = histogram_ref;

    // By storing the reference within the allocator to this histogram, the
    // next import (which will happen before the next histogram creation)
    // will know to skip it.
    // See also the comment in ImportHistogramsToStatisticsRecorder().
    subtle::NoBarrier_Store(&last_created_, histogram_ref);
    return histogram;
  }

  return nullptr;
}

void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
                                                     bool registered) {
  if (registered) {
    // If the created persistent histogram was registered then it needs to
    // be marked as "iterable" in order to be found by other processes. This
    // happens only after the histogram is fully formed so it's impossible for
    // code iterating through the allocator to read a partially created record.
    memory_allocator_->MakeIterable(ref);
  } else {
    // If it wasn't registered then a race condition must have caused two to
    // be created. The allocator does not support releasing the acquired memory
    // so just change the type to be empty.
    memory_allocator_->ChangeType(ref, 0,
                                  PersistentHistogramData::kPersistentTypeId,
                                  /*clear=*/false);
  }
}

void PersistentHistogramAllocator::MergeHistogramDeltaToStatisticsRecorder(
    HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // The data won't be merged but it also won't be recorded as merged
    // so a future try, if successful, will get what was missed. If it
    // continues to fail, some metric data will be lost but that is better
    // than crashing.
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotDelta());
}

void PersistentHistogramAllocator::MergeHistogramFinalDeltaToStatisticsRecorder(
    const HistogramBase* histogram) {
  DCHECK(histogram);

  HistogramBase* existing = GetOrCreateStatisticsRecorderHistogram(histogram);
  if (!existing) {
    // The above should never fail but if it does, no real harm is done.
    // Some metric data will be lost but that is better than crashing.
    return;
  }

  // Merge the delta from the passed object to the one in the SR.
  existing->AddSamples(*histogram->SnapshotFinalDelta());
}

PersistentSampleMapRecords* PersistentHistogramAllocator::UseSampleMapRecords(
    uint64_t id,
    const void* user) {
  return sparse_histogram_data_manager_.UseSampleMapRecords(id, user);
}

void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
  memory_allocator_->CreateTrackingHistograms(name);
}

void PersistentHistogramAllocator::UpdateTrackingHistograms() {
  memory_allocator_->UpdateTrackingHistograms();
}

void PersistentHistogramAllocator::ClearLastCreatedReferenceForTesting() {
  subtle::NoBarrier_Store(&last_created_, 0);
}

std::unique_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
    PersistentHistogramData* histogram_data_ptr) {
  if (!histogram_data_ptr)
    return nullptr;

  // Sparse histograms are quite different so handle them as a special case.
  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
    std::unique_ptr<HistogramBase> histogram =
        SparseHistogram::PersistentCreate(this, histogram_data_ptr->name,
                                          &histogram_data_ptr->samples_metadata,
                                          &histogram_data_ptr->logged_metadata);
    DCHECK(histogram);
    histogram->SetFlags(histogram_data_ptr->flags);
    return histogram;
  }

  // Copy the configuration fields from histogram_data_ptr to local storage
  // because anything in persistent memory cannot be trusted as it could be
  // changed at any moment by a malicious actor that shares access. The local
  // values are validated below and then used to create the histogram, knowing
  // they haven't changed between validation and use.
  int32_t histogram_type = histogram_data_ptr->histogram_type;
  int32_t histogram_flags = histogram_data_ptr->flags;
  int32_t histogram_minimum = histogram_data_ptr->minimum;
  int32_t histogram_maximum = histogram_data_ptr->maximum;
  uint32_t histogram_bucket_count = histogram_data_ptr->bucket_count;
  uint32_t histogram_ranges_ref = histogram_data_ptr->ranges_ref;
  uint32_t histogram_ranges_checksum = histogram_data_ptr->ranges_checksum;

  HistogramBase::Sample* ranges_data =
      memory_allocator_->GetAsArray<HistogramBase::Sample>(
          histogram_ranges_ref, kTypeIdRangesArray,
          PersistentMemoryAllocator::kSizeAny);

  const uint32_t max_buckets =
      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
  size_t required_bytes =
      (histogram_bucket_count + 1) * sizeof(HistogramBase::Sample);
  size_t allocated_bytes =
      memory_allocator_->GetAllocSize(histogram_ranges_ref);
  if (!ranges_data || histogram_bucket_count < 2 ||
      histogram_bucket_count >= max_buckets ||
      allocated_bytes < required_bytes) {
    return nullptr;
  }

  std::unique_ptr<const BucketRanges> created_ranges = CreateRangesFromData(
      ranges_data, histogram_ranges_checksum, histogram_bucket_count + 1);
  if (!created_ranges)
    return nullptr;
  const BucketRanges* ranges =
      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
          created_ranges.release());

  size_t counts_bytes = CalculateRequiredCountsBytes(histogram_bucket_count);
  PersistentMemoryAllocator::Reference counts_ref =
      subtle::Acquire_Load(&histogram_data_ptr->counts_ref);
  if (counts_bytes == 0 ||
      (counts_ref != 0 &&
       memory_allocator_->GetAllocSize(counts_ref) < counts_bytes)) {
    return nullptr;
  }

  // The "counts" data (including both samples and logged samples) is a delayed
  // persistent allocation meaning that though its size and storage for a
  // reference is defined, no space is reserved until actually needed. When
  // it is needed, memory will be allocated from the persistent segment and
  // a reference to it stored at the passed address. Other threads can then
  // notice the valid reference and access the same data.
  DelayedPersistentAllocation counts_data(memory_allocator_.get(),
                                          &histogram_data_ptr->counts_ref,
                                          kTypeIdCountsArray, counts_bytes, 0);

  // A second delayed allocation is defined using the same reference storage
  // location as the first so the allocation of one will automatically be found
  // by the other. Within the block, the first half of the space is for "counts"
  // and the second half is for "logged counts".
  DelayedPersistentAllocation logged_data(
      memory_allocator_.get(), &histogram_data_ptr->counts_ref,
      kTypeIdCountsArray, counts_bytes, counts_bytes / 2,
      /*make_iterable=*/false);

  // Create the right type of histogram.
  const char* name = histogram_data_ptr->name;
  std::unique_ptr<HistogramBase> histogram;
  switch (histogram_type) {
    case HISTOGRAM:
      histogram = Histogram::PersistentCreate(
          name, histogram_minimum, histogram_maximum, ranges, counts_data,
          logged_data, &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case LINEAR_HISTOGRAM:
      histogram = LinearHistogram::PersistentCreate(
          name, histogram_minimum, histogram_maximum, ranges, counts_data,
          logged_data, &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case BOOLEAN_HISTOGRAM:
      histogram = BooleanHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    case CUSTOM_HISTOGRAM:
      histogram = CustomHistogram::PersistentCreate(
          name, ranges, counts_data, logged_data,
          &histogram_data_ptr->samples_metadata,
          &histogram_data_ptr->logged_metadata);
      DCHECK(histogram);
      break;
    default:
      return nullptr;
  }

  if (histogram) {
    DCHECK_EQ(histogram_type, histogram->GetHistogramType());
    histogram->SetFlags(histogram_flags);
  }

  return histogram;
}

HistogramBase*
PersistentHistogramAllocator::GetOrCreateStatisticsRecorderHistogram(
    const HistogramBase* histogram) {
  // This should never be called on the global histogram allocator as objects
  // created there are already within the global statistics recorder.
  DCHECK_NE(GlobalHistogramAllocator::Get(), this);
  DCHECK(histogram);

  HistogramBase* existing =
      StatisticsRecorder::FindHistogram(histogram->histogram_name());
  if (existing)
    return existing;

  // Adding the passed histogram to the SR would cause a problem if the
  // allocator that holds it eventually goes away. Instead, create a new
  // one from a serialized version. Deserialization calls the appropriate
  // FactoryGet() which will create the histogram in the global persistent-
  // histogram allocator if such is set.
  base::Pickle pickle;
  histogram->SerializeInfo(&pickle);
  PickleIterator iter(pickle);
  existing = DeserializeHistogramInfo(&iter);
  if (!existing)
    return nullptr;

  // Make sure there is no "serialization" flag set.
  DCHECK_EQ(0, existing->flags() & HistogramBase::kIPCSerializationSourceFlag);
  // Record the newly created histogram in the SR.
  return StatisticsRecorder::RegisterOrDeleteDuplicate(existing);
}

GlobalHistogramAllocator::~GlobalHistogramAllocator() = default;

// static
void GlobalHistogramAllocator::CreateWithPersistentMemory(
    void* base,
    size_t size,
    size_t page_size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(
      new GlobalHistogramAllocator(std::make_unique<PersistentMemoryAllocator>(
          base, size, page_size, id, name, false))));
}

// static
void GlobalHistogramAllocator::CreateWithLocalMemory(
    size_t size,
    uint64_t id,
    StringPiece name) {
  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<LocalPersistentMemoryAllocator>(size, id, name))));
}

#if !defined(OS_NACL)
// static
bool GlobalHistogramAllocator::CreateWithFile(
    const FilePath& file_path,
    size_t size,
    uint64_t id,
    StringPiece name) {
  bool exists = PathExists(file_path);
  File file(
      file_path, File::FLAG_OPEN_ALWAYS | File::FLAG_SHARE_DELETE |
                 File::FLAG_READ | File::FLAG_WRITE);

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  if (exists) {
    size = saturated_cast<size_t>(file.GetLength());
    mmfile->Initialize(std::move(file), MemoryMappedFile::READ_WRITE);
  } else {
    mmfile->Initialize(std::move(file), {0, size},
                       MemoryMappedFile::READ_WRITE_EXTEND);
  }
  if (!mmfile->IsValid() ||
      !FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true)) {
    return false;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<FilePersistentMemoryAllocator>(std::move(mmfile), size,
                                                      id, name, false))));
  Get()->SetPersistentLocation(file_path);
  return true;
}
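
// Illustrative call, with a hypothetical path, size, and id:
//
//   GlobalHistogramAllocator::CreateWithFile(
//       FilePath(FILE_PATH_LITERAL("metrics.pma")),
//       1 << 20 /* 1 MiB */, 0x4D5A3B21 /* hypothetical id */, "AppMetrics");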

// static
bool GlobalHistogramAllocator::CreateWithActiveFile(const FilePath& base_path,
                                                    const FilePath& active_path,
                                                    const FilePath& spare_path,
                                                    size_t size,
                                                    uint64_t id,
                                                    StringPiece name) {
  // Old "active" becomes "base".
  if (!base::ReplaceFile(active_path, base_path, nullptr))
    base::DeleteFile(base_path, /*recursive=*/false);
  DCHECK(!base::PathExists(active_path));

  // Move any "spare" into "active". Okay to continue if file doesn't exist.
  if (!spare_path.empty()) {
    base::ReplaceFile(spare_path, active_path, nullptr);
    DCHECK(!base::PathExists(spare_path));
  }

  return base::GlobalHistogramAllocator::CreateWithFile(active_path, size, id,
                                                        name);
}

// static
bool GlobalHistogramAllocator::CreateWithActiveFileInDir(const FilePath& dir,
                                                         size_t size,
                                                         uint64_t id,
                                                         StringPiece name) {
  FilePath base_path, active_path, spare_path;
  ConstructFilePaths(dir, name, &base_path, &active_path, &spare_path);
  return CreateWithActiveFile(base_path, active_path, spare_path, size, id,
                              name);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePath(const FilePath& dir,
                                                     StringPiece name) {
  return dir.AppendASCII(name).AddExtension(
      PersistentMemoryAllocator::kFileExtension);
}

// static
FilePath GlobalHistogramAllocator::ConstructFilePathForUploadDir(
    const FilePath& dir,
    StringPiece name,
    base::Time stamp,
    ProcessId pid) {
  return ConstructFilePath(
      dir,
      StringPrintf("%.*s-%lX-%lX", static_cast<int>(name.length()), name.data(),
                   static_cast<long>(stamp.ToTimeT()), static_cast<long>(pid)));
}
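
// The produced name has the form "<name>-<stamp>-<pid>" with the timestamp
// and process id rendered as uppercase hex. With hypothetical values (and
// assuming the allocator's usual file extension):
//
//   ConstructFilePathForUploadDir(dir, "AppMetrics", stamp, pid)
//       ==> dir/AppMetrics-5A8F11C2-1F40.pma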

// static
bool GlobalHistogramAllocator::ParseFilePath(const FilePath& path,
                                             std::string* out_name,
                                             Time* out_stamp,
                                             ProcessId* out_pid) {
  std::string filename = path.BaseName().AsUTF8Unsafe();
  std::vector<base::StringPiece> parts = base::SplitStringPiece(
      filename, "-.", base::KEEP_WHITESPACE, base::SPLIT_WANT_ALL);
  if (parts.size() != 4)
    return false;

  if (out_name)
    *out_name = parts[0].as_string();

  if (out_stamp) {
    int64_t stamp;
    if (!HexStringToInt64(parts[1], &stamp))
      return false;
    *out_stamp = Time::FromTimeT(static_cast<time_t>(stamp));
  }

  if (out_pid) {
    int64_t pid;
    if (!HexStringToInt64(parts[2], &pid))
      return false;
    *out_pid = static_cast<ProcessId>(pid);
  }

  return true;
}

// static
void GlobalHistogramAllocator::ConstructFilePaths(const FilePath& dir,
                                                  StringPiece name,
                                                  FilePath* out_base_path,
                                                  FilePath* out_active_path,
                                                  FilePath* out_spare_path) {
  if (out_base_path)
    *out_base_path = ConstructFilePath(dir, name);

  if (out_active_path) {
    *out_active_path =
        ConstructFilePath(dir, name.as_string().append("-active"));
  }

  if (out_spare_path) {
    *out_spare_path = ConstructFilePath(dir, name.as_string().append("-spare"));
  }
}
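
// For dir "/tmp" and name "AppMetrics", this yields (again assuming the
// allocator's usual file extension):
//
//   base:   /tmp/AppMetrics.pma
//   active: /tmp/AppMetrics-active.pma
//   spare:  /tmp/AppMetrics-spare.pma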

// static
void GlobalHistogramAllocator::ConstructFilePathsForUploadDir(
    const FilePath& active_dir,
    const FilePath& upload_dir,
    const std::string& name,
    FilePath* out_upload_path,
    FilePath* out_active_path,
    FilePath* out_spare_path) {
  if (out_upload_path) {
    *out_upload_path = ConstructFilePathForUploadDir(
        upload_dir, name, Time::Now(), GetCurrentProcId());
  }

  if (out_active_path) {
    *out_active_path =
        ConstructFilePath(active_dir, name + std::string("-active"));
  }

  if (out_spare_path) {
    *out_spare_path =
        ConstructFilePath(active_dir, name + std::string("-spare"));
  }
}

// static
bool GlobalHistogramAllocator::CreateSpareFile(const FilePath& spare_path,
                                               size_t size) {
  FilePath temp_spare_path = spare_path.AddExtension(FILE_PATH_LITERAL(".tmp"));
  bool success = true;
  {
    File spare_file(temp_spare_path, File::FLAG_CREATE_ALWAYS |
                                         File::FLAG_READ | File::FLAG_WRITE);
    if (!spare_file.IsValid())
      return false;

    MemoryMappedFile mmfile;
    mmfile.Initialize(std::move(spare_file), {0, size},
                      MemoryMappedFile::READ_WRITE_EXTEND);
    success = mmfile.IsValid();
  }

  if (success)
    success = ReplaceFile(temp_spare_path, spare_path, nullptr);

  if (!success)
    DeleteFile(temp_spare_path, /*recursive=*/false);

  return success;
}

// static
bool GlobalHistogramAllocator::CreateSpareFileInDir(const FilePath& dir,
                                                    size_t size,
                                                    StringPiece name) {
  FilePath spare_path;
  ConstructFilePaths(dir, name, nullptr, nullptr, &spare_path);
  return CreateSpareFile(spare_path, size);
}
#endif  // !defined(OS_NACL)

// static
void GlobalHistogramAllocator::CreateWithSharedMemoryHandle(
    const SharedMemoryHandle& handle,
    size_t size) {
  std::unique_ptr<SharedMemory> shm(
      new SharedMemory(handle, /*readonly=*/false));
  if (!shm->Map(size) ||
      !SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(*shm)) {
    return;
  }

  Set(WrapUnique(new GlobalHistogramAllocator(
      std::make_unique<SharedPersistentMemoryAllocator>(
          std::move(shm), 0, StringPiece(), /*readonly=*/false))));
}

// static
void GlobalHistogramAllocator::Set(
    std::unique_ptr<GlobalHistogramAllocator> allocator) {
  // Releasing or changing an allocator is extremely dangerous because it
  // likely has histograms stored within it. If the backing memory is also
  // released, future accesses to those histograms will seg-fault.
  CHECK(!subtle::NoBarrier_Load(&g_histogram_allocator));
  subtle::Release_Store(&g_histogram_allocator,
                        reinterpret_cast<uintptr_t>(allocator.release()));
  size_t existing = StatisticsRecorder::GetHistogramCount();

  DVLOG_IF(1, existing)
      << existing << " histograms were created before persistence was enabled.";
}

// static
GlobalHistogramAllocator* GlobalHistogramAllocator::Get() {
  return reinterpret_cast<GlobalHistogramAllocator*>(
      subtle::Acquire_Load(&g_histogram_allocator));
}

// static
std::unique_ptr<GlobalHistogramAllocator>
GlobalHistogramAllocator::ReleaseForTesting() {
  GlobalHistogramAllocator* histogram_allocator = Get();
  if (!histogram_allocator)
    return nullptr;
  PersistentMemoryAllocator* memory_allocator =
      histogram_allocator->memory_allocator();

  // Before releasing the memory, it's necessary to have the Statistics-
  // Recorder forget about the histograms contained therein; otherwise,
  // some operations will try to access them and the released memory.
  PersistentMemoryAllocator::Iterator iter(memory_allocator);
  const PersistentHistogramData* data;
  while ((data = iter.GetNextOfObject<PersistentHistogramData>()) != nullptr) {
    StatisticsRecorder::ForgetHistogramForTesting(data->name);
  }

  subtle::Release_Store(&g_histogram_allocator, 0);
  return WrapUnique(histogram_allocator);
}

void GlobalHistogramAllocator::SetPersistentLocation(const FilePath& location) {
  persistent_location_ = location;
}

const FilePath& GlobalHistogramAllocator::GetPersistentLocation() const {
  return persistent_location_;
}

bool GlobalHistogramAllocator::WriteToPersistentLocation() {
#if defined(OS_NACL)
  // NACL doesn't support file operations, including ImportantFileWriter.
  NOTREACHED();
  return false;
#else
  // Stop if no destination is set.
  if (persistent_location_.empty()) {
    NOTREACHED() << "Could not write \"" << Name() << "\" persistent histograms"
                 << " to file because no location was set.";
    return false;
  }

  StringPiece contents(static_cast<const char*>(data()), used());
  if (!ImportantFileWriter::WriteFileAtomically(persistent_location_,
                                                contents)) {
    LOG(ERROR) << "Could not write \"" << Name() << "\" persistent histograms"
               << " to file: " << persistent_location_.value();
    return false;
  }

  return true;
#endif
}

void GlobalHistogramAllocator::DeletePersistentLocation() {
  memory_allocator()->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);

#if defined(OS_NACL)
  NOTREACHED();
#else
  if (persistent_location_.empty())
    return;

  // Open (with delete) and then immediately close the file by going out of
  // scope. This is the only cross-platform safe way to delete a file that may
  // be open elsewhere. Open handles will continue to operate normally but
  // new opens will not be possible.
  File file(persistent_location_,
            File::FLAG_OPEN | File::FLAG_READ | File::FLAG_DELETE_ON_CLOSE);
#endif
}

GlobalHistogramAllocator::GlobalHistogramAllocator(
    std::unique_ptr<PersistentMemoryAllocator> memory)
    : PersistentHistogramAllocator(std::move(memory)),
      import_iterator_(this) {
}

void GlobalHistogramAllocator::ImportHistogramsToStatisticsRecorder() {
  // Skip the import if it's the histogram that was last created. Should a
  // race condition cause the "last created" to be overwritten before it
  // is recognized here then the histogram will be created and be ignored
  // when it is detected as a duplicate by the statistics-recorder. This
  // simple check reduces the time of creating persistent histograms by
  // about 40%.
  Reference record_to_ignore = last_created();

  // There is no lock on this because the iterator is lock-free while still
  // guaranteed to return each entry only once. The StatisticsRecorder
  // has its own lock so the Register operation is safe.
  while (true) {
    std::unique_ptr<HistogramBase> histogram =
        import_iterator_.GetNextWithIgnore(record_to_ignore);
    if (!histogram)
      break;
    StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
  }
}

}  // namespace base