/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define STATSD_DEBUG false  // STOPSHIP if true
#include "Log.h"

#include "GaugeMetricProducer.h"

#include "guardrail/StatsdStats.h"
#include "metrics/parsing_utils/metrics_manager_util.h"
#include "stats_log_util.h"

using android::util::FIELD_COUNT_REPEATED;
using android::util::FIELD_TYPE_BOOL;
using android::util::FIELD_TYPE_FLOAT;
using android::util::FIELD_TYPE_INT32;
using android::util::FIELD_TYPE_INT64;
using android::util::FIELD_TYPE_MESSAGE;
using android::util::FIELD_TYPE_STRING;
using android::util::ProtoOutputStream;
using std::map;
using std::string;
using std::unordered_map;
using std::vector;
using std::make_shared;
using std::shared_ptr;

namespace android {
namespace os {
namespace statsd {

// for StatsLogReport
const int FIELD_ID_ID = 1;
const int FIELD_ID_GAUGE_METRICS = 8;
const int FIELD_ID_TIME_BASE = 9;
const int FIELD_ID_BUCKET_SIZE = 10;
const int FIELD_ID_DIMENSION_PATH_IN_WHAT = 11;
const int FIELD_ID_IS_ACTIVE = 14;
const int FIELD_ID_DIMENSION_GUARDRAIL_HIT = 17;
const int FIELD_ID_ESTIMATED_MEMORY_BYTES = 18;
// for GaugeMetricDataWrapper
const int FIELD_ID_DATA = 1;
const int FIELD_ID_SKIPPED = 2;
// for SkippedBuckets
const int FIELD_ID_SKIPPED_START_MILLIS = 3;
const int FIELD_ID_SKIPPED_END_MILLIS = 4;
const int FIELD_ID_SKIPPED_DROP_EVENT = 5;
// for DumpEvent Proto
const int FIELD_ID_BUCKET_DROP_REASON = 1;
const int FIELD_ID_DROP_TIME = 2;
// for GaugeMetricData
const int FIELD_ID_DIMENSION_IN_WHAT = 1;
const int FIELD_ID_BUCKET_INFO = 3;
const int FIELD_ID_DIMENSION_LEAF_IN_WHAT = 4;
// for GaugeBucketInfo
const int FIELD_ID_BUCKET_NUM = 6;
const int FIELD_ID_START_BUCKET_ELAPSED_MILLIS = 7;
const int FIELD_ID_END_BUCKET_ELAPSED_MILLIS = 8;
const int FIELD_ID_AGGREGATED_ATOM = 9;
// for AggregatedAtomInfo
const int FIELD_ID_ATOM_VALUE = 1;
const int FIELD_ID_ATOM_TIMESTAMPS = 2;

GaugeMetricProducer::GaugeMetricProducer(
        const ConfigKey& key, const GaugeMetric& metric, const int conditionIndex,
        const vector<ConditionState>& initialConditionCache, const sp<ConditionWizard>& wizard,
        const uint64_t protoHash, const int whatMatcherIndex,
        const sp<EventMatcherWizard>& matcherWizard, const int pullTagId, const int triggerAtomId,
        const int atomId, const int64_t timeBaseNs, const int64_t startTimeNs,
        const sp<StatsPullerManager>& pullerManager,
        const wp<ConfigMetadataProvider> configMetadataProvider,
        const unordered_map<int, shared_ptr<Activation>>& eventActivationMap,
        const unordered_map<int, vector<shared_ptr<Activation>>>& eventDeactivationMap,
        const size_t dimensionSoftLimit, const size_t dimensionHardLimit)
    : MetricProducer(metric.id(), key, timeBaseNs, conditionIndex, initialConditionCache, wizard,
                     protoHash, eventActivationMap, eventDeactivationMap, /*slicedStateAtoms=*/{},
                     /*stateGroupMap=*/{}, getAppUpgradeBucketSplit(metric),
                     configMetadataProvider),
      mWhatMatcherIndex(whatMatcherIndex),
      mEventMatcherWizard(matcherWizard),
      mPullerManager(pullerManager),
      mPullTagId(pullTagId),
      mTriggerAtomId(triggerAtomId),
      mAtomId(atomId),
      mIsPulled(pullTagId != -1),
      mMinBucketSizeNs(metric.min_bucket_size_nanos()),
      mSamplingType(metric.sampling_type()),
      mMaxPullDelayNs(metric.max_pull_delay_sec() > 0 ? metric.max_pull_delay_sec() * NS_PER_SEC
                                                      : StatsdStats::kPullMaxDelayNs),
      mDimensionSoftLimit(dimensionSoftLimit),
      mDimensionHardLimit(dimensionHardLimit),
      mGaugeAtomsPerDimensionLimit(metric.max_num_gauge_atoms_per_bucket()),
      mDimensionGuardrailHit(false),
      mSamplingPercentage(metric.sampling_percentage()),
      mPullProbability(metric.pull_probability()) {
    mCurrentSlicedBucket = std::make_shared<DimToGaugeAtomsMap>();
    mCurrentSlicedBucketForAnomaly = std::make_shared<DimToValMap>();
    int64_t bucketSizeMills = 0;
    if (metric.has_bucket()) {
        bucketSizeMills = TimeUnitToBucketSizeInMillisGuardrailed(key.GetUid(), metric.bucket());
    } else {
        bucketSizeMills = TimeUnitToBucketSizeInMillis(ONE_HOUR);
    }
    mBucketSizeNs = bucketSizeMills * 1000000;

    if (!metric.gauge_fields_filter().include_all()) {
        translateFieldMatcher(metric.gauge_fields_filter().fields(), &mFieldMatchers);
    }

    if (metric.has_dimensions_in_what()) {
        translateFieldMatcher(metric.dimensions_in_what(), &mDimensionsInWhat);
        mContainANYPositionInDimensionsInWhat = HasPositionANY(metric.dimensions_in_what());
    }

    if (metric.links().size() > 0) {
        for (const auto& link : metric.links()) {
            Metric2Condition mc;
            mc.conditionId = link.condition();
            translateFieldMatcher(link.fields_in_what(), &mc.metricFields);
            translateFieldMatcher(link.fields_in_condition(), &mc.conditionFields);
            mMetric2ConditionLinks.push_back(mc);
        }
        mConditionSliced = true;
    }
    mShouldUseNestedDimensions = ShouldUseNestedDimensions(metric.dimensions_in_what());

    flushIfNeededLocked(startTimeNs);
    // Kicks off the puller immediately.
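    // Registering with the StatsPullerManager schedules pulls aligned to bucket boundaries and
    // delivers the data back through onDataPulled(), so sampling-based metrics get fresh data
    // each bucket without needing a trigger atom.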
    if (mIsPulled && isRandomNSamples()) {
        mPullerManager->RegisterReceiver(mPullTagId, mConfigKey, this, getCurrentBucketEndTimeNs(),
                                         mBucketSizeNs);
    }

    // Adjust start for partial first bucket and then pull if needed
    mCurrentBucketStartTimeNs = startTimeNs;

    VLOG("Gauge metric %lld created. bucket size %lld start_time: %lld sliced %d",
         (long long)mMetricId, (long long)mBucketSizeNs, (long long)mTimeBaseNs, mConditionSliced);
}

GaugeMetricProducer::~GaugeMetricProducer() {
    VLOG("~GaugeMetricProducer() called");
    if (mIsPulled && isRandomNSamples()) {
        mPullerManager->UnRegisterReceiver(mPullTagId, mConfigKey, this);
    }
}

optional<InvalidConfigReason> GaugeMetricProducer::onConfigUpdatedLocked(
        const StatsdConfig& config, const int configIndex, const int metricIndex,
        const vector<sp<AtomMatchingTracker>>& allAtomMatchingTrackers,
        const unordered_map<int64_t, int>& oldAtomMatchingTrackerMap,
        const unordered_map<int64_t, int>& newAtomMatchingTrackerMap,
        const sp<EventMatcherWizard>& matcherWizard,
        const vector<sp<ConditionTracker>>& allConditionTrackers,
        const unordered_map<int64_t, int>& conditionTrackerMap, const sp<ConditionWizard>& wizard,
        const unordered_map<int64_t, int>& metricToActivationMap,
        unordered_map<int, vector<int>>& trackerToMetricMap,
        unordered_map<int, vector<int>>& conditionToMetricMap,
        unordered_map<int, vector<int>>& activationAtomTrackerToMetricMap,
        unordered_map<int, vector<int>>& deactivationAtomTrackerToMetricMap,
        vector<int>& metricsWithActivation) {
    optional<InvalidConfigReason> invalidConfigReason = MetricProducer::onConfigUpdatedLocked(
            config, configIndex, metricIndex, allAtomMatchingTrackers, oldAtomMatchingTrackerMap,
            newAtomMatchingTrackerMap, matcherWizard, allConditionTrackers, conditionTrackerMap,
            wizard, metricToActivationMap, trackerToMetricMap, conditionToMetricMap,
            activationAtomTrackerToMetricMap, deactivationAtomTrackerToMetricMap,
            metricsWithActivation);
    if (invalidConfigReason.has_value()) {
        return invalidConfigReason;
    }

    const GaugeMetric& metric = config.gauge_metric(configIndex);
    // Update appropriate indices: mWhatMatcherIndex, mConditionIndex and MetricsManager maps.
    invalidConfigReason = handleMetricWithAtomMatchingTrackers(
            metric.what(), mMetricId, metricIndex, /*enforceOneAtom=*/false,
            allAtomMatchingTrackers, newAtomMatchingTrackerMap, trackerToMetricMap,
            mWhatMatcherIndex);
    if (invalidConfigReason.has_value()) {
        return invalidConfigReason;
    }

    // Need to update maps since the index changed, but mTriggerAtomId will not change.
    int triggerTrackerIndex;
    if (metric.has_trigger_event()) {
        invalidConfigReason = handleMetricWithAtomMatchingTrackers(
                metric.trigger_event(), mMetricId, metricIndex,
                /*enforceOneAtom=*/true, allAtomMatchingTrackers, newAtomMatchingTrackerMap,
                trackerToMetricMap, triggerTrackerIndex);
        if (invalidConfigReason.has_value()) {
            return invalidConfigReason;
        }
    }

    if (metric.has_condition()) {
        invalidConfigReason = handleMetricWithConditions(
                metric.condition(), mMetricId, metricIndex, conditionTrackerMap, metric.links(),
                allConditionTrackers, mConditionTrackerIndex, conditionToMetricMap);
        if (invalidConfigReason.has_value()) {
            return invalidConfigReason;
        }
    }
    sp<EventMatcherWizard> tmpEventWizard = mEventMatcherWizard;
    mEventMatcherWizard = matcherWizard;

    // If this is a config update, we must have just forced a partial bucket. Pull if needed to get
    // data for the new bucket.
    if (mCondition == ConditionState::kTrue && mIsActive && mIsPulled && isRandomNSamples()) {
        pullAndMatchEventsLocked(mCurrentBucketStartTimeNs);
    }
    return nullopt;
}

void GaugeMetricProducer::dumpStatesLocked(int out, bool verbose) const {
    if (mCurrentSlicedBucket == nullptr || mCurrentSlicedBucket->size() == 0) {
        return;
    }

    dprintf(out, "GaugeMetric %lld dimension size %lu\n", (long long)mMetricId,
            (unsigned long)mCurrentSlicedBucket->size());
    if (verbose) {
        for (const auto& it : *mCurrentSlicedBucket) {
            dprintf(out, "\t(what)%s\t(states)%s %d atoms\n",
                    it.first.getDimensionKeyInWhat().toString().c_str(),
                    it.first.getStateValuesKey().toString().c_str(), (int)it.second.size());
        }
    }
}

void GaugeMetricProducer::clearPastBucketsLocked(const int64_t dumpTimeNs) {
    flushIfNeededLocked(dumpTimeNs);
    mPastBuckets.clear();
    mSkippedBuckets.clear();
    mTotalDataSize = 0;
}

void GaugeMetricProducer::onDumpReportLocked(const int64_t dumpTimeNs,
                                             const bool include_current_partial_bucket,
                                             const bool erase_data, const DumpLatency dumpLatency,
                                             std::set<string>* str_set,
                                             ProtoOutputStream* protoOutput) {
    VLOG("Gauge metric %lld report now...", (long long)mMetricId);
    if (include_current_partial_bucket) {
        flushLocked(dumpTimeNs);
    } else {
        flushIfNeededLocked(dumpTimeNs);
    }

    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ID, (long long)mMetricId);
    protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_IS_ACTIVE, isActiveLocked());

    if (mPastBuckets.empty() && mSkippedBuckets.empty()) {
        return;
    }

    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_ESTIMATED_MEMORY_BYTES,
                       (long long)byteSizeLocked());

    if (mDimensionGuardrailHit) {
        protoOutput->write(FIELD_TYPE_BOOL | FIELD_ID_DIMENSION_GUARDRAIL_HIT,
                           mDimensionGuardrailHit);
    }

    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_TIME_BASE, (long long)mTimeBaseNs);
    protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_BUCKET_SIZE, (long long)mBucketSizeNs);

    // Fills the dimension path if not slicing by a primitive repeated field or position ALL.
    if (!mShouldUseNestedDimensions) {
        if (!mDimensionsInWhat.empty()) {
            uint64_t dimenPathToken =
                    protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_PATH_IN_WHAT);
            writeDimensionPathToProto(mDimensionsInWhat, protoOutput);
            protoOutput->end(dimenPathToken);
        }
    }

    uint64_t protoToken = protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_GAUGE_METRICS);

    for (const auto& skippedBucket : mSkippedBuckets) {
        uint64_t wrapperToken =
                protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_SKIPPED);
        protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_SKIPPED_START_MILLIS,
                           (long long)(NanoToMillis(skippedBucket.bucketStartTimeNs)));
        protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_SKIPPED_END_MILLIS,
                           (long long)(NanoToMillis(skippedBucket.bucketEndTimeNs)));

        for (const auto& dropEvent : skippedBucket.dropEvents) {
            uint64_t dropEventToken = protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED |
                                                         FIELD_ID_SKIPPED_DROP_EVENT);
            protoOutput->write(FIELD_TYPE_INT32 | FIELD_ID_BUCKET_DROP_REASON, dropEvent.reason);
            protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_DROP_TIME,
                               (long long)(NanoToMillis(dropEvent.dropTimeNs)));
            protoOutput->end(dropEventToken);
        }
        protoOutput->end(wrapperToken);
    }

    for (const auto& pair : mPastBuckets) {
        const MetricDimensionKey& dimensionKey = pair.first;

        VLOG("Gauge dimension key %s", dimensionKey.toString().c_str());
        uint64_t wrapperToken =
                protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_DATA);

        // First fill dimension.
        if (mShouldUseNestedDimensions) {
            uint64_t dimensionToken =
                    protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_DIMENSION_IN_WHAT);
            writeDimensionToProto(dimensionKey.getDimensionKeyInWhat(), str_set, protoOutput);
            protoOutput->end(dimensionToken);
        } else {
            writeDimensionLeafNodesToProto(dimensionKey.getDimensionKeyInWhat(),
                                           FIELD_ID_DIMENSION_LEAF_IN_WHAT, str_set, protoOutput);
        }

        // Then fill bucket_info (GaugeBucketInfo).
        for (const auto& bucket : pair.second) {
            uint64_t bucketInfoToken = protoOutput->start(
                    FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_BUCKET_INFO);

            if (bucket.mBucketEndNs - bucket.mBucketStartNs != mBucketSizeNs) {
                protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_START_BUCKET_ELAPSED_MILLIS,
                                   (long long)NanoToMillis(bucket.mBucketStartNs));
                protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_END_BUCKET_ELAPSED_MILLIS,
                                   (long long)NanoToMillis(bucket.mBucketEndNs));
            } else {
                protoOutput->write(FIELD_TYPE_INT64 | FIELD_ID_BUCKET_NUM,
                                   (long long)(getBucketNumFromEndTimeNs(bucket.mBucketEndNs)));
            }

            if (!bucket.mAggregatedAtoms.empty()) {
                for (const auto& [atomDimensionKey, elapsedTimestampsNs] :
                     bucket.mAggregatedAtoms) {
                    uint64_t aggregatedAtomToken = protoOutput->start(
                            FIELD_TYPE_MESSAGE | FIELD_COUNT_REPEATED | FIELD_ID_AGGREGATED_ATOM);
                    uint64_t atomToken =
                            protoOutput->start(FIELD_TYPE_MESSAGE | FIELD_ID_ATOM_VALUE);
                    writeFieldValueTreeToStream(mAtomId,
                                                atomDimensionKey.getAtomFieldValues().getValues(),
                                                protoOutput);
                    protoOutput->end(atomToken);
                    for (int64_t timestampNs : elapsedTimestampsNs) {
                        protoOutput->write(
                                FIELD_TYPE_INT64 | FIELD_COUNT_REPEATED | FIELD_ID_ATOM_TIMESTAMPS,
                                (long long)timestampNs);
                    }
                    protoOutput->end(aggregatedAtomToken);
                }
            }

            protoOutput->end(bucketInfoToken);
            VLOG("Gauge \t bucket [%lld - %lld] includes %d atoms.",
                 (long long)bucket.mBucketStartNs, (long long)bucket.mBucketEndNs,
                 (int)bucket.mAggregatedAtoms.size());
        }
        protoOutput->end(wrapperToken);
    }
    protoOutput->end(protoToken);

    if (erase_data) {
        mPastBuckets.clear();
        mSkippedBuckets.clear();
        mDimensionGuardrailHit = false;
        mTotalDataSize = 0;
    }
}

void GaugeMetricProducer::prepareFirstBucketLocked() {
    if (mCondition == ConditionState::kTrue && mIsActive && mIsPulled && isRandomNSamples()) {
        pullAndMatchEventsLocked(mCurrentBucketStartTimeNs);
    }
}

// Only call if mCondition == ConditionState::kTrue && metric is active.
void GaugeMetricProducer::pullAndMatchEventsLocked(const int64_t timestampNs) {
    bool triggerPuller = false;
    switch (mSamplingType) {
        // When the metric wants to do random sampling and there is already one gauge atom for the
        // current bucket, do not do it again.
        case GaugeMetric::RANDOM_ONE_SAMPLE: {
            triggerPuller = mCurrentSlicedBucket->empty();
            break;
        }
        case GaugeMetric::CONDITION_CHANGE_TO_TRUE:
        case GaugeMetric::FIRST_N_SAMPLES: {
            triggerPuller = true;
            break;
        }
        default:
            break;
    }
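    // Even when a pull is warranted, pull_probability can still randomly skip this pull
    // opportunity via shouldKeepRandomSample().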
    if (!triggerPuller || !shouldKeepRandomSample(mPullProbability)) {
        return;
    }
    vector<std::shared_ptr<LogEvent>> allData;
    if (!mPullerManager->Pull(mPullTagId, mConfigKey, timestampNs, &allData)) {
        ALOGE("Gauge Stats puller failed for tag: %d at %lld", mPullTagId, (long long)timestampNs);
        return;
    }
    const int64_t pullDelayNs = getElapsedRealtimeNs() - timestampNs;
    StatsdStats::getInstance().notePullDelay(mPullTagId, pullDelayNs);
    if (pullDelayNs > mMaxPullDelayNs) {
        ALOGE("Pull finish too late for atom %d", mPullTagId);
        StatsdStats::getInstance().notePullExceedMaxDelay(mPullTagId);
        return;
    }
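    // Match each pulled event against the "what" matcher; matched events are copied and stamped
    // with the pull timestamp so every gauge from this pull shares one elapsed timestamp.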
    for (const auto& data : allData) {
        const auto [matchResult, transformedEvent] =
                mEventMatcherWizard->matchLogEvent(*data, mWhatMatcherIndex);
        if (matchResult == MatchingState::kMatched) {
            LogEvent localCopy = transformedEvent == nullptr ? *data : *transformedEvent;
            localCopy.setElapsedTimestampNs(timestampNs);
            onMatchedLogEventLocked(mWhatMatcherIndex, localCopy);
        }
    }
}

void GaugeMetricProducer::onActiveStateChangedLocked(const int64_t eventTimeNs,
                                                     const bool isActive) {
    MetricProducer::onActiveStateChangedLocked(eventTimeNs, isActive);

    if (ConditionState::kTrue != mCondition) {
        return;
    }

    if (isActive && mIsPulled && isRandomNSamples()) {
        pullAndMatchEventsLocked(eventTimeNs);
    }
}

void GaugeMetricProducer::onConditionChangedLocked(const bool conditionMet,
                                                   const int64_t eventTimeNs) {
    VLOG("GaugeMetric %lld onConditionChanged", (long long)mMetricId);

    mCondition = conditionMet ? ConditionState::kTrue : ConditionState::kFalse;
    if (!mIsActive) {
        return;
    }

    flushIfNeededLocked(eventTimeNs);
    if (conditionMet && mIsPulled &&
        (isRandomNSamples() || mSamplingType == GaugeMetric::CONDITION_CHANGE_TO_TRUE)) {
        pullAndMatchEventsLocked(eventTimeNs);
    }  // else: Push mode. No need to proactively pull the gauge data.
}

void GaugeMetricProducer::onSlicedConditionMayChangeLocked(bool overallCondition,
                                                           const int64_t eventTimeNs) {
    VLOG("GaugeMetric %lld onSlicedConditionMayChange overall condition %d", (long long)mMetricId,
         overallCondition);
    mCondition = overallCondition ? ConditionState::kTrue : ConditionState::kFalse;
    if (!mIsActive) {
        return;
    }

    flushIfNeededLocked(eventTimeNs);
    // If the condition is sliced, mCondition is true when any of the dimensions is true, so we
    // pull for every dimension.
    if (overallCondition && mIsPulled && mTriggerAtomId == -1) {
        pullAndMatchEventsLocked(eventTimeNs);
    }  // else: Push mode. No need to proactively pull the gauge data.
}

std::shared_ptr<vector<FieldValue>> GaugeMetricProducer::getGaugeFields(const LogEvent& event) {
    std::shared_ptr<vector<FieldValue>> gaugeFields;
    if (mFieldMatchers.size() > 0) {
        gaugeFields = std::make_shared<vector<FieldValue>>();
        filterGaugeValues(mFieldMatchers, event.getValues(), gaugeFields.get());
    } else {
        gaugeFields = std::make_shared<vector<FieldValue>>(event.getValues());
    }
    // Trim all dimension fields from the output. The dimensions already appear in the output
    // report and benefit from dictionary encoding there; for large pulled atoms, dropping them
    // here avoids repeating the same values in every aggregated atom.
    for (const auto& field : mDimensionsInWhat) {
        for (auto it = gaugeFields->begin(); it != gaugeFields->end();) {
            if (it->mField.matches(field)) {
                it = gaugeFields->erase(it);
            } else {
                it++;
            }
        }
    }
    return gaugeFields;
}

void GaugeMetricProducer::onDataPulled(const std::vector<std::shared_ptr<LogEvent>>& allData,
                                       PullResult pullResult, int64_t originalPullTimeNs) {
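    // Called back from the puller, outside the usual *Locked call path, so take the metric lock
    // here instead of relying on the caller.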
    std::lock_guard<std::mutex> lock(mMutex);
    if (pullResult != PullResult::PULL_RESULT_SUCCESS || allData.size() == 0) {
        return;
    }
    const int64_t pullDelayNs = getElapsedRealtimeNs() - originalPullTimeNs;
    StatsdStats::getInstance().notePullDelay(mPullTagId, pullDelayNs);
    if (pullDelayNs > mMaxPullDelayNs) {
        ALOGE("Pull finish too late for atom %d", mPullTagId);
        StatsdStats::getInstance().notePullExceedMaxDelay(mPullTagId);
        return;
    }
    for (const auto& data : allData) {
        const auto [matchResult, transformedEvent] =
                mEventMatcherWizard->matchLogEvent(*data, mWhatMatcherIndex);
        if (matchResult == MatchingState::kMatched) {
            onMatchedLogEventLocked(mWhatMatcherIndex,
                                    transformedEvent == nullptr ? *data : *transformedEvent);
        }
    }
}

bool GaugeMetricProducer::hitGuardRailLocked(const MetricDimensionKey& newKey) {
    if (mCurrentSlicedBucket->find(newKey) != mCurrentSlicedBucket->end()) {
        return false;
    }
    // 1. Report the tuple count if the tuple count > soft limit
    if (mCurrentSlicedBucket->size() >= mDimensionSoftLimit) {
        size_t newTupleCount = mCurrentSlicedBucket->size() + 1;
        StatsdStats::getInstance().noteMetricDimensionSize(mConfigKey, mMetricId, newTupleCount);
        // 2. Don't add more tuples, we are above the allowed threshold. Drop the data.
        if (newTupleCount > mDimensionHardLimit) {
            if (!mHasHitGuardrail) {
                ALOGE("GaugeMetric %lld dropping data for dimension key %s", (long long)mMetricId,
                      newKey.toString().c_str());
                mHasHitGuardrail = true;
            }
            mDimensionGuardrailHit = true;
            StatsdStats::getInstance().noteHardDimensionLimitReached(mMetricId);
            return true;
        }
    }

    return false;
}

void GaugeMetricProducer::onMatchedLogEventInternalLocked(
        const size_t matcherIndex, const MetricDimensionKey& eventKey,
        const ConditionKey& conditionKey, bool condition, const LogEvent& event,
        const map<int, HashableDimensionKey>& statePrimaryKeys) {
    if (condition == false) {
        return;
    }

    if (mPullTagId == -1 && mSamplingPercentage < 100 &&
        !shouldKeepRandomSample(mSamplingPercentage)) {
        return;
    }

    int64_t eventTimeNs = event.GetElapsedTimestampNs();
    if (eventTimeNs < mCurrentBucketStartTimeNs) {
        VLOG("Gauge Skip event due to late arrival: %lld vs %lld", (long long)eventTimeNs,
             (long long)mCurrentBucketStartTimeNs);
        return;
    }
    flushIfNeededLocked(eventTimeNs);

    if (mTriggerAtomId == event.GetTagId()) {
        // Both Active state and Condition are true here.
        // Active state being true is checked in onMatchedLogEventLocked.
        // Condition being true is checked at the start of this method.
        pullAndMatchEventsLocked(eventTimeNs);
        return;
    }

    // When the gauge metric wants to randomly sample the output atom, simply keep the first
    // gauge in the given bucket.
    if (mCurrentSlicedBucket->find(eventKey) != mCurrentSlicedBucket->end() &&
        mSamplingType == GaugeMetric::RANDOM_ONE_SAMPLE) {
        return;
    }
    if (hitGuardRailLocked(eventKey)) {
        return;
    }
    if ((*mCurrentSlicedBucket)[eventKey].size() >= mGaugeAtomsPerDimensionLimit) {
        return;
    }

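    // Append the atom under this dimension key; truncateTimestampIfNecessary() may coarsen the
    // elapsed timestamp for atoms that require it.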
    const int64_t truncatedElapsedTimestampNs = truncateTimestampIfNecessary(event);
    GaugeAtom gaugeAtom(getGaugeFields(event), truncatedElapsedTimestampNs);
    (*mCurrentSlicedBucket)[eventKey].push_back(gaugeAtom);
    // Anomaly detection on gauge metric only works when there is one numeric
    // field specified.
    if (mAnomalyTrackers.size() > 0) {
        if (gaugeAtom.mFields->size() == 1) {
            const Value& value = gaugeAtom.mFields->begin()->mValue;
            long gaugeVal = 0;
            if (value.getType() == INT) {
                gaugeVal = (long)value.int_value;
            } else if (value.getType() == LONG) {
                gaugeVal = value.long_value;
            }
            for (auto& tracker : mAnomalyTrackers) {
                tracker->detectAndDeclareAnomaly(eventTimeNs, mCurrentBucketNum, mMetricId,
                                                 eventKey, gaugeVal);
            }
        }
    }
}

void GaugeMetricProducer::updateCurrentSlicedBucketForAnomaly() {
    for (const auto& slice : *mCurrentSlicedBucket) {
        if (slice.second.empty()) {
            continue;
        }
        const Value& value = slice.second.front().mFields->front().mValue;
        long gaugeVal = 0;
        if (value.getType() == INT) {
            gaugeVal = (long)value.int_value;
        } else if (value.getType() == LONG) {
            gaugeVal = value.long_value;
        }
        (*mCurrentSlicedBucketForAnomaly)[slice.first] = gaugeVal;
    }
}

void GaugeMetricProducer::dropDataLocked(const int64_t dropTimeNs) {
    flushIfNeededLocked(dropTimeNs);
    StatsdStats::getInstance().noteBucketDropped(mMetricId);
    mPastBuckets.clear();
    mTotalDataSize = 0;
}

// When a new matched event comes in, we check if the event falls into the current
// bucket. If not, flush the old counter to past buckets and initialize the new
// bucket.
// If data is pushed, onMatchedLogEvent will only be called through onConditionChanged() inside
// the GaugeMetricProducer while holding the lock.
void GaugeMetricProducer::flushIfNeededLocked(const int64_t eventTimeNs) {
    int64_t currentBucketEndTimeNs = getCurrentBucketEndTimeNs();

    if (eventTimeNs < currentBucketEndTimeNs) {
        VLOG("Gauge eventTime is %lld, less than next bucket start time %lld",
             (long long)eventTimeNs, (long long)(mCurrentBucketStartTimeNs + mBucketSizeNs));
        return;
    }

    // Adjusts the bucket start and end times.
    int64_t numBucketsForward = 1 + (eventTimeNs - currentBucketEndTimeNs) / mBucketSizeNs;
    int64_t nextBucketNs = currentBucketEndTimeNs + (numBucketsForward - 1) * mBucketSizeNs;
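    // nextBucketNs is the start of the bucket containing eventTimeNs; any fully elapsed empty
    // buckets in between are simply skipped rather than materialized.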
    flushCurrentBucketLocked(eventTimeNs, nextBucketNs);

    mCurrentBucketNum += numBucketsForward;
    VLOG("Gauge metric %lld: new bucket start time: %lld", (long long)mMetricId,
         (long long)mCurrentBucketStartTimeNs);
}

void GaugeMetricProducer::flushCurrentBucketLocked(const int64_t eventTimeNs,
                                                   const int64_t nextBucketStartTimeNs) {
    int64_t fullBucketEndTimeNs = getCurrentBucketEndTimeNs();
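    // For a forced partial flush (e.g. an app upgrade or a config change), the bucket closes at
    // the event time instead of at the full bucket boundary.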
    int64_t bucketEndTime = eventTimeNs < fullBucketEndTimeNs ? eventTimeNs : fullBucketEndTimeNs;

    GaugeBucket info;
    info.mBucketStartNs = mCurrentBucketStartTimeNs;
    info.mBucketEndNs = bucketEndTime;

    // Add bucket to mPastBuckets if bucket is large enough.
    // Otherwise, drop the bucket data and add bucket metadata to mSkippedBuckets.
    bool isBucketLargeEnough = info.mBucketEndNs - mCurrentBucketStartTimeNs >= mMinBucketSizeNs;
    if (isBucketLargeEnough) {
        for (const auto& slice : *mCurrentSlicedBucket) {
            info.mAggregatedAtoms.clear();
            for (const GaugeAtom& atom : slice.second) {
                AtomDimensionKey key(mAtomId, HashableDimensionKey(*atom.mFields));
                vector<int64_t>& elapsedTimestampsNs = info.mAggregatedAtoms[key];
                elapsedTimestampsNs.push_back(atom.mElapsedTimestampNs);
            }
            auto& bucketList = mPastBuckets[slice.first];
            const bool isFirstBucket = bucketList.empty();
            bucketList.push_back(info);
            mTotalDataSize += computeGaugeBucketSizeLocked(eventTimeNs >= fullBucketEndTimeNs,
                                                           /*dimKey=*/slice.first, isFirstBucket,
                                                           info.mAggregatedAtoms);
            VLOG("Gauge metric %lld, dump key value: %s", (long long)mMetricId,
                 slice.first.toString().c_str());
        }
    } else if (mIsActive) {
        mCurrentSkippedBucket.bucketStartTimeNs = mCurrentBucketStartTimeNs;
        mCurrentSkippedBucket.bucketEndTimeNs = bucketEndTime;
        if (!maxDropEventsReached()) {
            mCurrentSkippedBucket.dropEvents.emplace_back(
                    buildDropEvent(eventTimeNs, BucketDropReason::BUCKET_TOO_SMALL));
        }
        mSkippedBuckets.emplace_back(mCurrentSkippedBucket);
        mTotalDataSize += computeSkippedBucketSizeLocked(mCurrentSkippedBucket);
    }

    // If we have anomaly trackers, we need to update the partial bucket values.
    if (mAnomalyTrackers.size() > 0) {
        updateCurrentSlicedBucketForAnomaly();

        if (eventTimeNs > fullBucketEndTimeNs) {
            // This is known to be a full bucket, so send this data to the anomaly tracker.
            for (auto& tracker : mAnomalyTrackers) {
                tracker->addPastBucket(mCurrentSlicedBucketForAnomaly, mCurrentBucketNum);
            }
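            // Allocate a fresh map instead of clearing: the anomaly trackers keep a reference to
            // the shared_ptr that was just handed to addPastBucket().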
            mCurrentSlicedBucketForAnomaly = std::make_shared<DimToValMap>();
        }
    }

    StatsdStats::getInstance().noteBucketCount(mMetricId);
    mCurrentSlicedBucket = std::make_shared<DimToGaugeAtomsMap>();
    mCurrentBucketStartTimeNs = nextBucketStartTimeNs;
    mCurrentSkippedBucket.reset();
    // Reset mHasHitGuardrail boolean since bucket was reset
    mHasHitGuardrail = false;
}

// Estimate for the size of a GaugeBucket.
size_t GaugeMetricProducer::computeGaugeBucketSizeLocked(
        const bool isFullBucket, const MetricDimensionKey& dimKey, const bool isFirstBucket,
        const std::unordered_map<AtomDimensionKey, std::vector<int64_t>>& aggregatedAtoms) const {
    size_t bucketSize =
            MetricProducer::computeBucketSizeLocked(isFullBucket, dimKey, isFirstBucket);

    // Gauge Atoms and timestamps
    for (const auto& pair : aggregatedAtoms) {
        bucketSize += getFieldValuesSizeV2(pair.first.getAtomFieldValues().getValues());
        bucketSize += sizeof(int64_t) * pair.second.size();
    }

    return bucketSize;
}

size_t GaugeMetricProducer::byteSizeLocked() const {
    sp<ConfigMetadataProvider> configMetadataProvider = getConfigMetadataProvider();
    if (configMetadataProvider != nullptr &&
        configMetadataProvider->useV2SoftMemoryCalculation()) {
        return computeOverheadSizeLocked(!mPastBuckets.empty() || !mSkippedBuckets.empty(),
                                         mDimensionGuardrailHit) +
               mTotalDataSize;
    }
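    // Legacy estimate: approximate the in-memory size from the stored field values and
    // timestamps of every past bucket.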
    size_t totalSize = 0;
    for (const auto& pair : mPastBuckets) {
        for (const auto& bucket : pair.second) {
            for (const auto& [atomDimensionKey, elapsedTimestampsNs] : bucket.mAggregatedAtoms) {
                totalSize += sizeof(FieldValue) *
                             atomDimensionKey.getAtomFieldValues().getValues().size();
                totalSize += sizeof(int64_t) * elapsedTimestampsNs.size();
            }
        }
    }
    return totalSize;
}

}  // namespace statsd
}  // namespace os
}  // namespace android