/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License
 */

package com.android.server.job;

import static android.net.NetworkCapabilities.NET_CAPABILITY_TEMPORARILY_NOT_METERED;
import static android.net.NetworkCapabilities.TRANSPORT_TEST;

import static com.android.server.job.JobSchedulerService.sElapsedRealtimeClock;
import static com.android.server.job.JobSchedulerService.sSystemClock;

import android.annotation.NonNull;
import android.annotation.Nullable;
import android.app.job.JobInfo;
import android.app.job.JobWorkItem;
import android.content.ComponentName;
import android.content.Context;
import android.net.NetworkRequest;
import android.os.Environment;
import android.os.Handler;
import android.os.PersistableBundle;
import android.os.Process;
import android.os.SystemClock;
import android.text.TextUtils;
import android.text.format.DateUtils;
import android.util.ArraySet;
import android.util.AtomicFile;
import android.util.Pair;
import android.util.Slog;
import android.util.SparseArray;
import android.util.SparseBooleanArray;
import android.util.SystemConfigFileCommitEventLogger;
import android.util.Xml;

import com.android.internal.annotations.GuardedBy;
import com.android.internal.annotations.VisibleForTesting;
import com.android.internal.util.ArrayUtils;
import com.android.internal.util.BitUtils;
import com.android.modules.expresslog.Histogram;
import com.android.modules.utils.TypedXmlPullParser;
import com.android.modules.utils.TypedXmlSerializer;
import com.android.server.AppSchedulingModuleThread;
import com.android.server.IoThread;
import com.android.server.job.JobSchedulerInternal.JobStorePersistStats;
import com.android.server.job.controllers.JobStatus;

import org.xmlpull.v1.XmlPullParser;
import org.xmlpull.v1.XmlPullParserException;
import org.xmlpull.v1.XmlSerializer;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
import java.util.Set;
import java.util.StringJoiner;
import java.util.concurrent.CountDownLatch;
import java.util.function.Consumer;
import java.util.function.Predicate;
import java.util.regex.Pattern;

/**
 * Maintains the master list of jobs that the job scheduler is tracking. These jobs are compared by
 * reference, so none of the functions in this class should make a copy.
 * Also handles read/write of persisted jobs.
 *
 * Note on locking:
 * All callers to this class must <strong>lock on the class object they are calling</strong>.
 * This is important b/c {@link com.android.server.job.JobStore.WriteJobsMapToDiskRunnable}
 * and {@link com.android.server.job.JobStore.ReadJobMapFromDiskRunnable} lock on that
 * object.
 *
 * Test:
 * atest $ANDROID_BUILD_TOP/frameworks/base/services/tests/servicestests/src/com/android/server/job/JobStoreTest.java
 */
public final class JobStore {
    private static final String TAG = "JobStore";
    private static final boolean DEBUG = JobSchedulerService.DEBUG;

    /** Threshold to adjust how often we want to write to the db. */
    private static final long JOB_PERSIST_DELAY = 2000L;
    private static final long SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS = 30 * 60_000L;
    @VisibleForTesting
    static final String JOB_FILE_SPLIT_PREFIX = "jobs_";
    private static final Pattern SPLIT_FILE_PATTERN =
            Pattern.compile("^" + JOB_FILE_SPLIT_PREFIX + "\\d+.xml$");
    private static final int ALL_UIDS = -1;
    @VisibleForTesting
    static final int INVALID_UID = -2;

    final Object mLock;
    final Object mWriteScheduleLock;    // used solely for invariants around write scheduling
    final JobSet mJobSet; // per-caller-uid and per-source-uid tracking
    final Context mContext;

    // Bookkeeping around incorrect boot-time system clock
    private final long mXmlTimestamp;
    private boolean mRtcGood;

    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteScheduled;

    @GuardedBy("mWriteScheduleLock")
    private boolean mWriteInProgress;

    @GuardedBy("mWriteScheduleLock")
    private boolean mSplitFileMigrationNeeded;

    private static final Object sSingletonLock = new Object();
    private final SystemConfigFileCommitEventLogger mEventLogger;
    private final AtomicFile mJobsFile;
    private final File mJobFileDirectory;
    private final SparseBooleanArray mPendingJobWriteUids = new SparseBooleanArray();
    /** Handler backed by IoThread for writing to disk. */
    private final Handler mIoHandler = IoThread.getHandler();
    private static JobStore sSingleton;

    private boolean mUseSplitFiles = JobSchedulerService.Constants.DEFAULT_PERSIST_IN_SPLIT_FILES;

    private JobStorePersistStats mPersistInfo = new JobStorePersistStats();

    /**
     * Separately updated value of the JobSet size to avoid recalculating it frequently for logging
     * purposes. Continue to use {@link JobSet#size()} for the up-to-date and accurate value.
     */
    private int mCurrentJobSetSize = 0;
    private int mScheduledJob30MinHighWaterMark = 0;
    private static final Histogram sScheduledJob30MinHighWaterMarkLogger = new Histogram(
            "job_scheduler.value_hist_scheduled_job_30_min_high_water_mark",
            new Histogram.ScaledRangeOptions(15, 1, 99, 1.5f));
    private final Runnable mScheduledJobHighWaterMarkLoggingRunnable = new Runnable() {
        @Override
        public void run() {
            AppSchedulingModuleThread.getHandler().removeCallbacks(this);
            synchronized (mLock) {
                sScheduledJob30MinHighWaterMarkLogger.logSample(mScheduledJob30MinHighWaterMark);
                mScheduledJob30MinHighWaterMark = mJobSet.size();
            }
            // The count doesn't need to be logged at exact times. Logging based on system uptime
            // should be fine.
            AppSchedulingModuleThread.getHandler()
                    .postDelayed(this, SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS);
        }
    };
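
    // Illustrative sketch (not code in this file) of the locking convention described in the
    // class javadoc: callers synchronize on the same lock object that was handed to the
    // constructor before touching the store, e.g.
    //
    //     synchronized (lock) {              // the JobSchedulerService lock (mLock here)
    //         jobStore.add(jobStatus);
    //         jobStore.touchJob(jobStatus);
    //     }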

    /** Used by the {@link JobSchedulerService} to instantiate the JobStore. */
    static JobStore get(JobSchedulerService jobManagerService) {
        synchronized (sSingletonLock) {
            if (sSingleton == null) {
                sSingleton = new JobStore(jobManagerService.getContext(),
                        jobManagerService.getLock(), Environment.getDataDirectory());
            }
            return sSingleton;
        }
    }

    /**
     * @return A freshly initialized job store object, with no loaded jobs.
     */
    @VisibleForTesting
    public static JobStore initAndGetForTesting(Context context, File dataDir) {
        JobStore jobStoreUnderTest = new JobStore(context, new Object(), dataDir);
        jobStoreUnderTest.init();
        jobStoreUnderTest.clearForTesting();
        return jobStoreUnderTest;
    }

    /**
     * Construct the instance of the job store. This results in a blocking read from disk.
     */
    private JobStore(Context context, Object lock, File dataDir) {
        mLock = lock;
        mWriteScheduleLock = new Object();
        mContext = context;

        File systemDir = new File(dataDir, "system");
        mJobFileDirectory = new File(systemDir, "job");
        mJobFileDirectory.mkdirs();
        mEventLogger = new SystemConfigFileCommitEventLogger("jobs");
        mJobsFile = createJobFile(new File(mJobFileDirectory, "jobs.xml"));

        mJobSet = new JobSet();

        // If the current RTC is earlier than the timestamp on our persisted jobs file,
        // we suspect that the RTC is uninitialized and so we cannot draw conclusions
        // about persisted job scheduling.
        //
        // Note that if the persisted jobs file does not exist, we proceed with the
        // assumption that the RTC is good. This is less work and is safe: if the
        // clock updates to sanity then we'll be saving the persisted jobs file in that
        // correct state, which is normal; or we'll wind up writing the jobs file with
        // an incorrect historical timestamp. That's fine; at worst we'll reboot with
        // a *correct* timestamp, see a bunch of overdue jobs, and run them; then
        // settle into normal operation.
        mXmlTimestamp = mJobsFile.exists()
                ? mJobsFile.getLastModifiedTime() : mJobFileDirectory.lastModified();
        mRtcGood = (sSystemClock.millis() > mXmlTimestamp);

        AppSchedulingModuleThread.getHandler().postDelayed(
                mScheduledJobHighWaterMarkLoggingRunnable, SCHEDULED_JOB_HIGH_WATER_MARK_PERIOD_MS);
    }

    private void init() {
        readJobMapFromDisk(mJobSet, mRtcGood);
    }

    void initAsync(CountDownLatch completionLatch) {
        mIoHandler.post(new ReadJobMapFromDiskRunnable(mJobSet, mRtcGood, completionLatch));
    }

    private AtomicFile createJobFile(String baseName) {
        return createJobFile(new File(mJobFileDirectory, baseName + ".xml"));
    }

    private AtomicFile createJobFile(File file) {
        return new AtomicFile(file, mEventLogger);
    }

    public boolean jobTimesInflatedValid() {
        return mRtcGood;
    }

    public boolean clockNowValidToInflate(long now) {
        return now >= mXmlTimestamp;
    }
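
    // Assumed usage sketch for the RTC-sanity flags above (the actual caller lives in
    // JobSchedulerService): once the wall clock catches up past the persisted-file timestamp,
    // clockNowValidToInflate() returns true and the persisted UTC bounds can be re-inflated
    // via getRtcCorrectedJobsLocked() below.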

    /**
     * Runs any necessary work asynchronously. If this is called after
     * {@link #initAsync(CountDownLatch)}, this ensures the given work runs after
     * the JobStore is initialized.
     */
    void runWorkAsync(@NonNull Runnable r) {
        mIoHandler.post(r);
    }

    /**
     * Find all the jobs that were affected by RTC clock uncertainty at boot time. Returns
     * parallel lists of the existing JobStatus objects and of new, equivalent JobStatus instances
     * with now-corrected time bounds.
     */
    public void getRtcCorrectedJobsLocked(final ArrayList<JobStatus> toAdd,
            final ArrayList<JobStatus> toRemove) {
        final long elapsedNow = sElapsedRealtimeClock.millis();

        // Find the jobs that need to be fixed up, collecting them for post-iteration
        // replacement with their new versions
        forEachJob(job -> {
            final Pair<Long, Long> utcTimes = job.getPersistedUtcTimes();
            if (utcTimes != null) {
                Pair<Long, Long> elapsedRuntimes =
                        convertRtcBoundsToElapsed(utcTimes, elapsedNow);
                JobStatus newJob = new JobStatus(job,
                        elapsedRuntimes.first, elapsedRuntimes.second,
                        0, 0, job.getLastSuccessfulRunTime(), job.getLastFailedRunTime(),
                        job.getCumulativeExecutionTimeMs());
                newJob.prepareLocked();
                toAdd.add(newJob);
                toRemove.add(job);
            }
        });
    }

    /**
     * Add a job to the master list, persisting it if necessary.
     * Similar jobs to the new job will not be removed.
     *
     * @param jobStatus Job to add.
     */
    public void add(JobStatus jobStatus) {
        if (mJobSet.add(jobStatus)) {
            mCurrentJobSetSize++;
            maybeUpdateHighWaterMark();
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
            maybeWriteStatusToDiskAsync();
        }
        if (DEBUG) {
            Slog.d(TAG, "Added job status to store: " + jobStatus);
        }
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void addForTesting(JobStatus jobStatus) {
        if (mJobSet.add(jobStatus)) {
            mCurrentJobSetSize++;
            maybeUpdateHighWaterMark();
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
        }
    }

    boolean containsJob(JobStatus jobStatus) {
        return mJobSet.contains(jobStatus);
    }

    public int size() {
        return mJobSet.size();
    }

    public JobStorePersistStats getPersistStats() {
        return mPersistInfo;
    }

    public int countJobsForUid(int uid) {
        return mJobSet.countJobsForUid(uid);
    }
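
    // Note: per the class javadoc, JobStatus objects are tracked by reference, so remove()
    // below only succeeds when passed the exact instance previously given to add(); an
    // equivalent copy is not expected to match.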

    /**
     * Remove the provided job. Will also delete the job if it was persisted.
     * @param removeFromPersisted If true, the job will be removed from the persisted job list
     *                            immediately (if it was persisted).
     * @return Whether or not the job existed to be removed.
     */
    public boolean remove(JobStatus jobStatus, boolean removeFromPersisted) {
        boolean removed = mJobSet.remove(jobStatus);
        if (!removed) {
            if (DEBUG) {
                Slog.d(TAG, "Couldn't remove job: didn't exist: " + jobStatus);
            }
            return false;
        }
        mCurrentJobSetSize--;
        if (removeFromPersisted && jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
            maybeWriteStatusToDiskAsync();
        }
        return removed;
    }

    /**
     * Like {@link #remove(JobStatus, boolean)}, but doesn't schedule a disk write.
     */
    @VisibleForTesting
    public void removeForTesting(JobStatus jobStatus) {
        if (mJobSet.remove(jobStatus)) {
            mCurrentJobSetSize--;
        }
        if (jobStatus.isPersisted()) {
            mPendingJobWriteUids.put(jobStatus.getUid(), true);
        }
    }

    /**
     * Remove the jobs of users not specified in the keepUserIds.
     * @param keepUserIds Array of User IDs whose jobs should be kept and not removed.
     */
    public void removeJobsOfUnlistedUsers(int[] keepUserIds) {
        mJobSet.removeJobsOfUnlistedUsers(keepUserIds);
        mCurrentJobSetSize = mJobSet.size();
    }

    /** Note a change in the specified JobStatus that necessitates writing job state to disk. */
    void touchJob(@NonNull JobStatus jobStatus) {
        if (!jobStatus.isPersisted()) {
            return;
        }
        mPendingJobWriteUids.put(jobStatus.getUid(), true);
        maybeWriteStatusToDiskAsync();
    }

    @VisibleForTesting
    public void clear() {
        mJobSet.clear();
        mPendingJobWriteUids.put(ALL_UIDS, true);
        mCurrentJobSetSize = 0;
        maybeWriteStatusToDiskAsync();
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void clearForTesting() {
        mJobSet.clear();
        mPendingJobWriteUids.put(ALL_UIDS, true);
        mCurrentJobSetSize = 0;
    }

    void setUseSplitFiles(boolean useSplitFiles) {
        synchronized (mLock) {
            if (mUseSplitFiles != useSplitFiles) {
                mUseSplitFiles = useSplitFiles;
                migrateJobFilesAsync();
            }
        }
    }

    /**
     * The same as above but does not schedule writing. This makes perf benchmarks more stable.
     */
    @VisibleForTesting
    public void setUseSplitFilesForTesting(boolean useSplitFiles) {
        final boolean changed;
        synchronized (mLock) {
            changed = mUseSplitFiles != useSplitFiles;
            if (changed) {
                mUseSplitFiles = useSplitFiles;
                mPendingJobWriteUids.put(ALL_UIDS, true);
            }
        }
        if (changed) {
            synchronized (mWriteScheduleLock) {
                mSplitFileMigrationNeeded = true;
            }
        }
    }

    /**
     * @param sourceUid Uid of the source app.
     * @return A list of all the jobs scheduled for the source app. Never null.
     */
    @NonNull
    public ArraySet<JobStatus> getJobsBySourceUid(int sourceUid) {
        return mJobSet.getJobsBySourceUid(sourceUid);
    }

    public void getJobsBySourceUid(int sourceUid, @NonNull Set<JobStatus> insertInto) {
        mJobSet.getJobsBySourceUid(sourceUid, insertInto);
    }
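
    // Terminology: "uid" is the uid of the app that scheduled the job, while "sourceUid" is
    // the uid the job runs on behalf of (see the per-caller-uid and per-source-uid tracking
    // note on mJobSet above). For most apps the two are the same; they differ for proxied
    // work such as the sync jobs migrated in restoreJobFromXml() below.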

    /**
     * @param uid Uid of the requesting app.
     * @return All JobStatus objects for a given uid from the master list. Never null.
     */
    @NonNull
    public ArraySet<JobStatus> getJobsByUid(int uid) {
        return mJobSet.getJobsByUid(uid);
    }

    public void getJobsByUid(int uid, @NonNull Set<JobStatus> insertInto) {
        mJobSet.getJobsByUid(uid, insertInto);
    }

    /**
     * @param uid Uid of the requesting app.
     * @param namespace Namespace the job was scheduled in.
     * @param jobId Job id, specified at schedule-time.
     * @return the JobStatus that matches the provided uid and jobId, or null if none found.
     */
    @Nullable
    public JobStatus getJobByUidAndJobId(int uid, @Nullable String namespace, int jobId) {
        return mJobSet.get(uid, namespace, jobId);
    }

    /**
     * Iterate over the set of all jobs, invoking the supplied functor on each. This is for
     * customers who need to examine each job; we'd much rather not have to generate
     * transient unified collections for them to iterate over and then discard, or create
     * iterators every time a client needs to perform a sweep.
     */
    public void forEachJob(Consumer<JobStatus> functor) {
        mJobSet.forEachJob(null, functor);
    }

    public void forEachJob(@Nullable Predicate<JobStatus> filterPredicate,
            Consumer<JobStatus> functor) {
        mJobSet.forEachJob(filterPredicate, functor);
    }

    public void forEachJob(int uid, Consumer<JobStatus> functor) {
        mJobSet.forEachJob(uid, functor);
    }

    public void forEachJobForSourceUid(int sourceUid, Consumer<JobStatus> functor) {
        mJobSet.forEachJobForSourceUid(sourceUid, functor);
    }

    private void maybeUpdateHighWaterMark() {
        if (mScheduledJob30MinHighWaterMark < mCurrentJobSetSize) {
            mScheduledJob30MinHighWaterMark = mCurrentJobSetSize;
        }
    }
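
    // For orientation, a persisted file written with the tags below looks roughly like this
    // (attributes abbreviated; the exact set is emitted by addAttributesToJobTag() and friends
    // in mWriteRunnable):
    //
    //   <job-info version="1">
    //     <job jobid="1" package="com.example.app" class="com.example.app.ExampleService"
    //          uid="10001" ...>
    //       <constraints charging="true" ... />
    //       <one-off delay="..." deadline="..." />
    //       <extras />
    //     </job>
    //   </job-info>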

    /** Version of the db schema. */
    private static final int JOBS_FILE_VERSION = 1;
    /**
     * For legacy reasons, this tag is used to encapsulate the entire job list.
     */
    private static final String XML_TAG_JOB_INFO = "job-info";
    /**
     * For legacy reasons, this tag represents a single {@link JobStatus} object.
     */
    private static final String XML_TAG_JOB = "job";
    /** Tag corresponds to constraints this job needs. */
    private static final String XML_TAG_PARAMS_CONSTRAINTS = "constraints";
    /** Tag corresponds to execution parameters. */
    private static final String XML_TAG_PERIODIC = "periodic";
    private static final String XML_TAG_ONEOFF = "one-off";
    private static final String XML_TAG_EXTRAS = "extras";
    private static final String XML_TAG_JOB_WORK_ITEM = "job-work-item";
    private static final String XML_TAG_DEBUG_INFO = "debug-info";
    private static final String XML_TAG_DEBUG_TAG = "debug-tag";

    private void migrateJobFilesAsync() {
        synchronized (mLock) {
            mPendingJobWriteUids.put(ALL_UIDS, true);
        }
        synchronized (mWriteScheduleLock) {
            mSplitFileMigrationNeeded = true;
            maybeWriteStatusToDiskAsync();
        }
    }

    /**
     * Every time the state changes we write all the jobs in one swath, instead of trying to
     * track incremental changes.
     */
    private void maybeWriteStatusToDiskAsync() {
        synchronized (mWriteScheduleLock) {
            if (!mWriteScheduled) {
                if (DEBUG) {
                    Slog.v(TAG, "Scheduling persist of jobs to disk.");
                }
                mIoHandler.postDelayed(mWriteRunnable, JOB_PERSIST_DELAY);
                mWriteScheduled = true;
            }
        }
    }

    @VisibleForTesting
    public void readJobMapFromDisk(JobSet jobSet, boolean rtcGood) {
        new ReadJobMapFromDiskRunnable(jobSet, rtcGood).run();
    }

    /** Write persisted JobStore state to disk synchronously. Should only be used for testing. */
    @VisibleForTesting
    public void writeStatusToDiskForTesting() {
        synchronized (mWriteScheduleLock) {
            if (mWriteScheduled) {
                throw new IllegalStateException("An asynchronous write is already scheduled.");
            }

            mWriteScheduled = true;
            mWriteRunnable.run();
        }
    }

    /**
     * Wait for any pending write to the persistent store to clear.
     * @param maxWaitMillis Maximum time from present to wait
     * @return {@code true} if I/O cleared as expected, {@code false} if the wait
     *     timed out before the pending write completed.
     */
    @VisibleForTesting
    public boolean waitForWriteToCompleteForTesting(long maxWaitMillis) {
        final long start = SystemClock.uptimeMillis();
        final long end = start + maxWaitMillis;
        synchronized (mWriteScheduleLock) {
            while (mWriteScheduled || mWriteInProgress) {
                final long now = SystemClock.uptimeMillis();
                if (now >= end) {
                    // Still not done and we've hit the deadline; failure.
                    return false;
                }
                try {
                    // Only wait for the time remaining in the budget.
                    mWriteScheduleLock.wait(end - now);
                } catch (InterruptedException e) {
                    // Spurious wakeup or interrupt; loop and re-check the condition.
                }
            }
        }
        return true;
    }

    /**
     * Returns a single string representation of the contents of the specified intArray.
     * If the intArray is [1, 2, 4] as the input, the return result will be the string "1,2,4".
     */
    @VisibleForTesting
    static String intArrayToString(int[] values) {
        final StringJoiner sj = new StringJoiner(",");
        for (final int value : values) {
            sj.add(String.valueOf(value));
        }
        return sj.toString();
    }
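
    // Round trip, per the javadocs here: intArrayToString(new int[] {1, 2, 4}) produces
    // "1,2,4", and stringToIntArray("1,2,4") recovers {1, 2, 4}; a null or empty string maps
    // to an empty array, and malformed input throws NumberFormatException.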

    /**
     * Converts a string containing a comma-separated list of decimal representations
     * of ints into an array of int. If the string is not correctly formatted,
     * or if any value doesn't fit into an int, NumberFormatException is thrown.
     */
    @VisibleForTesting
    static int[] stringToIntArray(String str) {
        if (TextUtils.isEmpty(str)) return new int[0];
        final String[] arr = str.split(",");
        final int[] values = new int[arr.length];
        for (int i = 0; i < arr.length; i++) {
            values[i] = Integer.parseInt(arr[i]);
        }
        return values;
    }

    @VisibleForTesting
    static int extractUidFromJobFileName(@NonNull File file) {
        final String fileName = file.getName();
        if (fileName.startsWith(JOB_FILE_SPLIT_PREFIX)) {
            try {
                final int subEnd = fileName.length() - 4; // -4 for ".xml"
                final int uid = Integer.parseInt(
                        fileName.substring(JOB_FILE_SPLIT_PREFIX.length(), subEnd));
                if (uid < 0) {
                    return INVALID_UID;
                }
                return uid;
            } catch (Exception e) {
                Slog.e(TAG, "Unexpected file name format", e);
            }
        }
        return INVALID_UID;
    }

    /**
     * Runnable that writes {@link #mJobSet} out to xml.
     * NOTE: This Runnable locks on mLock
     */
    private final Runnable mWriteRunnable = new Runnable() {
        private final SparseArray<AtomicFile> mJobFiles = new SparseArray<>();
        private final CopyConsumer mPersistedJobCopier = new CopyConsumer();

        class CopyConsumer implements Consumer<JobStatus> {
            private final SparseArray<List<JobStatus>> mJobStoreCopy = new SparseArray<>();
            private boolean mCopyAllJobs;

            private void prepare() {
                mCopyAllJobs = !mUseSplitFiles || mPendingJobWriteUids.get(ALL_UIDS);
                if (mUseSplitFiles) {
                    // Put the set of changed UIDs in the copy list so that we update each file,
                    // especially if we've dropped all jobs for that UID.
                    if (mPendingJobWriteUids.get(ALL_UIDS)) {
                        // ALL_UIDS is only used when we switch file splitting policy or for tests,
                        // so going through the file list here shouldn't be
                        // a large performance hit on user devices.

                        final File[] files;
                        try {
                            files = mJobFileDirectory.listFiles();
                        } catch (SecurityException e) {
                            Slog.wtf(TAG, "Not allowed to read job file directory", e);
                            return;
                        }
                        if (files == null) {
                            Slog.wtfStack(TAG, "Couldn't get job file list");
                        } else {
                            for (File file : files) {
                                final int uid = extractUidFromJobFileName(file);
                                if (uid != INVALID_UID) {
                                    mJobStoreCopy.put(uid, new ArrayList<>());
                                }
                            }
                        }
                    } else {
                        for (int i = 0; i < mPendingJobWriteUids.size(); ++i) {
                            mJobStoreCopy.put(mPendingJobWriteUids.keyAt(i), new ArrayList<>());
                        }
                    }
                } else {
                    // Single file mode.
                    // Put the catchall UID in the copy list so that we update the single file,
                    // especially if we've dropped all persisted jobs.
                    mJobStoreCopy.put(ALL_UIDS, new ArrayList<>());
                }
            }

            @Override
            public void accept(JobStatus jobStatus) {
                final int uid = mUseSplitFiles ? jobStatus.getUid() : ALL_UIDS;
                if (jobStatus.isPersisted() && (mCopyAllJobs || mPendingJobWriteUids.get(uid))) {
                    List<JobStatus> uidJobList = mJobStoreCopy.get(uid);
                    if (uidJobList == null) {
                        uidJobList = new ArrayList<>();
                        mJobStoreCopy.put(uid, uidJobList);
                    }
                    uidJobList.add(new JobStatus(jobStatus));
                }
            }

            private void reset() {
                mJobStoreCopy.clear();
            }
        }
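
        // CopyConsumer above snapshots only the uids flagged in mPendingJobWriteUids (or every
        // persisted job when ALL_UIDS is flagged), copying each JobStatus so that run() can
        // serialize the snapshot to XML without holding mLock.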

        @Override
        public void run() {
            final long startElapsed = sElapsedRealtimeClock.millis();
            // Intentionally allow new scheduling of a write operation *before* we clone
            // the job set. If we reset it to false after cloning, there's a window in
            // which no new write will be scheduled but mLock is not held, i.e. a new
            // job might appear and fail to be recognized as needing a persist. The
            // potential cost is one redundant write of an identical set of jobs in the
            // rare case of that specific race, but by doing it this way we avoid quite
            // a bit of lock contention.
            synchronized (mWriteScheduleLock) {
                mWriteScheduled = false;
                if (mWriteInProgress) {
                    // Another runnable is currently writing. Postpone this new write task.
                    maybeWriteStatusToDiskAsync();
                    return;
                }
                mWriteInProgress = true;
            }
            final boolean useSplitFiles;
            synchronized (mLock) {
                // Clone the jobs so we can release the lock before writing.
                useSplitFiles = mUseSplitFiles;
                mPersistedJobCopier.prepare();
                mJobSet.forEachJob(null, mPersistedJobCopier);
                mPendingJobWriteUids.clear();
            }
            mPersistInfo.countAllJobsSaved = 0;
            mPersistInfo.countSystemServerJobsSaved = 0;
            mPersistInfo.countSystemSyncManagerJobsSaved = 0;
            for (int i = mPersistedJobCopier.mJobStoreCopy.size() - 1; i >= 0; --i) {
                AtomicFile file;
                if (useSplitFiles) {
                    final int uid = mPersistedJobCopier.mJobStoreCopy.keyAt(i);
                    file = mJobFiles.get(uid);
                    if (file == null) {
                        file = createJobFile(JOB_FILE_SPLIT_PREFIX + uid);
                        mJobFiles.put(uid, file);
                    }
                } else {
                    file = mJobsFile;
                }
                if (DEBUG) {
                    Slog.d(TAG, "Writing for " + mPersistedJobCopier.mJobStoreCopy.keyAt(i)
                            + " to " + file.getBaseFile().getName() + ": "
                            + mPersistedJobCopier.mJobStoreCopy.valueAt(i).size() + " jobs");
                }
                writeJobsMapImpl(file, mPersistedJobCopier.mJobStoreCopy.valueAt(i));
            }
            if (DEBUG) {
                Slog.v(TAG, "Finished writing, took " + (sElapsedRealtimeClock.millis()
                        - startElapsed) + "ms");
            }
            mPersistedJobCopier.reset();
            if (!useSplitFiles) {
                mJobFiles.clear();
            }
            // Update the last modified time of the directory to aid in RTC time verification
            // (see the JobStore constructor).
            mJobFileDirectory.setLastModified(sSystemClock.millis());
            synchronized (mWriteScheduleLock) {
                if (mSplitFileMigrationNeeded) {
                    final File[] files = mJobFileDirectory.listFiles();
                    for (File file : files) {
                        if (useSplitFiles) {
                            if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                                // Delete the now unused file so there's no confusion in the future.
                                file.delete();
                            }
                        } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                            // Delete the now unused file so there's no confusion in the future.
                            file.delete();
                        }
                    }
                }
                mWriteInProgress = false;
                mWriteScheduleLock.notifyAll();
            }
        }

        private void writeJobsMapImpl(@NonNull AtomicFile file, @NonNull List<JobStatus> jobList) {
            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            mEventLogger.setStartTime(SystemClock.uptimeMillis());
            try (FileOutputStream fos = file.startWrite()) {
                TypedXmlSerializer out = Xml.resolveSerializer(fos);
                out.startDocument(null, true);
                out.setFeature("http://xmlpull.org/v1/doc/features.html#indent-output", true);

                out.startTag(null, XML_TAG_JOB_INFO);
                out.attribute(null, "version", Integer.toString(JOBS_FILE_VERSION));
                for (int i = 0; i < jobList.size(); i++) {
                    JobStatus jobStatus = jobList.get(i);
                    if (DEBUG) {
                        Slog.d(TAG, "Saving job " + jobStatus.getJobId());
                    }
                    out.startTag(null, XML_TAG_JOB);
                    addAttributesToJobTag(out, jobStatus);
                    writeConstraintsToXml(out, jobStatus);
                    writeExecutionCriteriaToXml(out, jobStatus);
                    writeBundleToXml(jobStatus.getJob().getExtras(), out);
                    writeJobWorkItemsToXml(out, jobStatus);
                    writeDebugInfoToXml(out, jobStatus);
                    out.endTag(null, XML_TAG_JOB);

                    numJobs++;
                    if (jobStatus.getUid() == Process.SYSTEM_UID) {
                        numSystemJobs++;
                        if (isSyncJob(jobStatus)) {
                            numSyncJobs++;
                        }
                    }
                }
                out.endTag(null, XML_TAG_JOB_INFO);
                out.endDocument();

                file.finishWrite(fos);
            } catch (IOException e) {
                if (DEBUG) {
                    Slog.v(TAG, "Error writing out job data.", e);
                }
            } catch (XmlPullParserException e) {
                if (DEBUG) {
                    Slog.d(TAG, "Error persisting bundle.", e);
                }
            } finally {
                mPersistInfo.countAllJobsSaved += numJobs;
                mPersistInfo.countSystemServerJobsSaved += numSystemJobs;
                mPersistInfo.countSystemSyncManagerJobsSaved += numSyncJobs;
            }
        }
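
        // writeJobsMapImpl() relies on the standard AtomicFile pattern for crash safety:
        // startWrite() returns a stream backed by a temporary file, and only a successful
        // finishWrite() commits it over the previous contents, so a write that fails midway
        // never corrupts the existing jobs file.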

        /**
         * Write out a tag with data comprising the required fields and bias of this job and
         * its client.
         */
        private void addAttributesToJobTag(TypedXmlSerializer out, JobStatus jobStatus)
                throws IOException {
            out.attribute(null, "jobid", Integer.toString(jobStatus.getJobId()));
            out.attribute(null, "package", jobStatus.getServiceComponent().getPackageName());
            out.attribute(null, "class", jobStatus.getServiceComponent().getClassName());
            if (jobStatus.getSourcePackageName() != null) {
                out.attribute(null, "sourcePackageName", jobStatus.getSourcePackageName());
            }
            if (jobStatus.getNamespace() != null) {
                out.attribute(null, "namespace", jobStatus.getNamespace());
            }
            if (jobStatus.getSourceTag() != null) {
                out.attribute(null, "sourceTag", jobStatus.getSourceTag());
            }
            out.attribute(null, "sourceUserId", String.valueOf(jobStatus.getSourceUserId()));
            out.attribute(null, "uid", Integer.toString(jobStatus.getUid()));
            out.attribute(null, "bias", String.valueOf(jobStatus.getBias()));
            out.attribute(null, "priority", String.valueOf(jobStatus.getJob().getPriority()));
            out.attribute(null, "flags", String.valueOf(jobStatus.getFlags()));
            if (jobStatus.getInternalFlags() != 0) {
                out.attribute(null, "internalFlags", String.valueOf(jobStatus.getInternalFlags()));
            }

            out.attribute(null, "lastSuccessfulRunTime",
                    String.valueOf(jobStatus.getLastSuccessfulRunTime()));
            out.attribute(null, "lastFailedRunTime",
                    String.valueOf(jobStatus.getLastFailedRunTime()));

            out.attributeLong(null, "cumulativeExecutionTime",
                    jobStatus.getCumulativeExecutionTimeMs());
        }

        private void writeBundleToXml(PersistableBundle extras, XmlSerializer out)
                throws IOException, XmlPullParserException {
            out.startTag(null, XML_TAG_EXTRAS);
            PersistableBundle extrasCopy = deepCopyBundle(extras, 10);
            extrasCopy.saveToXml(out);
            out.endTag(null, XML_TAG_EXTRAS);
        }

        private PersistableBundle deepCopyBundle(PersistableBundle bundle, int maxDepth) {
            if (maxDepth <= 0) {
                return null;
            }
            PersistableBundle copy = (PersistableBundle) bundle.clone();
            Set<String> keySet = bundle.keySet();
            for (String key : keySet) {
                Object o = copy.get(key);
                if (o instanceof PersistableBundle) {
                    PersistableBundle bCopy = deepCopyBundle((PersistableBundle) o, maxDepth - 1);
                    copy.putPersistableBundle(key, bCopy);
                }
            }
            return copy;
        }
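
        // Note on deepCopyBundle(): nesting is capped at 10 levels, so anything deeper is
        // effectively truncated (the over-deep sub-bundle is replaced with null), which bounds
        // both the recursion and the size of the persisted <extras> payload.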

        /**
         * Write out a tag with data identifying this job's constraints. If a constraint isn't here
         * it doesn't apply.
         * TODO: b/183455312 Update this code to use proper serialization for NetworkRequest,
         *       because the store currently does not include everything (UIDs, bandwidth,
         *       signal strength, etc. are lost).
         */
        private void writeConstraintsToXml(TypedXmlSerializer out, JobStatus jobStatus)
                throws IOException {
            out.startTag(null, XML_TAG_PARAMS_CONSTRAINTS);
            final JobInfo job = jobStatus.getJob();
            if (jobStatus.hasConnectivityConstraint()) {
                final NetworkRequest network = jobStatus.getJob().getRequiredNetwork();
                out.attribute(null, "net-capabilities-csv", intArrayToString(
                        network.getCapabilities()));
                out.attribute(null, "net-forbidden-capabilities-csv", intArrayToString(
                        network.getForbiddenCapabilities()));
                out.attribute(null, "net-transport-types-csv", intArrayToString(
                        network.getTransportTypes()));
                if (job.getEstimatedNetworkDownloadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-download-bytes",
                            job.getEstimatedNetworkDownloadBytes());
                }
                if (job.getEstimatedNetworkUploadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-upload-bytes",
                            job.getEstimatedNetworkUploadBytes());
                }
                if (job.getMinimumNetworkChunkBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "minimum-network-chunk-bytes",
                            job.getMinimumNetworkChunkBytes());
                }
            }
            if (job.isRequireDeviceIdle()) {
                out.attribute(null, "idle", Boolean.toString(true));
            }
            if (job.isRequireCharging()) {
                out.attribute(null, "charging", Boolean.toString(true));
            }
            if (job.isRequireBatteryNotLow()) {
                out.attribute(null, "battery-not-low", Boolean.toString(true));
            }
            if (job.isRequireStorageNotLow()) {
                out.attribute(null, "storage-not-low", Boolean.toString(true));
            }
            out.endTag(null, XML_TAG_PARAMS_CONSTRAINTS);
        }

        private void writeExecutionCriteriaToXml(XmlSerializer out, JobStatus jobStatus)
                throws IOException {
            final JobInfo job = jobStatus.getJob();
            if (jobStatus.getJob().isPeriodic()) {
                out.startTag(null, XML_TAG_PERIODIC);
                out.attribute(null, "period", Long.toString(job.getIntervalMillis()));
                out.attribute(null, "flex", Long.toString(job.getFlexMillis()));
            } else {
                out.startTag(null, XML_TAG_ONEOFF);
            }

            // If we still have the persisted times, we need to record those directly because
            // we haven't yet been able to calculate the usual elapsed-timebase bounds
            // correctly due to wall-clock uncertainty.
            Pair<Long, Long> utcJobTimes = jobStatus.getPersistedUtcTimes();
            if (DEBUG && utcJobTimes != null) {
                Slog.i(TAG, "storing original UTC timestamps for " + jobStatus);
            }

            final long nowRTC = sSystemClock.millis();
            final long nowElapsed = sElapsedRealtimeClock.millis();
            if (jobStatus.hasDeadlineConstraint()) {
                // Wall clock deadline.
                final long deadlineWallclock = (utcJobTimes == null)
                        ? nowRTC + (jobStatus.getLatestRunTimeElapsed() - nowElapsed)
                        : utcJobTimes.second;
                out.attribute(null, "deadline", Long.toString(deadlineWallclock));
            }
            if (jobStatus.hasTimingDelayConstraint()) {
                final long delayWallclock = (utcJobTimes == null)
                        ? nowRTC + (jobStatus.getEarliestRunTime() - nowElapsed)
                        : utcJobTimes.first;
                out.attribute(null, "delay", Long.toString(delayWallclock));
            }

            // Only write out back-off policy if it differs from the default.
            // This also helps the case where the job is idle -> these aren't allowed to specify
            // back-off.
            if (jobStatus.getJob().getInitialBackoffMillis()
                    != JobInfo.DEFAULT_INITIAL_BACKOFF_MILLIS
                    || jobStatus.getJob().getBackoffPolicy() != JobInfo.DEFAULT_BACKOFF_POLICY) {
                out.attribute(null, "backoff-policy", Integer.toString(job.getBackoffPolicy()));
                out.attribute(null, "initial-backoff", Long.toString(job.getInitialBackoffMillis()));
            }
            if (job.isPeriodic()) {
                out.endTag(null, XML_TAG_PERIODIC);
            } else {
                out.endTag(null, XML_TAG_ONEOFF);
            }
        }

        private void writeDebugInfoToXml(@NonNull TypedXmlSerializer out,
                @NonNull JobStatus jobStatus) throws IOException, XmlPullParserException {
            final ArraySet<String> debugTags = jobStatus.getJob().getDebugTagsArraySet();
            final int numTags = debugTags.size();
            final String traceTag = jobStatus.getJob().getTraceTag();
            if (traceTag == null && numTags == 0) {
                return;
            }
            out.startTag(null, XML_TAG_DEBUG_INFO);
            if (traceTag != null) {
                out.attribute(null, "trace-tag", traceTag);
            }
            for (int i = 0; i < numTags; ++i) {
                out.startTag(null, XML_TAG_DEBUG_TAG);
                out.attribute(null, "tag", debugTags.valueAt(i));
                out.endTag(null, XML_TAG_DEBUG_TAG);
            }
            out.endTag(null, XML_TAG_DEBUG_INFO);
        }

        private void writeJobWorkItemsToXml(@NonNull TypedXmlSerializer out,
                @NonNull JobStatus jobStatus) throws IOException, XmlPullParserException {
            // Write executing first since they're technically at the front of the queue.
            writeJobWorkItemListToXml(out, jobStatus.executingWork);
            writeJobWorkItemListToXml(out, jobStatus.pendingWork);
        }

        private void writeJobWorkItemListToXml(@NonNull TypedXmlSerializer out,
                @Nullable List<JobWorkItem> jobWorkItems)
                throws IOException, XmlPullParserException {
            if (jobWorkItems == null) {
                return;
            }
            // Write the items in list order to maintain the enqueue order.
            final int size = jobWorkItems.size();
            for (int i = 0; i < size; ++i) {
                final JobWorkItem item = jobWorkItems.get(i);
                if (item.getGrants() != null) {
                    // We currently don't allow persisting jobs when grants are involved.
                    // TODO(256618122): allow persisting JobWorkItems with grant flags
                    continue;
                }
                if (item.getIntent() != null) {
                    // Intent.saveToXml() doesn't persist everything, so we shouldn't attempt to
                    // persist these JobWorkItems at all.
                    Slog.wtf(TAG, "Encountered JobWorkItem with Intent in persisting list");
                    continue;
                }
                out.startTag(null, XML_TAG_JOB_WORK_ITEM);
                out.attributeInt(null, "delivery-count", item.getDeliveryCount());
                if (item.getEstimatedNetworkDownloadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-download-bytes",
                            item.getEstimatedNetworkDownloadBytes());
                }
                if (item.getEstimatedNetworkUploadBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "estimated-upload-bytes",
                            item.getEstimatedNetworkUploadBytes());
                }
                if (item.getMinimumNetworkChunkBytes() != JobInfo.NETWORK_BYTES_UNKNOWN) {
                    out.attributeLong(null, "minimum-network-chunk-bytes",
                            item.getMinimumNetworkChunkBytes());
                }
                writeBundleToXml(item.getExtras(), out);
                out.endTag(null, XML_TAG_JOB_WORK_ITEM);
            }
        }
    };
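
    // Worked example for the conversion below, with illustrative numbers: if a persisted job
    // has rtcTimes = (now + 60s, now + 120s), it becomes runnable 60s from now in the elapsed
    // timebase. If the wall clock jumped backwards while we were shut down, the
    // Math.max(..., 0) clamps make the delta 0, so the job is treated as runnable (or overdue)
    // immediately instead of inheriting a negative delay.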

    /**
     * Translate the supplied RTC times to the elapsed timebase, with clamping appropriate
     * to interpreting them as a job's delay + deadline times for alarm-setting purposes.
     * @param rtcTimes a Pair<Long, Long> in which {@code first} is the "delay" earliest
     *     allowable runtime for the job, and {@code second} is the "deadline" time at which
     *     the job becomes overdue.
     */
    private static Pair<Long, Long> convertRtcBoundsToElapsed(Pair<Long, Long> rtcTimes,
            long nowElapsed) {
        final long nowWallclock = sSystemClock.millis();
        final long earliest = (rtcTimes.first > JobStatus.NO_EARLIEST_RUNTIME)
                ? nowElapsed + Math.max(rtcTimes.first - nowWallclock, 0)
                : JobStatus.NO_EARLIEST_RUNTIME;
        final long latest = (rtcTimes.second < JobStatus.NO_LATEST_RUNTIME)
                ? nowElapsed + Math.max(rtcTimes.second - nowWallclock, 0)
                : JobStatus.NO_LATEST_RUNTIME;
        return Pair.create(earliest, latest);
    }

    private static boolean isSyncJob(JobStatus status) {
        return com.android.server.content.SyncJobService.class.getName()
                .equals(status.getServiceComponent().getClassName());
    }

    /**
     * Runnable that reads the list of persisted jobs from xml. This is run once at start up, so
     * it doesn't need to go through
     * {@link JobStore#add(com.android.server.job.controllers.JobStatus)}.
     */
    private final class ReadJobMapFromDiskRunnable implements Runnable {
        private final JobSet jobSet;
        private final boolean rtcGood;
        private final CountDownLatch mCompletionLatch;

        /**
         * @param jobSet Reference to the (empty) set of JobStatus objects that back the JobStore,
         *               so that after disk read we can populate it directly.
         */
        ReadJobMapFromDiskRunnable(JobSet jobSet, boolean rtcIsGood) {
            this(jobSet, rtcIsGood, null);
        }

        ReadJobMapFromDiskRunnable(JobSet jobSet, boolean rtcIsGood,
                @Nullable CountDownLatch completionLatch) {
            this.jobSet = jobSet;
            this.rtcGood = rtcIsGood;
            this.mCompletionLatch = completionLatch;
        }

        @Override
        public void run() {
            if (!mJobFileDirectory.isDirectory()) {
                Slog.wtf(TAG, "jobs directory isn't a directory O.O");
                mJobFileDirectory.mkdirs();
                return;
            }

            int numJobs = 0;
            int numSystemJobs = 0;
            int numSyncJobs = 0;
            List<JobStatus> jobs;
            final File[] files;
            try {
                files = mJobFileDirectory.listFiles();
            } catch (SecurityException e) {
                Slog.wtf(TAG, "Not allowed to read job file directory", e);
                return;
            }
            if (files == null) {
                Slog.wtfStack(TAG, "Couldn't get job file list");
                return;
            }
            boolean needFileMigration = false;
            long nowElapsed = sElapsedRealtimeClock.millis();
            int numDuplicates = 0;
            synchronized (mLock) {
                for (File file : files) {
                    if (!file.getName().equals("jobs.xml")
                            && !SPLIT_FILE_PATTERN.matcher(file.getName()).matches()) {
                        // Skip temporary or other files.
                        continue;
                    }
                    final AtomicFile aFile = createJobFile(file);
                    try (FileInputStream fis = aFile.openRead()) {
                        jobs = readJobMapImpl(fis, rtcGood, nowElapsed);
                        if (jobs != null) {
                            for (int i = 0; i < jobs.size(); i++) {
                                JobStatus js = jobs.get(i);
                                final JobStatus existingJob = this.jobSet.get(
                                        js.getUid(), js.getNamespace(), js.getJobId());
                                if (existingJob != null) {
                                    numDuplicates++;
                                    // Jobs are meant to have unique uid-namespace-jobId
                                    // combinations, but we've somehow read multiple jobs with the
                                    // same combination. Drop the latter one since keeping both
                                    // will result in other issues.
                                    continue;
                                }
                                js.prepareLocked();
                                js.enqueueTime = nowElapsed;
                                this.jobSet.add(js);

                                numJobs++;
                                if (js.getUid() == Process.SYSTEM_UID) {
                                    numSystemJobs++;
                                    if (isSyncJob(js)) {
                                        numSyncJobs++;
                                    }
                                }
                            }
                        }
                    } catch (FileNotFoundException e) {
                        // mJobFileDirectory.listFiles() gave us this file...why can't we find it???
                        Slog.e(TAG, "Could not find jobs file: " + file.getName());
                    } catch (XmlPullParserException | IOException e) {
                        Slog.wtf(TAG, "Error in " + file.getName(), e);
                    } catch (Exception e) {
                        // Crashing at this point would result in a boot loop, so live with a
                        // generic Exception for system stability's sake.
                        Slog.wtf(TAG, "Unexpected exception", e);
                    }
                    if (mUseSplitFiles) {
                        if (!file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                            // We're supposed to be using the split file architecture,
                            // but we still have the old job file around.
                            // Fully migrate and remove the old file.
                            needFileMigration = true;
                        }
                    } else if (file.getName().startsWith(JOB_FILE_SPLIT_PREFIX)) {
                        // We're supposed to be using the legacy single file architecture,
                        // but we still have some job split files around. Fully migrate
                        // and remove the split files.
                        needFileMigration = true;
                    }
                }
                if (mPersistInfo.countAllJobsLoaded < 0) { // Only set them once.
                    mPersistInfo.countAllJobsLoaded = numJobs;
                    mPersistInfo.countSystemServerJobsLoaded = numSystemJobs;
                    mPersistInfo.countSystemSyncManagerJobsLoaded = numSyncJobs;
                }
            }
            Slog.i(TAG, "Read " + numJobs + " jobs");
            if (needFileMigration) {
                migrateJobFilesAsync();
            }

            if (numDuplicates > 0) {
                Slog.wtf(TAG, "Encountered " + numDuplicates + " duplicate persisted jobs");
            }

            // Log the count immediately after loading from boot.
            mCurrentJobSetSize = numJobs;
            mScheduledJob30MinHighWaterMark = mCurrentJobSetSize;
            mScheduledJobHighWaterMarkLoggingRunnable.run();

            if (mCompletionLatch != null) {
                mCompletionLatch.countDown();
            }
        }

        /** Returns the {@link String#intern() interned} String if it's not null. */
        @Nullable
        private static String intern(@Nullable String val) {
            return val == null ? null : val.intern();
        }

        private List<JobStatus> readJobMapImpl(InputStream fis, boolean rtcIsGood, long nowElapsed)
                throws XmlPullParserException, IOException {
            TypedXmlPullParser parser = Xml.resolvePullParser(fis);

            int eventType = parser.getEventType();
            while (eventType != XmlPullParser.START_TAG
                    && eventType != XmlPullParser.END_DOCUMENT) {
                eventType = parser.next();
                if (DEBUG) {
                    Slog.d(TAG, "Start tag: " + parser.getName());
                }
            }
            if (eventType == XmlPullParser.END_DOCUMENT) {
                if (DEBUG) {
                    Slog.d(TAG, "No persisted jobs.");
                }
                return null;
            }

            String tagName = parser.getName();
            if (XML_TAG_JOB_INFO.equals(tagName)) {
                final List<JobStatus> jobs = new ArrayList<JobStatus>();
                final int version = parser.getAttributeInt(null, "version");
                // Read in version info.
                if (version > JOBS_FILE_VERSION || version < 0) {
                    Slog.d(TAG, "Invalid version number, aborting jobs file read.");
                    return null;
                }

                eventType = parser.next();
                do {
                    // Read each <job/>
                    if (eventType == XmlPullParser.START_TAG) {
                        tagName = parser.getName();
                        // Start reading job.
                        if (XML_TAG_JOB.equals(tagName)) {
                            JobStatus persistedJob =
                                    restoreJobFromXml(rtcIsGood, parser, version, nowElapsed);
                            if (persistedJob != null) {
                                if (DEBUG) {
                                    Slog.d(TAG, "Read out " + persistedJob);
                                }
                                jobs.add(persistedJob);
                            } else {
                                Slog.d(TAG, "Error reading job from file.");
                            }
                        }
                    }
                    eventType = parser.next();
                } while (eventType != XmlPullParser.END_DOCUMENT);
                return jobs;
            }
            return null;
        }

        /**
         * @param parser Xml parser at the beginning of a "<job/>" tag. The next "parser.next()"
         *     call will take the parser into the body of the job tag.
         * @return Newly instantiated job holding all the information we just read out of the
         *     xml tag.
         */
        private JobStatus restoreJobFromXml(boolean rtcIsGood, TypedXmlPullParser parser,
                int schemaVersion, long nowElapsed) throws XmlPullParserException, IOException {
            JobInfo.Builder jobBuilder;
            int uid, sourceUserId;
            long lastSuccessfulRunTime;
            long lastFailedRunTime;
            long cumulativeExecutionTime;
            int internalFlags = 0;

            // Read out job identifier attributes and bias.
            try {
                jobBuilder = buildBuilderFromXml(parser);
                jobBuilder.setPersisted(true);
                uid = Integer.parseInt(parser.getAttributeValue(null, "uid"));

                String val;
                if (schemaVersion == 0) {
                    val = parser.getAttributeValue(null, "priority");
                    if (val != null) {
                        jobBuilder.setBias(Integer.parseInt(val));
                    }
                } else if (schemaVersion >= 1) {
                    val = parser.getAttributeValue(null, "bias");
                    if (val != null) {
                        jobBuilder.setBias(Integer.parseInt(val));
                    }
                    val = parser.getAttributeValue(null, "priority");
                    if (val != null) {
                        jobBuilder.setPriority(Integer.parseInt(val));
                    }
                }
                val = parser.getAttributeValue(null, "flags");
                if (val != null) {
                    jobBuilder.setFlags(Integer.parseInt(val));
                }
                val = parser.getAttributeValue(null, "internalFlags");
                if (val != null) {
                    internalFlags = Integer.parseInt(val);
                }
                val = parser.getAttributeValue(null, "sourceUserId");
                sourceUserId = val == null ? -1 : Integer.parseInt(val);

                val = parser.getAttributeValue(null, "lastSuccessfulRunTime");
                lastSuccessfulRunTime = val == null ? 0 : Long.parseLong(val);

                val = parser.getAttributeValue(null, "lastFailedRunTime");
                lastFailedRunTime = val == null ? 0 : Long.parseLong(val);

                cumulativeExecutionTime =
                        parser.getAttributeLong(null, "cumulativeExecutionTime", 0);
            } catch (NumberFormatException e) {
                Slog.e(TAG, "Error parsing job's required fields, skipping");
                return null;
            }

            String sourcePackageName = parser.getAttributeValue(null, "sourcePackageName");
            final String namespace = intern(parser.getAttributeValue(null, "namespace"));
            final String sourceTag = intern(parser.getAttributeValue(null, "sourceTag"));

            int eventType;
            // Read out constraints tag.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);  // Push through to next START_TAG.

            if (!(eventType == XmlPullParser.START_TAG
                    && XML_TAG_PARAMS_CONSTRAINTS.equals(parser.getName()))) {
                // Expecting a <constraints> start tag.
                return null;
            }
            try {
                buildConstraintsFromXml(jobBuilder, parser);
            } catch (NumberFormatException e) {
                Slog.d(TAG, "Error reading constraints, skipping.");
                return null;
            } catch (XmlPullParserException e) {
                Slog.d(TAG, "Error Parser Exception.", e);
                return null;
            } catch (IOException e) {
                Slog.d(TAG, "Error I/O Exception.", e);
                return null;
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Constraints contained invalid data", e);
                return null;
            }

            parser.next(); // Consume </constraints>

            // Read out execution parameters tag.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);
            if (eventType != XmlPullParser.START_TAG) {
                return null;
            }

            // Tuple of (earliest runtime, latest runtime) in UTC.
            final Pair<Long, Long> rtcRuntimes = buildRtcExecutionTimesFromXml(parser);

            Pair<Long, Long> elapsedRuntimes = convertRtcBoundsToElapsed(rtcRuntimes, nowElapsed);

            if (XML_TAG_PERIODIC.equals(parser.getName())) {
                try {
                    String val = parser.getAttributeValue(null, "period");
                    final long periodMillis = Long.parseLong(val);
                    val = parser.getAttributeValue(null, "flex");
                    final long flexMillis = (val != null) ? Long.valueOf(val) : periodMillis;
                    jobBuilder.setPeriodic(periodMillis, flexMillis);
                    // As a sanity check, cap the recreated run time to be no later than flex+period
                    // from now. This is the latest the periodic could be pushed out. This could
                    // happen if the periodic ran early (at flex time before period), and then the
                    // device rebooted.
                    if (elapsedRuntimes.second > nowElapsed + periodMillis + flexMillis) {
                        final long clampedLateRuntimeElapsed = nowElapsed + flexMillis
                                + periodMillis;
                        final long clampedEarlyRuntimeElapsed = clampedLateRuntimeElapsed
                                - flexMillis;
                        Slog.w(TAG,
                                String.format("Periodic job for uid='%d' persisted run-time is"
                                                + " too big [%s, %s]. Clamping to [%s,%s]",
                                        uid,
                                        DateUtils.formatElapsedTime(elapsedRuntimes.first / 1000),
                                        DateUtils.formatElapsedTime(elapsedRuntimes.second / 1000),
                                        DateUtils.formatElapsedTime(
                                                clampedEarlyRuntimeElapsed / 1000),
                                        DateUtils.formatElapsedTime(
                                                clampedLateRuntimeElapsed / 1000))
                        );
                        elapsedRuntimes =
                                Pair.create(clampedEarlyRuntimeElapsed, clampedLateRuntimeElapsed);
                    }
                } catch (NumberFormatException e) {
                    Slog.d(TAG, "Error reading periodic execution criteria, skipping.");
                    return null;
                }
            } else if (XML_TAG_ONEOFF.equals(parser.getName())) {
                try {
                    if (elapsedRuntimes.first != JobStatus.NO_EARLIEST_RUNTIME) {
                        jobBuilder.setMinimumLatency(elapsedRuntimes.first - nowElapsed);
                    }
                    if (elapsedRuntimes.second != JobStatus.NO_LATEST_RUNTIME) {
                        jobBuilder.setOverrideDeadline(
                                elapsedRuntimes.second - nowElapsed);
                    }
                } catch (NumberFormatException e) {
                    Slog.d(TAG, "Error reading job execution criteria, skipping.");
                    return null;
                }
            } else {
                if (DEBUG) {
                    Slog.d(TAG, "Invalid parameter tag, skipping - " + parser.getName());
                }
                // Expecting a parameters start tag.
                return null;
            }
            maybeBuildBackoffPolicyFromXml(jobBuilder, parser);

            parser.nextTag(); // Consume parameters end tag.

            // Read out extras Bundle.
            do {
                eventType = parser.next();
            } while (eventType == XmlPullParser.TEXT);
            if (!(eventType == XmlPullParser.START_TAG
                    && XML_TAG_EXTRAS.equals(parser.getName()))) {
                if (DEBUG) {
                    Slog.d(TAG, "Error reading extras, skipping.");
                }
                return null;
            }

            final PersistableBundle extras;
            try {
                extras = PersistableBundle.restoreFromXml(parser);
                jobBuilder.setExtras(extras);
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Persisted extras contained invalid data", e);
                return null;
            }
            eventType = parser.nextTag(); // Consume </extras>

            List<JobWorkItem> jobWorkItems = null;
            if (eventType == XmlPullParser.START_TAG
                    && XML_TAG_JOB_WORK_ITEM.equals(parser.getName())) {
                jobWorkItems = readJobWorkItemsFromXml(parser);
            }

            if (eventType == XmlPullParser.START_TAG
                    && XML_TAG_DEBUG_INFO.equals(parser.getName())) {
                try {
                    jobBuilder.setTraceTag(parser.getAttributeValue(null, "trace-tag"));
                } catch (Exception e) {
                    Slog.wtf(TAG, "Invalid trace tag persisted to disk", e);
                }
                parser.next();
                jobBuilder.addDebugTags(readDebugTagsFromXml(parser));
                eventType = parser.nextTag(); // Consume </debug-info>
            }

            final JobInfo builtJob;
            try {
                // Don't perform prefetch-deadline check here. Apps targeting S- shouldn't have
                // any prefetch-with-deadline jobs accidentally dropped. It's not worth doing
                // target SDK version checks here for apps targeting T+. There's no way for an
                // app to keep a perpetually scheduled prefetch job with a deadline. Prefetch jobs
                // with a deadline would run and then any newly scheduled prefetch jobs wouldn't
                // have a deadline. If a job is rescheduled (via jobFinished(true) or onStopJob()'s
                // return value), the deadline is dropped. Periodic jobs require all constraints
                // to be met, so there's no issue with their deadlines.
                // The same logic applies for other target SDK-based validation checks.
                builtJob = jobBuilder.build(false, false, false, false);
            } catch (Exception e) {
                Slog.w(TAG, "Unable to build job from XML, ignoring: " + jobBuilder.summarize(), e);
                return null;
            }

            // Migrate sync jobs forward from earlier, incomplete representation
            if ("android".equals(sourcePackageName)
                    && extras != null
                    && extras.getBoolean("SyncManagerJob", false)) {
                sourcePackageName = extras.getString("owningPackage", sourcePackageName);
                if (DEBUG) {
                    Slog.i(TAG, "Fixing up sync job source package name from 'android' to '"
                            + sourcePackageName + "'");
                }
            }

            // And now we're done
            final int appBucket = JobSchedulerService.standbyBucketForPackage(sourcePackageName,
                    sourceUserId, nowElapsed);
            JobStatus js = new JobStatus(
                    builtJob, uid, intern(sourcePackageName), sourceUserId,
                    appBucket, namespace, sourceTag,
                    elapsedRuntimes.first, elapsedRuntimes.second,
                    lastSuccessfulRunTime, lastFailedRunTime, cumulativeExecutionTime,
                    (rtcIsGood) ? null : rtcRuntimes, internalFlags, /* dynamicConstraints */ 0);
            if (jobWorkItems != null) {
                for (int i = 0; i < jobWorkItems.size(); ++i) {
                    js.enqueueWorkLocked(jobWorkItems.get(i));
                }
            }
            return js;
        }

        private JobInfo.Builder buildBuilderFromXml(TypedXmlPullParser parser)
                throws XmlPullParserException {
            // Pull out required fields from <job> attributes.
            int jobId = parser.getAttributeInt(null, "jobid");
            String packageName = intern(parser.getAttributeValue(null, "package"));
            String className = intern(parser.getAttributeValue(null, "class"));
            ComponentName cname = new ComponentName(packageName, className);

            return new JobInfo.Builder(jobId, cname);
        }
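
        // The R-era "net-capabilities" attribute handled below is a packed bit mask. Checking
        // the example from the javadoc: capabilities {4, 14, 15} pack as
        // (1L << 4) | (1L << 14) | (1L << 15) = 16 + 16384 + 32768 = 49168, and
        // BitUtils.unpackBits(49168L) expands back to {4, 14, 15}.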
         */
        private void buildConstraintsFromXml(JobInfo.Builder jobBuilder, TypedXmlPullParser parser)
                throws XmlPullParserException, IOException {
            String val;
            String netCapabilitiesLong = null;
            String netForbiddenCapabilitiesLong = null;
            String netTransportTypesLong = null;

            final String netCapabilitiesIntArray = parser.getAttributeValue(
                    null, "net-capabilities-csv");
            final String netForbiddenCapabilitiesIntArray = parser.getAttributeValue(
                    null, "net-forbidden-capabilities-csv");
            final String netTransportTypesIntArray = parser.getAttributeValue(
                    null, "net-transport-types-csv");
            if (netCapabilitiesIntArray == null || netTransportTypesIntArray == null) {
                netCapabilitiesLong = parser.getAttributeValue(null, "net-capabilities");
                netForbiddenCapabilitiesLong = parser.getAttributeValue(
                        null, "net-unwanted-capabilities");
                netTransportTypesLong = parser.getAttributeValue(null, "net-transport-types");
            }

            if ((netCapabilitiesIntArray != null) && (netTransportTypesIntArray != null)) {
                // S+ format. No capability or transport validation since the values should be in
                // line with what's defined in the Connectivity mainline module.
                final NetworkRequest.Builder builder = new NetworkRequest.Builder()
                        .clearCapabilities();

                for (int capability : stringToIntArray(netCapabilitiesIntArray)) {
                    builder.addCapability(capability);
                }

                for (int forbiddenCapability : stringToIntArray(netForbiddenCapabilitiesIntArray)) {
                    builder.addForbiddenCapability(forbiddenCapability);
                }

                for (int transport : stringToIntArray(netTransportTypesIntArray)) {
                    builder.addTransportType(transport);
                }
                jobBuilder
                        .setRequiredNetwork(builder.build())
                        .setEstimatedNetworkBytes(
                                parser.getAttributeLong(null,
                                        "estimated-download-bytes", JobInfo.NETWORK_BYTES_UNKNOWN),
                                parser.getAttributeLong(null,
                                        "estimated-upload-bytes", JobInfo.NETWORK_BYTES_UNKNOWN))
                        .setMinimumNetworkChunkBytes(
                                parser.getAttributeLong(null,
                                        "minimum-network-chunk-bytes",
                                        JobInfo.NETWORK_BYTES_UNKNOWN));
            } else if (netCapabilitiesLong != null && netTransportTypesLong != null) {
                // Format used on R- builds. Drop any unexpected capabilities and transports.
                final NetworkRequest.Builder builder = new NetworkRequest.Builder()
                        .clearCapabilities();
                final int maxNetCapabilityInR = NET_CAPABILITY_TEMPORARILY_NOT_METERED;
                // We're okay throwing NFE here; caught by caller
                for (int capability : BitUtils.unpackBits(Long.parseLong(
                        netCapabilitiesLong))) {
                    if (capability <= maxNetCapabilityInR) {
                        builder.addCapability(capability);
                    }
                }
                for (int forbiddenCapability : BitUtils.unpackBits(Long.parseLong(
                        netForbiddenCapabilitiesLong))) {
                    if (forbiddenCapability <= maxNetCapabilityInR) {
                        builder.addForbiddenCapability(forbiddenCapability);
                    }
                }

                final int maxTransportInR = TRANSPORT_TEST;
                for (int transport : BitUtils.unpackBits(Long.parseLong(
                        netTransportTypesLong))) {
                    if (transport <= maxTransportInR) {
                        builder.addTransportType(transport);
                    }
                }
                jobBuilder.setRequiredNetwork(builder.build());
                // Estimated bytes weren't persisted on R- builds, so no point querying for the
                // attributes here.
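                // For reference (illustrative; BitUtils.unpackBits is assumed, as used above,
                // to return the set bit positions of its argument): unpackBits(49168L) yields
                // {4, 14, 15}, which the checks above then cap at the R-era maximums.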
            } else {
                // Read legacy values.
                val = parser.getAttributeValue(null, "connectivity");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_ANY);
                }
                val = parser.getAttributeValue(null, "metered");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_METERED);
                }
                val = parser.getAttributeValue(null, "unmetered");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_UNMETERED);
                }
                val = parser.getAttributeValue(null, "not-roaming");
                if (val != null) {
                    jobBuilder.setRequiredNetworkType(JobInfo.NETWORK_TYPE_NOT_ROAMING);
                }
            }

            val = parser.getAttributeValue(null, "idle");
            if (val != null) {
                jobBuilder.setRequiresDeviceIdle(true);
            }
            val = parser.getAttributeValue(null, "charging");
            if (val != null) {
                jobBuilder.setRequiresCharging(true);
            }
            val = parser.getAttributeValue(null, "battery-not-low");
            if (val != null) {
                jobBuilder.setRequiresBatteryNotLow(true);
            }
            val = parser.getAttributeValue(null, "storage-not-low");
            if (val != null) {
                jobBuilder.setRequiresStorageNotLow(true);
            }
        }

        /**
         * Builds the back-off policy out of the params tag. These attributes may not exist,
         * depending on whether a back-off policy was set when the job was first scheduled.
         */
        private void maybeBuildBackoffPolicyFromXml(JobInfo.Builder jobBuilder,
                XmlPullParser parser) {
            String val = parser.getAttributeValue(null, "initial-backoff");
            if (val != null) {
                long initialBackoff = Long.parseLong(val);
                val = parser.getAttributeValue(null, "backoff-policy");
                int backoffPolicy = Integer.parseInt(val); // Will throw NFE which we catch higher up.
                jobBuilder.setBackoffCriteria(initialBackoff, backoffPolicy);
            }
        }

        /**
         * Extract a job's earliest/latest run time data from XML. These are returned in
         * unadjusted UTC wall clock time, because we do not yet know whether the system
         * clock is reliable for purposes of calculating deltas from 'now'.
         *
         * @param parser parser positioned at the element carrying the "delay" and "deadline"
         *               attributes
         * @return A Pair of timestamps in UTC wall-clock time. The first is the earliest
         * time at which the job is to become runnable, and the second is the deadline at
         * which it becomes overdue to execute.
         */
        private Pair<Long, Long> buildRtcExecutionTimesFromXml(TypedXmlPullParser parser) {
            // Pull out execution time data.
            final long earliestRunTimeRtc =
                    parser.getAttributeLong(null, "delay", JobStatus.NO_EARLIEST_RUNTIME);
            final long latestRunTimeRtc =
                    parser.getAttributeLong(null, "deadline", JobStatus.NO_LATEST_RUNTIME);
            return Pair.create(earliestRunTimeRtc, latestRunTimeRtc);
        }
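        // Illustrative sketch of how callers are expected to use the RTC pair (the actual
        // conversion helper lives elsewhere in this class; the clock names below are the
        // statics imported at the top of the file):
        //   Pair<Long, Long> rtc = buildRtcExecutionTimesFromXml(parser);
        //   long nowRtc = sSystemClock.millis();
        //   long nowElapsed = sElapsedRealtimeClock.millis();
        //   // Deltas from "now" survive a reboot even though absolute elapsed times do not:
        //   long earliestElapsed = (rtc.first > 0)
        //           ? nowElapsed + Math.max(rtc.first - nowRtc, 0)
        //           : JobStatus.NO_EARLIEST_RUNTIME;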
        @NonNull
        private List<JobWorkItem> readJobWorkItemsFromXml(TypedXmlPullParser parser)
                throws IOException, XmlPullParserException {
            List<JobWorkItem> jobWorkItems = new ArrayList<>();

            for (int eventType = parser.getEventType(); eventType != XmlPullParser.END_DOCUMENT;
                    eventType = parser.next()) {
                final String tagName = parser.getName();
                if (!XML_TAG_JOB_WORK_ITEM.equals(tagName)) {
                    // We're no longer operating with work items.
                    break;
                }
                try {
                    JobWorkItem jwi = readJobWorkItemFromXml(parser);
                    if (jwi != null) {
                        jobWorkItems.add(jwi);
                    }
                } catch (Exception e) {
                    // If there's an issue with one JobWorkItem, drop only the one item and not the
                    // whole job.
                    Slog.e(TAG, "Problem with persisted JobWorkItem", e);
                }
            }

            return jobWorkItems;
        }

        @Nullable
        private JobWorkItem readJobWorkItemFromXml(TypedXmlPullParser parser)
                throws IOException, XmlPullParserException {
            JobWorkItem.Builder jwiBuilder = new JobWorkItem.Builder();

            jwiBuilder
                    .setDeliveryCount(parser.getAttributeInt(null, "delivery-count"))
                    .setEstimatedNetworkBytes(
                            parser.getAttributeLong(null,
                                    "estimated-download-bytes", JobInfo.NETWORK_BYTES_UNKNOWN),
                            parser.getAttributeLong(null,
                                    "estimated-upload-bytes", JobInfo.NETWORK_BYTES_UNKNOWN))
                    .setMinimumNetworkChunkBytes(parser.getAttributeLong(null,
                            "minimum-network-chunk-bytes", JobInfo.NETWORK_BYTES_UNKNOWN));
            parser.next();
            try {
                final PersistableBundle extras = PersistableBundle.restoreFromXml(parser);
                jwiBuilder.setExtras(extras);
            } catch (IllegalArgumentException e) {
                Slog.e(TAG, "Persisted extras contained invalid data", e);
                return null;
            }

            try {
                return jwiBuilder.build();
            } catch (Exception e) {
                Slog.e(TAG, "Invalid JobWorkItem", e);
                return null;
            }
        }

        @NonNull
        private Set<String> readDebugTagsFromXml(TypedXmlPullParser parser)
                throws IOException, XmlPullParserException {
            Set<String> debugTags = new ArraySet<>();

            for (int eventType = parser.getEventType(); eventType != XmlPullParser.END_DOCUMENT;
                    eventType = parser.next()) {
                final String tagName = parser.getName();
                if (!XML_TAG_DEBUG_TAG.equals(tagName)) {
                    // We're no longer operating with debug tags.
                    break;
                }
                if (debugTags.size() < JobInfo.MAX_NUM_DEBUG_TAGS) {
                    final String debugTag;
                    try {
                        debugTag = JobInfo.validateDebugTag(parser.getAttributeValue(null, "tag"));
                    } catch (Exception e) {
                        Slog.wtf(TAG, "Invalid debug tag persisted to disk", e);
                        continue;
                    }
                    debugTags.add(debugTag);
                }
            }

            return debugTags;
        }
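        // Example of the fragments the two readers above consume (illustrative only; the
        // element names in angle brackets stand in for the values of the XML_TAG_JOB_WORK_ITEM
        // and XML_TAG_DEBUG_TAG constants, while the attribute names are the ones actually
        // read by the code):
        //   <job-work-item delivery-count="1" estimated-download-bytes="1024"
        //           estimated-upload-bytes="0" minimum-network-chunk-bytes="512">
        //       <extras />
        //   </job-work-item>
        //   <debug-tag tag="example-tag" />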
    }

    /** Set of all tracked jobs. */
    @VisibleForTesting
    public static final class JobSet {
        @VisibleForTesting // Key is the getUid() originator of the jobs in each sheaf
        final SparseArray<ArraySet<JobStatus>> mJobs;

        @VisibleForTesting // Same data but with the key as getSourceUid() of the jobs in each sheaf
        final SparseArray<ArraySet<JobStatus>> mJobsPerSourceUid;

        public JobSet() {
            mJobs = new SparseArray<ArraySet<JobStatus>>();
            mJobsPerSourceUid = new SparseArray<>();
        }

        public ArraySet<JobStatus> getJobsByUid(int uid) {
            ArraySet<JobStatus> matchingJobs = new ArraySet<>();
            getJobsByUid(uid, matchingJobs);
            return matchingJobs;
        }

        public void getJobsByUid(int uid, Set<JobStatus> insertInto) {
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                insertInto.addAll(jobs);
            }
        }

        @NonNull
        public ArraySet<JobStatus> getJobsBySourceUid(int sourceUid) {
            final ArraySet<JobStatus> result = new ArraySet<>();
            getJobsBySourceUid(sourceUid, result);
            return result;
        }

        public void getJobsBySourceUid(int sourceUid, Set<JobStatus> insertInto) {
            final ArraySet<JobStatus> jobs = mJobsPerSourceUid.get(sourceUid);
            if (jobs != null) {
                insertInto.addAll(jobs);
            }
        }

        public boolean add(JobStatus job) {
            final int uid = job.getUid();
            final int sourceUid = job.getSourceUid();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs == null) {
                jobs = new ArraySet<JobStatus>();
                mJobs.put(uid, jobs);
            }
            ArraySet<JobStatus> jobsForSourceUid = mJobsPerSourceUid.get(sourceUid);
            if (jobsForSourceUid == null) {
                jobsForSourceUid = new ArraySet<>();
                mJobsPerSourceUid.put(sourceUid, jobsForSourceUid);
            }
            final boolean added = jobs.add(job);
            final boolean addedInSource = jobsForSourceUid.add(job);
            if (added != addedInSource) {
                Slog.wtf(TAG, "mJobs and mJobsPerSourceUid mismatch; caller= " + added
                        + " source= " + addedInSource);
            }
            return added || addedInSource;
        }

        public boolean remove(JobStatus job) {
            final int uid = job.getUid();
            final ArraySet<JobStatus> jobs = mJobs.get(uid);
            final int sourceUid = job.getSourceUid();
            final ArraySet<JobStatus> jobsForSourceUid = mJobsPerSourceUid.get(sourceUid);
            final boolean didRemove = jobs != null && jobs.remove(job);
            final boolean sourceRemove = jobsForSourceUid != null && jobsForSourceUid.remove(job);
            if (didRemove != sourceRemove) {
                Slog.wtf(TAG, "Job presence mismatch; caller=" + didRemove
                        + " source=" + sourceRemove);
            }
            if (didRemove || sourceRemove) {
                // No more jobs for this uid? Let the now-empty set objects be GC'd.
                if (jobs != null && jobs.size() == 0) {
                    mJobs.remove(uid);
                }
                if (jobsForSourceUid != null && jobsForSourceUid.size() == 0) {
                    mJobsPerSourceUid.remove(sourceUid);
                }
                return true;
            }
            return false;
        }

        /**
         * Removes the jobs of all users not listed in the keepUserIds array of user ids.
         * This will remove jobs scheduled *by* and *for* any unlisted users.
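         * For example, with keepUserIds = {0}, a job scheduled by a user-10 app on behalf of
         * user 0 is removed (unlisted calling user), and a job scheduled by a user-0 app on
         * behalf of user 10 is removed as well (unlisted source user).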
         */
        public void removeJobsOfUnlistedUsers(final int[] keepUserIds) {
            final Predicate<JobStatus> noSourceUser =
                    job -> !ArrayUtils.contains(keepUserIds, job.getSourceUserId());
            final Predicate<JobStatus> noCallingUser =
                    job -> !ArrayUtils.contains(keepUserIds, job.getUserId());
            removeAll(noSourceUser.or(noCallingUser));
        }

        private void removeAll(Predicate<JobStatus> predicate) {
            for (int jobSetIndex = mJobs.size() - 1; jobSetIndex >= 0; jobSetIndex--) {
                final ArraySet<JobStatus> jobs = mJobs.valueAt(jobSetIndex);
                jobs.removeIf(predicate);
                if (jobs.size() == 0) {
                    mJobs.removeAt(jobSetIndex);
                }
            }
            for (int jobSetIndex = mJobsPerSourceUid.size() - 1; jobSetIndex >= 0; jobSetIndex--) {
                final ArraySet<JobStatus> jobs = mJobsPerSourceUid.valueAt(jobSetIndex);
                jobs.removeIf(predicate);
                if (jobs.size() == 0) {
                    mJobsPerSourceUid.removeAt(jobSetIndex);
                }
            }
        }

        public boolean contains(JobStatus job) {
            final int uid = job.getUid();
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            return jobs != null && jobs.contains(job);
        }

        public JobStatus get(int uid, @Nullable String namespace, int jobId) {
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    JobStatus job = jobs.valueAt(i);
                    if (job.getJobId() == jobId && Objects.equals(namespace, job.getNamespace())) {
                        return job;
                    }
                }
            }
            return null;
        }

        // Inefficient; use only for testing.
        public List<JobStatus> getAllJobs() {
            ArrayList<JobStatus> allJobs = new ArrayList<JobStatus>(size());
            for (int i = mJobs.size() - 1; i >= 0; i--) {
                ArraySet<JobStatus> jobs = mJobs.valueAt(i);
                if (jobs != null) {
                    // Use an indexed for loop over the ArraySet so we don't instantiate its
                    // optional collection-class iterator or go through a temporary array
                    // from toArray().
                    for (int j = jobs.size() - 1; j >= 0; j--) {
                        allJobs.add(jobs.valueAt(j));
                    }
                }
            }
            return allJobs;
        }

        public void clear() {
            mJobs.clear();
            mJobsPerSourceUid.clear();
        }

        public int size() {
            int total = 0;
            for (int i = mJobs.size() - 1; i >= 0; i--) {
                total += mJobs.valueAt(i).size();
            }
            return total;
        }

        // We only want to count the jobs that this uid has scheduled on its own
        // behalf, not those that the app has scheduled on someone else's behalf.
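        // Concretely: a proxied job, i.e. one whose getUid() differs from its getSourceUid()
        // (for example, a job scheduled on another app's behalf), does not count against the
        // scheduling uid here.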
        public int countJobsForUid(int uid) {
            int total = 0;
            ArraySet<JobStatus> jobs = mJobs.get(uid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    JobStatus job = jobs.valueAt(i);
                    if (job.getUid() == job.getSourceUid()) {
                        total++;
                    }
                }
            }
            return total;
        }

        public void forEachJob(@Nullable Predicate<JobStatus> filterPredicate,
                @NonNull Consumer<JobStatus> functor) {
            for (int uidIndex = mJobs.size() - 1; uidIndex >= 0; uidIndex--) {
                ArraySet<JobStatus> jobs = mJobs.valueAt(uidIndex);
                if (jobs != null) {
                    for (int i = jobs.size() - 1; i >= 0; i--) {
                        final JobStatus jobStatus = jobs.valueAt(i);
                        if ((filterPredicate == null) || filterPredicate.test(jobStatus)) {
                            functor.accept(jobStatus);
                        }
                    }
                }
            }
        }

        public void forEachJob(int callingUid, Consumer<JobStatus> functor) {
            ArraySet<JobStatus> jobs = mJobs.get(callingUid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    functor.accept(jobs.valueAt(i));
                }
            }
        }

        public void forEachJobForSourceUid(int sourceUid, Consumer<JobStatus> functor) {
            final ArraySet<JobStatus> jobs = mJobsPerSourceUid.get(sourceUid);
            if (jobs != null) {
                for (int i = jobs.size() - 1; i >= 0; i--) {
                    functor.accept(jobs.valueAt(i));
                }
            }
        }
    }
}
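// Usage sketch for JobSet (illustrative, test-style pseudocode in comment form; a real
// JobStatus instance would come from the JobStatus construction paths used elsewhere in
// this file):
//   JobStore.JobSet set = new JobStore.JobSet();
//   set.add(someJobStatus);
//   set.forEachJobForSourceUid(someJobStatus.getSourceUid(),
//           js -> Slog.d("JobStoreDemo", "tracked: " + js));
//   boolean removed = set.remove(someJobStatus);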