/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package com.google.android.car.kitchensink.backup;

import android.annotation.Nullable;
import android.app.backup.BackupDataInput;
import android.app.backup.BackupDataOutput;
import android.app.backup.BackupTransport;
import android.app.backup.RestoreDescription;
import android.app.backup.RestoreSet;
import android.content.ComponentName;
import android.content.Context;
import android.content.Intent;
import android.content.pm.PackageInfo;
import android.os.IBinder;
import android.os.ParcelFileDescriptor;
import android.system.ErrnoException;
import android.system.Os;
import android.system.StructStat;
import android.util.ArrayMap;
import android.util.Base64;
import android.util.Log;

import libcore.io.IoUtils;

import java.io.BufferedOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

/**
 * Debug-only {@link BackupTransport} that stores key/value and full-data backups in the
 * app's private files directory. Backup sets are laid out as
 * {@code <filesDir>/<setToken>/_delta} (one file per Base64-encoded key) and
 * {@code <filesDir>/<setToken>/_full} (one tarball per package).
 *
 * <p>Not thread-safe: the backup manager drives one operation at a time.
 */
public final class KitchenSinkBackupTransport extends BackupTransport {
    private static final String TRANSPORT_DIR_NAME =
            "com.google.android.car.kitchensink.backup.KitchenSinkBackupTransport";

    private static final String TRANSPORT_DESTINATION_STRING =
            "Backing up to debug-only private cache";

    private static final String TRANSPORT_DATA_MANAGEMENT_LABEL = "";
    private static final String FULL_DATA_DIR = "_full";
    private static final String INCREMENTAL_DIR = "_delta";
    private static final String DEFAULT_DEVICE_NAME_FOR_RESTORE_SET = "flash";
    // The currently-active restore set always has the same (nonzero) token, which is 1 in this case
    private static final long CURRENT_SET_TOKEN = 1;
    private static final String TAG = KitchenSinkBackupTransport.class.getSimpleName();
    private static final boolean DEBUG = true;
    private static final long FULL_BACKUP_SIZE_QUOTA = 25 * 1024 * 1024;
    private static final long KEY_VALUE_BACKUP_SIZE_QUOTA = 5 * 1024 * 1024;
    // set of other possible backups currently available over this transport.
    static final long[] POSSIBLE_SETS = { 2, 3, 4, 5, 6, 7, 8, 9 };
    private static final int FULL_RESTORE_BUFFER_BYTE_SIZE = 2 * 1024;
    private static final int FULL_BACKUP_BUFFER_BYTE_SIZE = 4096;

    private final Context mContext;
    private File mDataDir;
    private File mCurrentSetDir;
    private File mCurrentSetFullDir;
    private File mCurrentSetIncrementalDir;

    // Key/Value restore state
    private PackageInfo[] mRestorePackages;
    private int mRestorePackageIndex; // Index into mRestorePackages
    private int mRestoreType;
    private File mRestoreSetDir;
    private File mRestoreSetIncrementalDir;
    private File mRestoreSetFullDir;

    // Full backup state
    private byte[] mFullBackupBuffer;
    private long mFullBackupSize;
    private ParcelFileDescriptor mSocket;
    private String mFullTargetPackage;
    private FileInputStream mSocketInputStream;
    private BufferedOutputStream mFullBackupOutputStream;

    // Full restore state
    private byte[] mFullRestoreBuffer;
    private FileInputStream mCurFullRestoreStream;

    /** (Re)creates the on-disk directories backing the currently-active backup set. */
    private void makeDataDirs() {
        if (DEBUG) Log.v(TAG, "Making new data directories.");
        mDataDir = mContext.getFilesDir();
        mCurrentSetDir = new File(mDataDir, Long.toString(CURRENT_SET_TOKEN));
        mCurrentSetFullDir = new File(mCurrentSetDir, FULL_DATA_DIR);
        mCurrentSetIncrementalDir = new File(mCurrentSetDir, INCREMENTAL_DIR);

        mCurrentSetDir.mkdirs();
        mCurrentSetFullDir.mkdir();
        mCurrentSetIncrementalDir.mkdir();
    }

    public KitchenSinkBackupTransport(Context context) {
        mContext = context;
        makeDataDirs();
    }

    @Override
    public String name() {
        return new ComponentName(mContext, this.getClass()).flattenToShortString();
    }

    @Override
    public String transportDirName() {
        return TRANSPORT_DIR_NAME;
    }

    @Override
    public String currentDestinationString() {
        return TRANSPORT_DESTINATION_STRING;
    }

    @Override
    public Intent configurationIntent() {
        // The KitchenSink transport is not user-configurable
        return null;
    }

    @Override
    public Intent dataManagementIntent() {
        // The KitchenSink transport does not present a data-management UI
        return null;
    }

    @Override
    public CharSequence dataManagementIntentLabel() {
        return TRANSPORT_DATA_MANAGEMENT_LABEL;
    }

    @Override
    public long requestBackupTime() {
        if (DEBUG) Log.d(TAG, "request backup time");
        // any time is a good time for local backup
        return 0;
    }

    @Override
    public int initializeDevice() {
        if (DEBUG) {
            Log.d(TAG, "initializing server side storage for this device; wiping all data");
        }
        // Deletes all data from current storage set
        deleteContents(mCurrentSetDir);
        makeDataDirs();
        return TRANSPORT_OK;
    }

    /** Recursively deletes everything inside {@code dirname} (but not {@code dirname} itself). */
    private void deleteContents(File dirname) {
        if (DEBUG) Log.d(TAG, "Deleting data from: " + dirname);
        File[] contents = dirname.listFiles();
        if (contents == null) return;
        for (File f : contents) {
            if (f.isDirectory()) {
                // delete the directory's contents then fall through
                // and delete the directory itself.
                deleteContents(f);
            }
            // deletes the directory itself after deleting everything in it
            f.delete();
        }
    }

    // Encapsulation of a single k/v element change
    private static final class KVOperation {
        // Element filename after base 64 encoding as the key, for efficiency
        final String mKey;
        // An allocated byte array where data is placed when read from the stream
        // null when this is a deletion operation
        final @Nullable byte[] mValue;

        KVOperation(String k, @Nullable byte[] v) {
            mKey = k;
            mValue = v;
        }
    }

    @Override
    public int performBackup(PackageInfo packageInfo, ParcelFileDescriptor data) {
        return performBackup(packageInfo, data, /* flags= */ 0);
    }

    @Override
    public int performBackup(PackageInfo packageInfo, ParcelFileDescriptor data, int flags) {
        Log.i(TAG, "perform backup is called for: " + packageInfo.packageName);
        try {
            return performBackupInternal(packageInfo, data, flags);
        } finally {
            // close the output stream regardless of whether an exception is thrown or caught.
            IoUtils.closeQuietly(data);
        }
    }

    /**
     * Applies one key/value backup pass for {@code packageInfo}.
     *
     * <p>Parses the whole change stream first, checks the resulting datastore size against
     * {@link #KEY_VALUE_BACKUP_SIZE_QUOTA}, then applies the deltas in the order the app
     * provided them.
     *
     * @return {@code TRANSPORT_OK}, {@code TRANSPORT_QUOTA_EXCEEDED} or {@code TRANSPORT_ERROR}
     */
    private int performBackupInternal(PackageInfo packageInfo, ParcelFileDescriptor data,
            int flags) {
        Log.i(TAG, "perform backup internal is called for: " + packageInfo.packageName);
        if ((flags & FLAG_DATA_NOT_CHANGED) != 0) {
            // For unchanged data we do nothing and tell the caller everything was OK
            Log.i(TAG, "Data is not changed, no backup needed for " + packageInfo.packageName);
            return TRANSPORT_OK;
        }
        boolean isIncremental = (flags & FLAG_INCREMENTAL) != 0;
        boolean isNonIncremental = (flags & FLAG_NON_INCREMENTAL) != 0;

        if (isIncremental) {
            Log.i(TAG, "Performing incremental backup for " + packageInfo.packageName);
        } else if (isNonIncremental) {
            Log.i(TAG, "Performing non-incremental backup for " + packageInfo.packageName);
        } else {
            Log.i(TAG, "Performing backup for " + packageInfo.packageName);
        }

        if (DEBUG) {
            try {
                // get detailed information about the file, access system API
                StructStat ss = Os.fstat(data.getFileDescriptor());
                Log.v(TAG, "performBackup() pkg=" + packageInfo.packageName
                        + " size=" + ss.st_size + " flags=" + flags);
            } catch (ErrnoException e) {
                Log.w(TAG, " Unable to stat input file in performBackup() " + e);
            }
        }

        File packageDir = new File(mCurrentSetIncrementalDir, packageInfo.packageName);
        // mkdirs() returns false when the directory already exists, i.e. we have prior data.
        boolean hasDataForPackage = !packageDir.mkdirs();

        if (isNonIncremental && hasDataForPackage) {
            Log.w(TAG, "Requested non-incremental, deleting existing data.");
            clearBackupData(packageInfo);
            packageDir.mkdirs();
        }

        // go through the entire input data stream to make a list of all the updates to apply later
        ArrayList<KVOperation> changeOps;
        try {
            changeOps = parseBackupStream(data);
        } catch (IOException e) {
            // if something goes wrong, abort the operation and return error.
            Log.v(TAG, "Exception reading backup input", e);
            return TRANSPORT_ERROR;
        }

        // calculate the sum of the current in-datastore size per key to detect quota overrun
        ArrayMap<String, Integer> datastore = new ArrayMap<>();
        int totalSize = parseKeySizes(packageDir, datastore);
        Log.i(TAG, "Total size of the current data:" + totalSize);
        // find out the datastore size that will result from applying the
        // sequence of delta operations
        if (DEBUG) {
            int numOps = changeOps.size();
            if (numOps > 0) {
                Log.v(TAG, "Calculating delta size impact for " + numOps + " updates.");
            } else {
                Log.v(TAG, "No operations in backup stream, so no size change");
            }
        }

        int updatedSize = totalSize;
        for (KVOperation op : changeOps) {
            // Deduct the size of the key we're about to replace, if any
            final Integer curSize = datastore.get(op.mKey);
            if (curSize != null) {
                updatedSize -= curSize.intValue();
                if (DEBUG && op.mValue == null) {
                    Log.d(TAG, "  delete " + op.mKey + ", updated total " + updatedSize);
                }
            }

            // And add back the size of the value we're about to store, if any
            if (op.mValue != null) {
                updatedSize += op.mValue.length;
                if (DEBUG) {
                    Log.d(TAG, ((curSize == null) ? "  new " : "  replace ")
                            + op.mKey + ", updated total " + updatedSize);
                }
            }
        }

        // If our final size is over quota, report the failure
        if (updatedSize > KEY_VALUE_BACKUP_SIZE_QUOTA) {
            Log.w(TAG, "New datastore size " + updatedSize
                    + " exceeds quota " + KEY_VALUE_BACKUP_SIZE_QUOTA);
            return TRANSPORT_QUOTA_EXCEEDED;
        }
        // No problem with storage size, so go ahead and apply the delta operations
        // (in the order that the app provided them)
        for (KVOperation op : changeOps) {
            File element = new File(packageDir, op.mKey);

            // this is either a deletion or a rewrite-from-zero, so we can just remove
            // the existing file and proceed in either case.
            Log.v(TAG, "Deleting the existing file: " + element.getPath());
            element.delete();

            // if this wasn't a deletion, put the new data in place
            if (op.mValue != null) {
                try (FileOutputStream out = new FileOutputStream(element)) {
                    out.write(op.mValue, 0, op.mValue.length);
                } catch (IOException e) {
                    Log.e(TAG, "Unable to update key file " + element, e);
                    return TRANSPORT_ERROR;
                }
            }
        }
        Log.i(TAG, "KVBackup is successful.");
        return TRANSPORT_OK;
    }

    /**
     * Drains the backup change stream into a list of {@link KVOperation}s. Keys are
     * Base64-encoded so they are safe to use as filenames; a negative data size marks a
     * deletion and yields a null value.
     */
    private ArrayList<KVOperation> parseBackupStream(ParcelFileDescriptor data)
            throws IOException {
        ArrayList<KVOperation> changeOps = new ArrayList<>();
        BackupDataInput changeSet = new BackupDataInput(data.getFileDescriptor());
        while (changeSet.readNextHeader()) {
            String key = changeSet.getKey();
            String base64Key = new String(Base64.encode(key.getBytes(), Base64.NO_WRAP));
            int dataSize = changeSet.getDataSize();
            if (DEBUG) {
                Log.d(TAG, "Delta operation key: " + key + "; size: " + dataSize
                        + "; key64: " + base64Key);
            }

            byte[] buf = null;
            if (dataSize >= 0) {
                buf = new byte[dataSize];
                changeSet.readEntityData(buf, 0, dataSize);
            }
            changeOps.add(new KVOperation(base64Key, buf));
        }
        return changeOps;
    }

    /**
     * Reads the given datastore directory, building a table of the value size of each
     * keyed element, and returning the summed total.
     */
    private int parseKeySizes(File packageDir, ArrayMap<String, Integer> datastore) {
        int totalSize = 0;
        final String[] elements = packageDir.list();
        if (elements != null) {
            if (DEBUG) {
                Log.d(TAG, "Existing datastore contents: " + packageDir);
            }
            for (String file : elements) {
                File element = new File(packageDir, file);
                String key = file; // filename
                int size = (int) element.length();
                totalSize += size;
                if (DEBUG) {
                    Log.d(TAG, "  key " + key + "   size " + size);
                }
                datastore.put(key, size);
            }
            if (DEBUG) {
                Log.d(TAG, "  TOTAL: " + totalSize);
            }
        } else {
            if (DEBUG) {
                Log.d(TAG, "No existing data for package: " + packageDir);
            }
        }
        return totalSize;
    }

    @Override
    public IBinder getBinder() {
        if (DEBUG) Log.d(TAG, "get binder");
        return super.getBinder();
    }

    @Override
    public int getTransportFlags() {
        if (DEBUG) Log.d(TAG, "get transport flags");
        return super.getTransportFlags();
    }

    @Override
    public int clearBackupData(PackageInfo packageInfo) {
        File packageDir = new File(mCurrentSetIncrementalDir, packageInfo.packageName);
        if (DEBUG) {
            Log.d(TAG, "clear backup data for package: " + packageInfo.packageName
                    + " package directory: " + packageDir);
        }
        final File[] incrementalFiles = packageDir.listFiles();
        // deletes files in incremental file set
        if (incrementalFiles != null) {
            for (File f : incrementalFiles) {
                f.delete();
            }
            packageDir.delete();
        }
        // deletes files in current file set
        packageDir = new File(mCurrentSetFullDir, packageInfo.packageName);
        final File[] currentFiles = packageDir.listFiles();
        if (currentFiles != null) {
            for (File f : currentFiles) {
                f.delete();
            }
            packageDir.delete();
        }
        return TRANSPORT_OK;
    }

    // calls after performBackup(), performFullBackup(), clearBackupData()
    @Override
    public int finishBackup() {
        if (DEBUG) Log.d(TAG, "finish backup for:" + mFullTargetPackage);
        return closeFullBackup();
    }

    /** Flushes and closes any in-flight full-backup stream and socket; resets the state. */
    private int closeFullBackup() {
        if (mSocket == null) {
            return TRANSPORT_OK;
        }
        try {
            if (mFullBackupOutputStream != null) {
                // forces any buffered output bytes
                // to be written out to the underlying output stream.
                mFullBackupOutputStream.flush();
                mFullBackupOutputStream.close();
            }
            mSocketInputStream = null;
            mFullTargetPackage = null;
            mSocket.close();
        } catch (IOException e) {
            if (DEBUG) {
                Log.w(TAG, "Exception caught in closeFullBackup()", e);
            }
            return TRANSPORT_ERROR;
        } finally {
            mSocket = null;
            mFullBackupOutputStream = null;
        }
        return TRANSPORT_OK;
    }

    // ------------------------------------------------------------------------------------
    // Full backup handling

    @Override
    public long requestFullBackupTime() {
        if (DEBUG) Log.d(TAG, "request full backup time");
        return 0;
    }

    @Override
    public int checkFullBackupSize(long size) {
        if (DEBUG) Log.d(TAG, "check full backup size");
        int result = TRANSPORT_OK;
        // Decline zero-size "backups"
        if (size <= 0) {
            result = TRANSPORT_PACKAGE_REJECTED;
        } else if (size > FULL_BACKUP_SIZE_QUOTA) {
            result = TRANSPORT_QUOTA_EXCEEDED;
        }
        if (result != TRANSPORT_OK) {
            if (DEBUG) {
                Log.d(TAG, "Declining backup of size " + size + " Full backup size quota: "
                        + FULL_BACKUP_SIZE_QUOTA);
            }
        }
        return result;
    }

    @Override
    public int performFullBackup(PackageInfo targetPackage, ParcelFileDescriptor socket) {
        if (DEBUG) Log.d(TAG, "perform full backup for: " + targetPackage);
        if (mSocket != null) {
            Log.e(TAG, "Attempt to initiate full backup while one is in progress");
            return TRANSPORT_ERROR;
        }
        // We know a priori that we run in the system process, so we need to make
        // sure to dup() our own copy of the socket fd. Transports which run in
        // their own processes must not do this.
        try {
            mFullBackupSize = 0;
            mSocket = ParcelFileDescriptor.dup(socket.getFileDescriptor());
            mSocketInputStream = new FileInputStream(mSocket.getFileDescriptor());
        } catch (IOException e) {
            Log.e(TAG, "Unable to process socket for full backup:" + e);
            return TRANSPORT_ERROR;
        }

        mFullTargetPackage = targetPackage.packageName;
        mFullBackupBuffer = new byte[FULL_BACKUP_BUFFER_BYTE_SIZE];

        return TRANSPORT_OK;
    }

    @Override
    public int performFullBackup(PackageInfo targetPackage, ParcelFileDescriptor socket,
            int flags) {
        Log.v(TAG, "perform full backup, flags:" + flags + ", package:" + targetPackage);
        return super.performFullBackup(targetPackage, socket, flags);
    }

    // Reads data from socket file descriptor provided in performFullBackup() call
    @Override
    public int sendBackupData(final int numBytes) {
        if (DEBUG) Log.d(TAG, "send back data");
        if (mSocket == null) {
            Log.w(TAG, "Attempted sendBackupData before performFullBackup");
            return TRANSPORT_ERROR;
        }

        mFullBackupSize += numBytes;
        if (mFullBackupSize > FULL_BACKUP_SIZE_QUOTA) {
            return TRANSPORT_QUOTA_EXCEEDED;
        }

        if (numBytes > mFullBackupBuffer.length) {
            mFullBackupBuffer = new byte[numBytes];
        }
        // creates new full backup output stream at the target location
        if (mFullBackupOutputStream == null) {
            FileOutputStream outputStream;
            try {
                File tarball = new File(mCurrentSetFullDir, mFullTargetPackage);
                outputStream = new FileOutputStream(tarball);
            } catch (FileNotFoundException e) {
                return TRANSPORT_ERROR;
            }
            // later will close when finishBackup() and cancelFullBackup() are called
            mFullBackupOutputStream = new BufferedOutputStream(outputStream);
        }

        int bytesLeft = numBytes;
        while (bytesLeft > 0) {
            try {
                int nRead = mSocketInputStream.read(mFullBackupBuffer, 0, bytesLeft);
                Log.i(TAG, "read " + bytesLeft + " bytes of data");
                if (nRead < 0) {
                    // Something went wrong if we expect data but saw EOD
                    Log.w(TAG, "Unexpected EOD; failing backup");
                    return TRANSPORT_ERROR;
                }
                mFullBackupOutputStream.write(mFullBackupBuffer, 0, nRead);
                bytesLeft -= nRead;
            } catch (IOException e) {
                Log.e(TAG, "Error handling backup data for " + mFullTargetPackage);
                return TRANSPORT_ERROR;
            }
        }
        if (DEBUG) {
            Log.d(TAG, "Stored " + numBytes + " of data");
        }
        return TRANSPORT_OK;
    }

    // Happens before finishBackup(), tear down any ongoing backup state
    @Override
    public void cancelFullBackup() {
        if (DEBUG) {
            Log.d(TAG, "Canceling full backup of " + mFullTargetPackage);
        }
        File archive = new File(mCurrentSetFullDir, mFullTargetPackage);
        closeFullBackup();
        if (archive.exists()) {
            archive.delete();
        }
    }

    // ------------------------------------------------------------------------------------
    // Restore handling

    @Override
    public RestoreSet[] getAvailableRestoreSets() {
        Log.v(TAG, "get available restore sets");
        long[] existing = new long[POSSIBLE_SETS.length + 1];
        // number of existing non-current sets
        int num = 0;
        // see which possible non-current sets exist...
        for (long token : POSSIBLE_SETS) {
            // if the file directory exists for the non-current set
            if ((new File(mDataDir, Long.toString(token))).exists()) {
                existing[num++] = token;
                Log.v(TAG, "number of available restore sets: " + num);
            }
        }
        // always adds the currently-active set at last
        existing[num++] = CURRENT_SET_TOKEN;

        RestoreSet[] available = new RestoreSet[num];
        String deviceName = DEFAULT_DEVICE_NAME_FOR_RESTORE_SET;
        for (int i = 0; i < available.length; i++) {
            available[i] = new RestoreSet("Local disk image", deviceName, existing[i]);
        }
        return available;
    }

    @Override
    public long getCurrentRestoreSet() {
        // The current restore set always has the same token, which is 1
        if (DEBUG) Log.d(TAG, "get current restore set");
        return CURRENT_SET_TOKEN;
    }

    @Override
    public int startRestore(long token, PackageInfo[] packages) {
        if (DEBUG) {
            Log.d(TAG, "start restore for token: " + token + " , num of packages: "
                    + packages.length);
        }
        mRestorePackages = packages;
        mRestorePackageIndex = -1;
        mRestoreSetDir = new File(mDataDir, Long.toString(token));
        mRestoreSetIncrementalDir = new File(mRestoreSetDir, INCREMENTAL_DIR);
        mRestoreSetFullDir = new File(mRestoreSetDir, FULL_DATA_DIR);
        return TRANSPORT_OK;
    }

    // Get the package name of the next application with data in the backup store, plus
    // a description of the structure of the restored type
    @Override
    public RestoreDescription nextRestorePackage() {
        // Guard must come before the debug log below, which dereferences mRestorePackages.
        if (mRestorePackages == null) throw new IllegalStateException("startRestore not called");
        if (DEBUG) {
            Log.d(TAG, "nextRestorePackage() : mRestorePackageIndex=" + mRestorePackageIndex
                    + " length=" + mRestorePackages.length);
        }

        boolean found;
        while (++mRestorePackageIndex < mRestorePackages.length) {
            // name of the current restore package
            String name = mRestorePackages[mRestorePackageIndex].packageName;

            // If we have key/value data for this package, deliver that
            // skip packages where we have a data dir but no actual contents
            found = hasRestoreDataForPackage(name);
            if (found) {
                mRestoreType = RestoreDescription.TYPE_KEY_VALUE;
            } else {
                // No key/value data; check for [non-empty] full data
                File maybeFullData = new File(mRestoreSetFullDir, name);
                if (maybeFullData.length() > 0) {
                    if (DEBUG) {
                        Log.d(TAG, "  nextRestorePackage(TYPE_FULL_STREAM) @ "
                                + mRestorePackageIndex + " = " + name);
                    }
                    mRestoreType = RestoreDescription.TYPE_FULL_STREAM;
                    mCurFullRestoreStream = null; // ensure starting from the ground state
                    found = true;
                }
            }

            if (found) {
                return new RestoreDescription(name, mRestoreType);
            }
            // if not found for either type
            if (DEBUG) {
                Log.d(TAG, "  ... package @ " + mRestorePackageIndex + " = " + name
                        + " has no data; skipping");
            }
        }

        if (DEBUG) Log.d(TAG, "  no more packages to restore");
        return RestoreDescription.NO_MORE_PACKAGES;
    }

    // check if this package has key/value backup data
    private boolean hasRestoreDataForPackage(String packageName) {
        String[] contents = (new File(mRestoreSetIncrementalDir, packageName)).list();
        if (contents != null && contents.length > 0) {
            if (DEBUG) {
                Log.d(TAG, "  nextRestorePackage(TYPE_KEY_VALUE) @ "
                        + mRestorePackageIndex + " = " + packageName);
            }
            return true;
        }
        return false;
    }

    // get the data for the application returned by nextRestorePackage(), only if key/value is
    // the delivery type
    @Override
    public int getRestoreData(ParcelFileDescriptor outFd) {
        if (DEBUG) Log.d(TAG, "get restore data");
        if (mRestorePackages == null) throw new IllegalStateException("startRestore not called");
        if (mRestorePackageIndex < 0) {
            throw new IllegalStateException("nextRestorePackage not called");
        }
        if (mRestoreType != RestoreDescription.TYPE_KEY_VALUE) {
            throw new IllegalStateException("getRestoreData(fd) for non-key/value dataset, "
                    + "restore type:" + mRestoreType);
        }
        File packageDir = new File(mRestoreSetIncrementalDir,
                mRestorePackages[mRestorePackageIndex].packageName);
        // the restore set is the concatenation of the individual record blobs,
        // each of which is a file in the package's directory.
        List<DecodedFilename> blobs = contentsByKey(packageDir);
        if (blobs == null) { // nextRestorePackage() ensures the dir exists, so this is an error
            Log.e(TAG, "No keys for package: " + packageDir);
            return TRANSPORT_ERROR;
        }

        // We expect at least some data if the directory exists in the first place
        if (DEBUG) Log.d(TAG, "  getRestoreData() found " + blobs.size() + " key files");
        BackupDataOutput out = new BackupDataOutput(outFd.getFileDescriptor());
        try {
            for (DecodedFilename keyEntry : blobs) {
                File f = keyEntry.mFile;
                try (FileInputStream in = new FileInputStream(f)) {
                    int size = (int) f.length();
                    byte[] buf = new byte[size];
                    // A single read() is not guaranteed to fill the buffer; loop until the
                    // whole record has been read.
                    int off = 0;
                    while (off < size) {
                        int nRead = in.read(buf, off, size - off);
                        if (nRead < 0) {
                            throw new IOException("Unexpected EOF reading " + f);
                        }
                        off += nRead;
                    }
                    if (DEBUG) Log.d(TAG, "    ... key=" + keyEntry.mKey + " size=" + size);
                    out.writeEntityHeader(keyEntry.mKey, size);
                    out.writeEntityData(buf, size);
                }
            }
            return TRANSPORT_OK;
        } catch (IOException e) {
            Log.e(TAG, "Unable to read backup records", e);
            return TRANSPORT_ERROR;
        }
    }

    // Pairs an on-disk record file with its Base64-decoded key, ordered by decoded key.
    private static final class DecodedFilename implements Comparable<DecodedFilename> {
        public File mFile;
        public String mKey;

        DecodedFilename(File f) {
            mFile = f;
            mKey = new String(Base64.decode(f.getName(), Base64.DEFAULT));
        }

        @Override
        public int compareTo(DecodedFilename other) {
            // sorts into ascending lexical order by decoded key
            return mKey.compareTo(other.mKey);
        }
    }

    /**
     * Returns the files in {@code dir} sorted lexically by the Base64-decoded file name
     * (not by the on-disk filename), or {@code null} if the directory does not exist or
     * cannot be read — callers treat {@code null} as an error.
     */
    private List<DecodedFilename> contentsByKey(File dir) {
        File[] allFiles = dir.listFiles();
        if (allFiles == null) {
            // Missing or unreadable directory: distinct from "exists but empty".
            return null;
        }
        if (allFiles.length == 0) {
            return Collections.emptyList();
        }

        // Decode the filenames into keys then sort lexically by key
        List<DecodedFilename> contents = new ArrayList<>();
        for (File f : allFiles) {
            contents.add(new DecodedFilename(f));
        }
        Collections.sort(contents);
        return contents;
    }

    @Override
    public void finishRestore() {
        if (DEBUG) Log.d(TAG, "finishRestore()");
        if (mRestoreType == RestoreDescription.TYPE_FULL_STREAM) {
            resetFullRestoreState();
        }
        // set the restore type back to 0
        mRestoreType = 0;
    }

    // Clears full restore stream and full restore buffer back to the ground state
    private void resetFullRestoreState() {
        IoUtils.closeQuietly(mCurFullRestoreStream);
        mCurFullRestoreStream = null;
        mFullRestoreBuffer = null;
    }

    // ------------------------------------------------------------------------------------
    // Full restore handling

    // Writes some data to the socket supplied to this call, and returns the number of bytes
    // written. The system will then read that many bytes and stream them to the
    // application's agent for restore.
    @Override
    public int getNextFullRestoreDataChunk(ParcelFileDescriptor socket) {
        if (DEBUG) Log.d(TAG, "get next full restore data chunk");
        if (mRestoreType != RestoreDescription.TYPE_FULL_STREAM) {
            throw new IllegalStateException("Asked for full restore data for non-stream package"
                    + ", restore type:" + mRestoreType);
        }

        // first chunk?
        if (mCurFullRestoreStream == null) {
            final String name = mRestorePackages[mRestorePackageIndex].packageName;
            if (DEBUG) Log.i(TAG, "Starting full restore of " + name);
            File dataset = new File(mRestoreSetFullDir, name);
            try {
                mCurFullRestoreStream = new FileInputStream(dataset);
            } catch (IOException e) {
                // If we can't open the target package's tarball, we return the single-package
                // error code and let the caller go on to the next package.
                Log.e(TAG, "Unable to read archive for " + name, e);
                return TRANSPORT_PACKAGE_REJECTED;
            }
            mFullRestoreBuffer = new byte[FULL_RESTORE_BUFFER_BYTE_SIZE];
        }

        FileOutputStream stream = new FileOutputStream(socket.getFileDescriptor());

        int nRead;
        try {
            nRead = mCurFullRestoreStream.read(mFullRestoreBuffer);
            if (nRead < 0) {
                // EOF: tell the caller we're done
                nRead = NO_MORE_DATA;
            } else if (nRead == 0) {
                // This shouldn't happen when reading a FileInputStream; we should always
                // get either a positive nonzero byte count or -1. Log the situation and
                // treat it as EOF.
                Log.w(TAG, "read() of archive file returned 0; treating as EOF");
                nRead = NO_MORE_DATA;
            } else {
                if (DEBUG) {
                    Log.i(TAG, "delivering restore chunk: " + nRead);
                }
                stream.write(mFullRestoreBuffer, 0, nRead);
            }
        } catch (IOException e) {
            Log.e(TAG, "exception:" + e);
            return TRANSPORT_ERROR; // Hard error accessing the file; shouldn't happen
        } finally {
            IoUtils.closeQuietly(socket);
        }

        return nRead;
    }

    // If the OS encounters an error while processing RestoreDescription.TYPE_FULL_STREAM
    // data for restore, it will invoke this method to tell the transport that it should
    // abandon the data download for the current package.
    @Override
    public int abortFullRestore() {
        Log.v(TAG, "abort full restore");
        if (mRestoreType != RestoreDescription.TYPE_FULL_STREAM) {
            throw new IllegalStateException("abortFullRestore() but not currently restoring"
                    + ", restore type: " + mRestoreType);
        }
        resetFullRestoreState();
        mRestoreType = 0;
        return TRANSPORT_OK;
    }

    @Override
    public long getBackupQuota(String packageName, boolean isFullBackup) {
        if (DEBUG) Log.d(TAG, "get backup quota");
        return isFullBackup ? FULL_BACKUP_SIZE_QUOTA : KEY_VALUE_BACKUP_SIZE_QUOTA;
    }
}