1 // Copyright (C) 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #define ATRACE_TAG ATRACE_TAG_APP
16 #define LOG_TAG "FuseDaemon"
17 #define LIBFUSE_LOG_TAG "libfuse"
18
19 #include "FuseDaemon.h"
20
21 #include <android-base/file.h>
22 #include <android-base/logging.h>
23 #include <android-base/properties.h>
24 #include <android-base/strings.h>
25 #include <android/log.h>
26 #include <android/trace.h>
27 #include <ctype.h>
28 #include <dirent.h>
29 #include <errno.h>
30 #include <fcntl.h>
31 #include <fuse_i.h>
32 #include <fuse_kernel.h>
33 #include <fuse_log.h>
34 #include <fuse_lowlevel.h>
35 #include <inttypes.h>
36 #include <limits.h>
37 #include <stdbool.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <sys/inotify.h>
42 #include <sys/mman.h>
43 #include <sys/mount.h>
44 #include <sys/param.h>
45 #include <sys/resource.h>
46 #include <sys/stat.h>
47 #include <sys/statfs.h>
48 #include <sys/statvfs.h>
49 #include <sys/time.h>
50 #include <sys/types.h>
51 #include <sys/uio.h>
52 #include <unistd.h>
53
54 #include <iostream>
55 #include <map>
56 #include <mutex>
57 #include <queue>
58 #include <regex>
59 #include <thread>
60 #include <unordered_map>
61 #include <unordered_set>
62 #include <vector>
63
64 #include "BpfSyscallWrappers.h"
65 #include "MediaProviderWrapper.h"
66 #include "leveldb/db.h"
67 #include "libfuse_jni/FuseUtils.h"
68 #include "libfuse_jni/ReaddirHelper.h"
69 #include "libfuse_jni/RedactionInfo.h"
70
71 using mediaprovider::fuse::DirectoryEntry;
72 using mediaprovider::fuse::dirhandle;
73 using mediaprovider::fuse::handle;
74 using mediaprovider::fuse::node;
75 using mediaprovider::fuse::RedactionInfo;
76 using std::string;
77 using std::vector;
78
79 // logging macros to avoid duplication.
80 #define TRACE_NODE(__node, __req) \
81 LOG(VERBOSE) << __FUNCTION__ << " : " << #__node << " = [" << get_name(__node) \
82 << "] (uid=" << (__req)->ctx.uid << ") "
83
84 #define ATRACE_NAME(name) ScopedTrace ___tracer(name)
85 #define ATRACE_CALL() ATRACE_NAME(__FUNCTION__)
86
87 class ScopedTrace {
88 public:
89 explicit inline ScopedTrace(const char *name) {
90 ATrace_beginSection(name);
91 }
92
93 inline ~ScopedTrace() {
94 ATrace_endSection();
95 }
96 };
97
98 const bool IS_OS_DEBUGABLE = android::base::GetIntProperty("ro.debuggable", 0);
99
100 #define FUSE_UNKNOWN_INO 0xffffffff
101
102 // Stolen from: android_filesystem_config.h
103 #define AID_APP_START 10000
104
105 #define FUSE_MAX_MAX_PAGES 256
106
107 const size_t MAX_READ_SIZE = FUSE_MAX_MAX_PAGES * getpagesize();
108 // Stolen from: UserHandle#getUserId
109 constexpr int PER_USER_RANGE = 100000;
110
111 // Stolen from: UserManagerService
112 constexpr int MAX_USER_ID = UINT32_MAX / PER_USER_RANGE;
113
114 const int MY_UID = getuid();
115 const int MY_USER_ID = MY_UID / PER_USER_RANGE;
116 const std::string MY_USER_ID_STRING(std::to_string(MY_UID / PER_USER_RANGE));
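// Illustration only (uids hypothetical): a daemon uid of 10065 in user 0 gives
// MY_USER_ID = 10065 / 100000 = 0; the same app in user 10 runs as uid 1010065 and
// gives MY_USER_ID = 1010065 / 100000 = 10.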
117
118 // Regex copied from FileUtils.java in MediaProvider, but without media directory.
119 const std::regex PATTERN_OWNED_PATH(
120 "^/storage/[^/]+/(?:[0-9]+/)?Android/(?:data|obb)/([^/]+)(/?.*)?",
121 std::regex_constants::icase);
122 const std::regex PATTERN_BPF_BACKING_PATH("^/storage/[^/]+/[0-9]+/Android/(data|obb)$",
123 std::regex_constants::icase);
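// Illustrative matches (package name hypothetical):
//   PATTERN_OWNED_PATH:       "/storage/emulated/0/Android/data/com.example.app/files"
//                             -> capture group 1 = "com.example.app"
//   PATTERN_BPF_BACKING_PATH: "/storage/emulated/0/Android/data"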
124
125 static constexpr char TRANSFORM_SYNTHETIC_DIR[] = "synthetic";
126 static constexpr char TRANSFORM_TRANSCODE_DIR[] = "transcode";
127 static constexpr char PRIMARY_VOLUME_PREFIX[] = "/storage/emulated";
128 static constexpr char STORAGE_PREFIX[] = "/storage";
129
130 static constexpr char VOLUME_INTERNAL[] = "internal";
131 static constexpr char VOLUME_EXTERNAL_PRIMARY[] = "external_primary";
132
133 static constexpr char OWNERSHIP_RELATION[] = "ownership";
134
135 static constexpr char FUSE_BPF_PROG_PATH[] = "/sys/fs/bpf/prog_fuseMedia_fuse_media";
136
137 enum class BpfFd { REMOVE = -2 };
138
139 /*
140 * In order to avoid double caching with fuse, call fadvise on the file handles
141 * in the underlying file system. However, if this is done on every read/write,
142 * the fadvises cause a very significant slowdown in tests (specifically fio
143 * seq_write). So call fadvise on the file handles with the most reads/writes
144 * only after a threshold is passed.
145 */
146 class FAdviser {
147 public:
148 FAdviser() : thread_(MessageLoop, this), total_size_(0) {}
149
150 ~FAdviser() {
151 SendMessage(Message::quit);
152 thread_.join();
153 }
154
155 void Record(int fd, size_t size) { SendMessage(Message::record, fd, size); }
156
157 void Close(int fd) { SendMessage(Message::close, fd); }
158
159 private:
160 struct Message {
161 enum Type { record, close, quit };
162 Type type;
163 int fd;
164 size_t size;
165 };
166
167 void RecordImpl(int fd, size_t size) {
168 total_size_ += size;
169
170 // Find or create record in files_
171 // Remove record from sizes_ if it exists, adjusting size appropriately
172 auto file = files_.find(fd);
173 if (file != files_.end()) {
174 auto old_size = file->second;
175 size += old_size->first;
176 sizes_.erase(old_size);
177 } else {
178 file = files_.insert(Files::value_type(fd, sizes_.end())).first;
179 }
180
181 // Now (re) insert record in sizes_
182 auto new_size = sizes_.insert(Sizes::value_type(size, fd));
183 file->second = new_size;
184
185 if (total_size_ < threshold_) return;
186
187 LOG(INFO) << "Threshold exceeded - fadvising " << total_size_;
188 while (!sizes_.empty() && total_size_ > target_) {
189 auto size = --sizes_.end();
190 total_size_ -= size->first;
191 posix_fadvise(size->second, 0, 0, POSIX_FADV_DONTNEED);
192 files_.erase(size->second);
193 sizes_.erase(size);
194 }
195 LOG(INFO) << "Threshold now " << total_size_;
196 }
197
198 void CloseImpl(int fd) {
199 auto file = files_.find(fd);
200 if (file == files_.end()) return;
201
202 total_size_ -= file->second->first;
203 sizes_.erase(file->second);
204 files_.erase(file);
205 }
206
207 void MessageLoopImpl() {
208 while (1) {
209 Message message;
210
211 {
212 std::unique_lock<std::mutex> lock(mutex_);
213 cv_.wait(lock, [this] { return !queue_.empty(); });
214 message = queue_.front();
215 queue_.pop();
216 }
217
218 switch (message.type) {
219 case Message::record:
220 RecordImpl(message.fd, message.size);
221 break;
222
223 case Message::close:
224 CloseImpl(message.fd);
225 break;
226
227 case Message::quit:
228 return;
229 }
230 }
231 }
232
233 static int MessageLoop(FAdviser* ptr) {
234 ptr->MessageLoopImpl();
235 return 0;
236 }
237
238 void SendMessage(Message::Type type, int fd = -1, size_t size = 0) {
239 {
240 std::unique_lock<std::mutex> lock(mutex_);
241 Message message = {type, fd, size};
242 queue_.push(message);
243 }
244 cv_.notify_one();
245 }
246
247 std::mutex mutex_;
248 std::condition_variable cv_;
249 std::queue<Message> queue_;
250 std::thread thread_;
251
252 typedef std::multimap<size_t, int> Sizes;
253 typedef std::map<int, Sizes::iterator> Files;
254
255 Files files_;
256 Sizes sizes_;
257 size_t total_size_;
258
259 const size_t threshold_ = 64 * 1024 * 1024;
260 const size_t target_ = 32 * 1024 * 1024;
261 };
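// Sketch of the intended usage, based on the class API and the comment above (the call sites
// below are assumptions and are not shown in this excerpt):
//   fuse->fadviser.Record(h->fd, bytes_transferred);  // after I/O on a lower-fs fd
//   fuse->fadviser.Close(h->fd);                      // when the FUSE handle is released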
262
263 /* Single FUSE mount */
264 struct fuse {
265 explicit fuse(const std::string& _path, const ino_t _ino, const bool _uncached_mode,
266 const bool _bpf, android::base::unique_fd&& _bpf_fd,
267 const std::vector<string>& _supported_transcoding_relative_paths,
268 const std::vector<string>& _supported_uncached_relative_paths)
269 : path(_path),
270 tracker(mediaprovider::fuse::NodeTracker(&lock)),
271 root(node::CreateRoot(_path, &lock, _ino, &tracker)),
272 uncached_mode(_uncached_mode),
273 mp(0),
274 zero_addr(0),
275 disable_dentry_cache(false),
276 passthrough(false),
277 bpf(_bpf),
278 bpf_fd(std::move(_bpf_fd)),
279 supported_transcoding_relative_paths(_supported_transcoding_relative_paths),
280 supported_uncached_relative_paths(_supported_uncached_relative_paths) {}
281
282 inline bool IsRoot(const node* node) const { return node == root; }
283
284 inline string GetEffectiveRootPath() {
285 if (android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
286 return path + "/" + MY_USER_ID_STRING;
287 }
288 return path;
289 }
290
291 inline string GetTransformsDir() { return GetEffectiveRootPath() + "/.transforms"; }
292
293 // Note that these two (FromInode / ToInode) conversion wrappers are required
294 // because fuse_lowlevel_ops documents that the root inode is always one
295 // (see FUSE_ROOT_ID in fuse_lowlevel.h). There are no particular requirements
296 // on any of the other inodes in the FS.
297 inline node* FromInode(__u64 inode) {
298 if (inode == FUSE_ROOT_ID) {
299 return root;
300 }
301
302 return node::FromInode(inode, &tracker);
303 }
304
305 inline node* FromInodeNoThrow(__u64 inode) {
306 if (inode == FUSE_ROOT_ID) {
307 return root;
308 }
309
310 return node::FromInodeNoThrow(inode, &tracker);
311 }
312
313 inline __u64 ToInode(node* node) const {
314 if (IsRoot(node)) {
315 return FUSE_ROOT_ID;
316 }
317
318 return node::ToInode(node);
319 }
320
321 inline bool IsTranscodeSupportedPath(const string& path) {
322 // Keep in sync with MediaProvider#supportsTranscode
323 if (!android::base::EndsWithIgnoreCase(path, ".mp4")) {
324 return false;
325 }
326
327 const std::string& base_path = GetEffectiveRootPath() + "/";
328 for (const std::string& relative_path : supported_transcoding_relative_paths) {
329 if (android::base::StartsWithIgnoreCase(path, base_path + relative_path)) {
330 return true;
331 }
332 }
333
334 return false;
335 }
336
337 inline bool IsUncachedPath(const std::string& path) {
338 const std::string base_path = GetEffectiveRootPath() + "/";
339 for (const std::string& relative_path : supported_uncached_relative_paths) {
340 if (android::base::StartsWithIgnoreCase(path, base_path + relative_path)) {
341 return true;
342 }
343 }
344
345 return false;
346 }
347
348 inline bool ShouldNotCache(const std::string& path) {
349 if (uncached_mode) {
350 // Cache is disabled for the entire volume.
351 return true;
352 }
353
354 if (supported_uncached_relative_paths.empty()) {
355 // By default there is no supported uncached path. Just return early in this case.
356 return false;
357 }
358
359 if (!android::base::StartsWithIgnoreCase(path, PRIMARY_VOLUME_PREFIX)) {
360 // Uncached path config applies only to primary volumes.
361 return false;
362 }
363
364 if (android::base::EndsWith(path, "/")) {
365 return IsUncachedPath(path);
366 } else {
367 // Append a slash at the end to make sure that the exact match is picked up.
368 return IsUncachedPath(path + "/");
369 }
370 }
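// Illustrative example, assuming "Download/" is configured as an uncached relative path for a
// user-0 primary volume: ShouldNotCache("/storage/emulated/0/Download") returns true, while
// "/storage/ABCD-1234/Download" returns false because the config only applies to primary volumes.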
371
372 std::recursive_mutex lock;
373 const string path;
374 // The Inode tracker associated with this FUSE instance.
375 mediaprovider::fuse::NodeTracker tracker;
376 node* const root;
377 struct fuse_session* se;
378
379 const bool uncached_mode;
380
381 /*
382 * Used to make JNI calls to MediaProvider.
383 * Responsibility of freeing this object falls on corresponding
384 * FuseDaemon object.
385 */
386 mediaprovider::fuse::MediaProviderWrapper* mp;
387
388 /*
389 * Points to a range of zeroized bytes, used by pf_read to represent redacted ranges.
390 * The memory is read only and should never be modified.
391 */
392 /* const */ char* zero_addr;
393
394 FAdviser fadviser;
395
396 std::atomic_bool* active;
397 std::atomic_bool disable_dentry_cache;
398 std::atomic_bool passthrough;
399 std::atomic_bool bpf;
400
401 const android::base::unique_fd bpf_fd;
402
403 // FUSE device id.
404 std::atomic_uint dev;
405 const std::vector<string> supported_transcoding_relative_paths;
406 const std::vector<string> supported_uncached_relative_paths;
407
408 // LevelDb Connection Map
409 std::map<std::string, leveldb::DB*> level_db_connection_map;
410 std::recursive_mutex level_db_mutex;
411 };
412
413 struct OpenInfo {
414 int flags;
415 bool for_write;
416 bool direct_io;
417 };
418
419 enum class FuseOp { lookup, readdir, mknod, mkdir, create };
420
421 static inline string get_name(node* n) {
422 if (n) {
423 std::string name = IS_OS_DEBUGABLE ? "real_path: " + n->BuildPath() + " " : "";
424 name += "node_path: " + n->BuildSafePath();
425 return name;
426 }
427 return "?";
428 }
429
430 static inline __u64 ptr_to_id(const void* ptr) {
431 return (__u64)(uintptr_t) ptr;
432 }
433
434 /*
435 * Set an F_RDLCK or F_WRLCK on fd with fcntl(2).
436 *
437 * This is called before the MediaProvider returns an fd from the lower file
438 * system to an app over the ContentResolver interface. This allows us to
439 * check with is_file_locked if any reference to that fd is still open.
440 */
441 static int set_file_lock(int fd, bool for_read, const std::string& path) {
442 std::string lock_str = (for_read ? "read" : "write");
443
444 struct flock fl{};
445 fl.l_type = for_read ? F_RDLCK : F_WRLCK;
446 fl.l_whence = SEEK_SET;
447
448 int res = fcntl(fd, F_OFD_SETLK, &fl);
449 if (res) {
450 PLOG(WARNING) << "Failed to set lock: " << lock_str;
451 return res;
452 }
453 return res;
454 }
455
456 /*
457 * Check if an F_RDLCK or F_WRLCK is set on fd with fcntl(2).
458 *
459 * This is used to determine if the MediaProvider has given an app an fd to the lower fs over
460 * the ContentResolver interface. Before that happens, we always call set_file_lock on the file
461 * allowing us to know if any reference to that fd is still open here.
462 *
463 * Returns true if fd may have a lock, false otherwise
464 */
465 static bool is_file_locked(int fd, const std::string& path) {
466 struct flock fl{};
467 fl.l_type = F_WRLCK;
468 fl.l_whence = SEEK_SET;
469
470 int res = fcntl(fd, F_OFD_GETLK, &fl);
471 if (res) {
472 PLOG(WARNING) << "Failed to check lock";
473 // Assume worst
474 return true;
475 }
476 bool locked = fl.l_type != F_UNLCK;
477 return locked;
478 }
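// Note: F_OFD_SETLK/F_OFD_GETLK locks belong to the open file description rather than the
// process, so the lock placed by set_file_lock() persists for as long as any duplicate of that
// fd (e.g. one handed to an app via ContentResolver) stays open, which is what is_file_locked()
// probes.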
479
480 static struct fuse* get_fuse(fuse_req_t req) {
481 return reinterpret_cast<struct fuse*>(fuse_req_userdata(req));
482 }
483
484 static bool is_package_owned_path(const string& path, const string& fuse_path) {
485 if (path.rfind(fuse_path, 0) != 0) {
486 return false;
487 }
488 return std::regex_match(path, PATTERN_OWNED_PATH);
489 }
490
491 static bool is_bpf_backing_path(const string& path) {
492 return std::regex_match(path, PATTERN_BPF_BACKING_PATH);
493 }
494
495 // See fuse_lowlevel.h fuse_lowlevel_notify_inval_entry for how to call this safely without
496 // deadlocking the kernel
497 static void fuse_inval(fuse_session* se, fuse_ino_t parent_ino, fuse_ino_t child_ino,
498 const string& child_name, const string& path) {
499 if (mediaprovider::fuse::containsMount(path)) {
500 LOG(WARNING) << "Ignoring attempt to invalidate dentry for FUSE mounts";
501 return;
502 }
503
504 if (fuse_lowlevel_notify_inval_entry(se, parent_ino, child_name.c_str(), child_name.size())) {
505 // Invalidating the dentry can fail if there's no dcache entry, however, there may still
506 // be cached attributes, so attempt to invalidate those by invalidating the inode
507 fuse_lowlevel_notify_inval_inode(se, child_ino, 0, 0);
508 }
509 }
510
511 static double get_entry_timeout(const string& path, bool should_inval, struct fuse* fuse) {
512 if (fuse->disable_dentry_cache || should_inval || is_package_owned_path(path, fuse->path) ||
513 fuse->ShouldNotCache(path)) {
514 // We set dentry timeout to 0 for the following reasons:
515 // 1. The dentry cache was completely disabled for the entire volume.
516 // 2.1 Case-insensitive lookups need to invalidate other case-insensitive dentry matches
517 // 2.2 Nodes supporting transforms need to be invalidated, so that subsequent lookups by a
518 // uid requiring a transform is guaranteed to come to the FUSE daemon.
519 // 3. With app data isolation enabled, app A should not guess existence of app B from the
520 // Android/{data,obb}/<package> paths, hence we prevent the kernel from caching that
521 // information.
522 // 4. The dentry cache was completely disabled for the given path.
523 return 0;
524 }
525 return std::numeric_limits<double>::max();
526 }
527
528 static std::string get_path(node* node) {
529 const string& io_path = node->GetIoPath();
530 return io_path.empty() ? node->BuildPath() : io_path;
531 }
532
533 // Returns true if the path resides under .transforms/synthetic.
534 // NOTE: currently only file paths corresponding to redacted URIs reside under this folder. The path
535 // itself never exists and is just a link for transformation.
536 static inline bool is_synthetic_path(const string& path, struct fuse* fuse) {
537 return android::base::StartsWithIgnoreCase(
538 path, fuse->GetTransformsDir() + "/" + TRANSFORM_SYNTHETIC_DIR);
539 }
540
541 static inline bool is_transforms_dir_path(const string& path, struct fuse* fuse) {
542 return android::base::StartsWithIgnoreCase(path, fuse->GetTransformsDir());
543 }
544
545 static std::unique_ptr<mediaprovider::fuse::FileLookupResult> validate_node_path(
546 const std::string& path, const std::string& name, fuse_req_t req, int* error_code,
547 struct fuse_entry_param* e, const FuseOp op) {
548 struct fuse* fuse = get_fuse(req);
549 const struct fuse_ctx* ctx = fuse_req_ctx(req);
550 memset(e, 0, sizeof(*e));
551
552 const bool synthetic_path = is_synthetic_path(path, fuse);
553 if (lstat(path.c_str(), &e->attr) < 0 && !(op == FuseOp::lookup && synthetic_path)) {
554 *error_code = errno;
555 return nullptr;
556 }
557
558 if (is_transforms_dir_path(path, fuse)) {
559 if (op == FuseOp::lookup) {
560 // Lookups are only allowed under .transforms/synthetic dir
561 if (!(android::base::EqualsIgnoreCase(path, fuse->GetTransformsDir()) ||
562 android::base::StartsWithIgnoreCase(
563 path, fuse->GetTransformsDir() + "/" + TRANSFORM_SYNTHETIC_DIR))) {
564 *error_code = ENONET;
565 return nullptr;
566 }
567 } else {
568 // user-code is only allowed to make lookups under .transforms dir, and that too only
569 // under .transforms/synthetic dir
570 *error_code = ENOENT;
571 return nullptr;
572 }
573 }
574
575 if (S_ISDIR(e->attr.st_mode)) {
576 // now that we have reached this point, ops on directories are safe and require no
577 // transformation.
578 return std::make_unique<mediaprovider::fuse::FileLookupResult>(0, 0, 0, true, false, "");
579 }
580
581 if (!synthetic_path && !fuse->IsTranscodeSupportedPath(path)) {
582 // Transforms are only supported for synthetic or transcode-supported paths
583 return std::make_unique<mediaprovider::fuse::FileLookupResult>(0, 0, 0, true, false, "");
584 }
585
586 // Handle potential file transforms
587 std::unique_ptr<mediaprovider::fuse::FileLookupResult> file_lookup_result =
588 fuse->mp->FileLookup(path, req->ctx.uid, req->ctx.pid);
589
590 if (!file_lookup_result) {
591 // Fail lookup if we can't fetch FileLookupResult for path
592 LOG(WARNING) << "Failed to fetch FileLookupResult for " << path;
593 *error_code = EFAULT;
594 return nullptr;
595 }
596
597 const string& io_path = file_lookup_result->io_path;
598 // Update size with io_path iff there's an io_path
599 if (!io_path.empty() && (lstat(io_path.c_str(), &e->attr) < 0)) {
600 *error_code = errno;
601 return nullptr;
602 }
603
604 return file_lookup_result;
605 }
606
607 static node* make_node_entry(fuse_req_t req, node* parent, const string& name,
608 const string& parent_path, const string& path,
609 struct fuse_entry_param* e, int* error_code, const FuseOp op) {
610 struct fuse* fuse = get_fuse(req);
611 const struct fuse_ctx* ctx = fuse_req_ctx(req);
612 node* node;
613
614 memset(e, 0, sizeof(*e));
615
616 std::unique_ptr<mediaprovider::fuse::FileLookupResult> file_lookup_result =
617 validate_node_path(path, name, req, error_code, e, op);
618 if (!file_lookup_result) {
619 // Fail lookup if we can't validate |path|; |errno| would have already been set
620 return nullptr;
621 }
622
623 bool should_invalidate = file_lookup_result->transforms_supported;
624 const bool transforms_complete = file_lookup_result->transforms_complete;
625 const int transforms = file_lookup_result->transforms;
626 const int transforms_reason = file_lookup_result->transforms_reason;
627 const string& io_path = file_lookup_result->io_path;
628 if (transforms) {
629 // If the node requires transforms, we MUST never cache it in the VFS
630 CHECK(should_invalidate);
631 }
632
633 node = parent->LookupChildByName(name, true /* acquire */, transforms);
634 if (!node) {
635 ino_t ino = e->attr.st_ino;
636 node = ::node::Create(parent, name, io_path, transforms_complete, transforms,
637 transforms_reason, &fuse->lock, ino, &fuse->tracker);
638 } else if (!mediaprovider::fuse::containsMount(path)) {
639 // Only invalidate a path if it does not contain a mount and |name| != node->GetName().
640 // Invalidate both names to ensure there's no dentry left in the kernel after the following
641 // operations:
642 // 1) touch foo, touch FOO, unlink *foo*
643 // 2) touch foo, touch FOO, unlink *FOO*
644 // Invalidating lookup_name fixes (1) and invalidating node_name fixes (2)
645 // -Set |should_invalidate| to true to invalidate lookup_name by using 0 timeout below
646 // -Explicitly invalidate node_name. Note that we invalidate async otherwise we will
647 // deadlock the kernel
648 if (name != node->GetName()) {
649 // Force node invalidation to fix the kernel dentry cache for case (1) above
650 should_invalidate = true;
651 // Make copies of the node name and path so we're not attempting to acquire
652 // any node locks from the invalidation thread. Depending on timing, we may end
653 // up invalidating the wrong inode but that shouldn't result in correctness issues.
654 const fuse_ino_t parent_ino = fuse->ToInode(parent);
655 const fuse_ino_t child_ino = fuse->ToInode(node);
656 const std::string& node_name = node->GetName();
657 std::thread t([=]() { fuse_inval(fuse->se, parent_ino, child_ino, node_name, path); });
658 t.detach();
659 // Update the name after |node_name| reference above has been captured in lambda
660 // This avoids invalidating the node again on subsequent accesses with |name|
661 node->SetName(name);
662 }
663
664 // This updated value allows us to correctly decide whether to keep_cache and use direct_io during
665 // FUSE_OPEN. Between the last lookup and this lookup, we might have deleted a cached
666 // transcoded file on the lower fs. A subsequent transcode at FUSE_READ should ensure we
667 // don't reuse any stale transcode page cache content.
668 node->SetTransformsComplete(transforms_complete);
669 }
670 TRACE_NODE(node, req);
671
672 if (should_invalidate && fuse->IsTranscodeSupportedPath(path)) {
673 // Some components like the MTP stack need an efficient mechanism to determine if a file
674 // supports transcoding. This allows them to work around an issue with MTP clients on Windows
675 // where those clients incorrectly use the original file size instead of the transcoded file
676 // size to copy files from the device. This size misuse causes transcoded files to be
677 // truncated to the original file size, hence corrupting the transcoded file.
678 //
679 // We expose the transcode bit via the st_nlink stat field. This should be safe because the
680 // field is not supported on FAT filesystems which FUSE is emulating.
681 // WARNING: Apps should never rely on this behavior as it is NOT supported API and will be
682 // removed in a future release when the MTP stack has better support for transcoded files on
683 // Windows OS.
684 e->attr.st_nlink = 2;
685 }
686
687 // This FS is not being exported via NFS so just a fixed generation number
688 // for now. If we do need this, we need to increment the generation ID each
689 // time the fuse daemon restarts because that's what it takes for us to
690 // reuse inode numbers.
691 e->generation = 0;
692 e->ino = fuse->ToInode(node);
693
694 // When FUSE BPF is used, the caching of node attributes and lookups is
695 // disabled to avoid possible inconsistencies between the FUSE cache and
696 // the lower file system state.
697 // With FUSE BPF the file system requests are forwarded to the lower file
698 // system bypassing the FUSE daemon, so dropping the caching does not
699 // introduce a performance regression.
700 // Currently FUSE BPF is limited to the Android/data and Android/obb
701 // directories.
702 if (!fuse->bpf || !is_bpf_backing_path(parent_path)) {
703 e->entry_timeout = get_entry_timeout(path, should_invalidate, fuse);
704 e->attr_timeout = fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max();
705 }
706 return node;
707 }
708
709 namespace mediaprovider {
710 namespace fuse {
711
712 /**
713 * Function implementations
714 *
715 * These implement the various functions in fuse_lowlevel_ops
716 *
717 */
718
719 static void pf_init(void* userdata, struct fuse_conn_info* conn) {
720 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
721
722 // Check the same property as android.os.Build.IS_ARC.
723 const bool is_arc = android::base::GetBoolProperty("ro.boot.container", false);
724
725 // We don't want a getattr request with every read request
726 conn->want &= ~FUSE_CAP_AUTO_INVAL_DATA & ~FUSE_CAP_READDIRPLUS_AUTO;
727 uint64_t mask = (FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE | FUSE_CAP_SPLICE_READ |
728 FUSE_CAP_ASYNC_READ | FUSE_CAP_ATOMIC_O_TRUNC | FUSE_CAP_WRITEBACK_CACHE |
729 FUSE_CAP_EXPORT_SUPPORT | FUSE_CAP_FLOCK_LOCKS);
730 // Disable writeback cache if it's uncached mode or if it's ARC. In ARC, due to the Downloads
731 // bind-mount, we need to disable it on the primary emulated volume as well as on StubVolumes.
732 if (fuse->uncached_mode || is_arc) {
733 mask &= ~FUSE_CAP_WRITEBACK_CACHE;
734 }
735
736 bool disable_splice_write = false;
737 if (fuse->passthrough) {
738 if (conn->capable & FUSE_CAP_PASSTHROUGH) {
739 mask |= FUSE_CAP_PASSTHROUGH;
740
741 // SPLICE_WRITE seems to cause linux kernel cache corruption with passthrough enabled.
742 // It is still under investigation but while running
743 // ScopedStorageDeviceTest#testAccessMediaLocationInvalidation, we notice test flakes
744 // of about 1/20 for the following reason:
745 // 1. App without ACCESS_MEDIA_LOCATION permission reads redacted bytes via FUSE cache
746 // 2. App with ACCESS_MEDIA_LOCATION permission reads non-redacted bytes via passthrough
747 // cache
748 // (2) fails because bytes from (1) sneak into the passthrough cache??
749 // To workaround, we disable splice for write when passthrough is enabled.
750 // This shouldn't have any performance regression if comparing passthrough devices to
751 // no-passthrough devices for the following reasons:
752 // 1. No-op for no-passthrough devices
753 // 2. Passthrough devices
754 // a. Files not requiring redaction use passthrough which bypasses FUSE_READ entirely
755 // b. Files requiring redaction are still faster than no-passthrough devices that use
756 // direct_io
757 disable_splice_write = true;
758 } else {
759 LOG(WARNING) << "Passthrough feature not supported by the kernel";
760 fuse->passthrough = false;
761 }
762 }
763
764 conn->want |= conn->capable & mask;
765 if (disable_splice_write) {
766 conn->want &= ~FUSE_CAP_SPLICE_WRITE;
767 }
768
769 conn->max_read = MAX_READ_SIZE;
770
771 fuse->active->store(true, std::memory_order_release);
772 }
773
774 static void removeInstance(struct fuse* fuse, std::string instance_name) {
775 if (fuse->level_db_connection_map.find(instance_name) != fuse->level_db_connection_map.end()) {
776 delete fuse->level_db_connection_map[instance_name];
777 (fuse->level_db_connection_map).erase(instance_name);
778 LOG(INFO) << "Removed leveldb connection for " << instance_name;
779 }
780 }
781
782 static void removeLevelDbConnection(struct fuse* fuse) {
783 fuse->level_db_mutex.lock();
784 if (android::base::StartsWith(fuse->path, PRIMARY_VOLUME_PREFIX)) {
785 removeInstance(fuse, VOLUME_INTERNAL);
786 removeInstance(fuse, OWNERSHIP_RELATION);
787 removeInstance(fuse, VOLUME_EXTERNAL_PRIMARY);
788 } else {
789 // Return "C58E-1702" from the path like "/storage/C58E-1702"
790 std::string volume_name = (fuse->path).substr(9);
791 // Convert to lowercase
792 std::transform(volume_name.begin(), volume_name.end(), volume_name.begin(), ::tolower);
793 removeInstance(fuse, volume_name);
794 }
795 fuse->level_db_mutex.unlock();
796 }
797
798 static void pf_destroy(void* userdata) {
799 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
800 removeLevelDbConnection(fuse);
801 LOG(INFO) << "DESTROY " << fuse->path;
802
803 node::DeleteTree(fuse->root);
804 }
805
806 // Return true if the path is accessible for that uid.
807 static bool is_app_accessible_path(struct fuse* fuse, const string& path, uid_t uid) {
808 MediaProviderWrapper* mp = fuse->mp;
809
810 if (uid < AID_APP_START || uid == MY_UID) {
811 return true;
812 }
813
814 if (path == PRIMARY_VOLUME_PREFIX) {
815 // Apps should never refer to /storage/emulated - they should be using the user-specific
816 // subdirs, e.g. /storage/emulated/0
817 return false;
818 }
819
820 std::smatch match;
821 if (std::regex_match(path, match, PATTERN_OWNED_PATH)) {
822 const std::string& pkg = match[1];
823 // .nomedia is not a valid package. .nomedia always exists in /Android/data directory,
824 // and it's not an external file/directory of any package
825 if (pkg == ".nomedia") {
826 return true;
827 }
828 if (!fuse->bpf && android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
829 // Emulated storage bind-mounts app-private data directories, and so these
830 // should not be accessible through FUSE anyway.
831 LOG(WARNING) << "Rejected access to app-private dir on FUSE: " << path
832 << " from uid: " << uid;
833 return false;
834 }
835 if (!mp->isUidAllowedAccessToDataOrObbPath(uid, path)) {
836 PLOG(WARNING) << "Invalid other package file access from " << uid << "(: " << path;
837 return false;
838 }
839 }
840 return true;
841 }
842
843 void fuse_bpf_fill_entries(const string& path, const int bpf_fd, struct fuse_entry_param* e,
844 int& backing_fd) {
845 /*
846 * The file descriptor `backing_fd` must not be closed as it is closed
847 * automatically by the kernel as soon as it consumes the FUSE reply. This
848 * mechanism is necessary because userspace doesn't know when the kernel
849 * will consume the FUSE response containing `backing_fd`, thus it may close
850 * the `backing_fd` too soon, with the risk of assigning a backing file
851 * which is either invalid or corresponds to the wrong file in the lower
852 * file system.
853 */
854 backing_fd = open(path.c_str(), O_CLOEXEC | O_DIRECTORY | O_RDONLY);
855 if (backing_fd < 0) {
856 PLOG(ERROR) << "Failed to open: " << path;
857 return;
858 }
859
860 e->backing_action = FUSE_ACTION_REPLACE;
861 e->backing_fd = backing_fd;
862
863 if (bpf_fd >= 0) {
864 e->bpf_action = FUSE_ACTION_REPLACE;
865 e->bpf_fd = bpf_fd;
866 } else if (bpf_fd == static_cast<int>(BpfFd::REMOVE)) {
867 e->bpf_action = FUSE_ACTION_REMOVE;
868 } else {
869 e->bpf_action = FUSE_ACTION_KEEP;
870 }
871 }
872
873 void fuse_bpf_install(struct fuse* fuse, struct fuse_entry_param* e, const string& child_path,
874 int& backing_fd) {
875 // TODO(b/211873756) Enable only for the primary volume. Must be
876 // extended for other media devices.
877 if (android::base::StartsWith(child_path, PRIMARY_VOLUME_PREFIX)) {
878 if (is_bpf_backing_path(child_path)) {
879 fuse_bpf_fill_entries(child_path, fuse->bpf_fd.get(), e, backing_fd);
880 } else if (is_package_owned_path(child_path, fuse->path)) {
881 fuse_bpf_fill_entries(child_path, static_cast<int>(BpfFd::REMOVE), e, backing_fd);
882 }
883 }
884 }
885
886 static std::regex storage_emulated_regex("^\\/storage\\/emulated\\/([0-9]+)");
887
888 static bool is_user_accessible_path(fuse_req_t req, const struct fuse* fuse, const string& path) {
889 std::smatch match;
890 std::regex_search(path, match, storage_emulated_regex);
891
892 // Ensure the FuseDaemon user id matches the user id in the requested path, or that
893 // cross-user lookups are allowed for the requested path
894 if (match.size() == 2 && std::to_string(getuid() / PER_USER_RANGE) != match[1].str()) {
895 // If user id mismatch, check cross-user lookups
896 long userId = strtol(match[1].str().c_str(), nullptr, 10);
897 if (userId < 0 || userId > MAX_USER_ID ||
898 !fuse->mp->ShouldAllowLookup(req->ctx.uid, userId)) {
899 return false;
900 }
901 }
902 return true;
903 }
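// Illustrative walk-through (user ids hypothetical): a daemon running in user 0 that receives a
// lookup for "/storage/emulated/10/DCIM" extracts "10" via storage_emulated_regex; since that
// differs from its own user id, the lookup is allowed only if 10 <= MAX_USER_ID and
// ShouldAllowLookup() approves the cross-user access.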
904
905 static node* do_lookup(fuse_req_t req, fuse_ino_t parent, const char* name,
906 struct fuse_entry_param* e, int* error_code, const FuseOp op,
907 const bool validate_access, int* backing_fd = NULL) {
908 struct fuse* fuse = get_fuse(req);
909 node* parent_node = fuse->FromInode(parent);
910 if (!parent_node) {
911 *error_code = ENOENT;
912 return nullptr;
913 }
914 string parent_path = parent_node->BuildPath();
915
916 // We should always allow lookups on the root, because failing them could cause
917 // bind mounts to be invalidated.
918 if (validate_access && !fuse->IsRoot(parent_node) &&
919 !is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
920 *error_code = ENOENT;
921 return nullptr;
922 }
923
924 TRACE_NODE(parent_node, req);
925
926 const string child_path = parent_path + "/" + name;
927
928 if (validate_access && !is_user_accessible_path(req, fuse, child_path)) {
929 *error_code = EACCES;
930 return nullptr;
931 }
932
933 auto node = make_node_entry(req, parent_node, name, parent_path, child_path, e, error_code, op);
934
935 if (fuse->bpf) {
936 if (op == FuseOp::lookup) {
937 // Only direct lookup calls support setting backing_fd and bpf program
938 fuse_bpf_install(fuse, e, child_path, *backing_fd);
939 } else if (is_bpf_backing_path(child_path) && op == FuseOp::readdir) {
940 // Fuse-bpf driver implementation doesn’t support providing backing_fd
941 // and bpf program as a part of readdirplus lookup. So we make sure
942 // here we're not making any lookups on backed files because we want
943 // to receive separate lookup calls for them later to set backing_fd and bpf.
944 e->ino = 0;
945 }
946 }
947
948 return node;
949 }
950
951 static void pf_lookup(fuse_req_t req, fuse_ino_t parent, const char* name) {
952 ATRACE_CALL();
953 struct fuse_entry_param e;
954 int backing_fd = -1;
955
956 int error_code = 0;
957 if (do_lookup(req, parent, name, &e, &error_code, FuseOp::lookup, true, &backing_fd)) {
958 fuse_reply_entry(req, &e);
959 } else {
960 CHECK(error_code != 0);
961 fuse_reply_err(req, error_code);
962 }
963
964 if (backing_fd != -1) close(backing_fd);
965 }
966
967 static void pf_lookup_postfilter(fuse_req_t req, fuse_ino_t parent, uint32_t error_in,
968 const char* name, struct fuse_entry_out* feo,
969 struct fuse_entry_bpf_out* febo) {
970 struct fuse* fuse = get_fuse(req);
971
972 ATRACE_CALL();
973 node* parent_node = fuse->FromInode(parent);
974 if (!parent_node) {
975 fuse_reply_err(req, ENOENT);
976 return;
977 }
978
979 TRACE_NODE(parent_node, req);
980 const string path = parent_node->BuildPath() + "/" + name;
981 if (strcmp(name, ".nomedia") != 0 &&
982 !fuse->mp->isUidAllowedAccessToDataOrObbPath(req->ctx.uid, path)) {
983 fuse_reply_err(req, ENOENT);
984 return;
985 }
986
987 struct {
988 struct fuse_entry_out feo;
989 struct fuse_entry_bpf_out febo;
990 } buf = {*feo, *febo};
991
992 fuse_reply_buf(req, (const char*)&buf, sizeof(buf));
993 }
994
995 static void do_forget(fuse_req_t req, struct fuse* fuse, fuse_ino_t ino, uint64_t nlookup) {
996 node* node = fuse->FromInode(ino);
997 TRACE_NODE(node, req);
998 if (node) {
999 // This is a narrowing conversion from an unsigned 64bit to a 32bit value. For
1000 // some reason we only keep 32 bit refcounts but the kernel issues
1001 // forget requests with a 64 bit counter.
1002 node->Release(static_cast<uint32_t>(nlookup));
1003 }
1004 }
1005
1006 static void pf_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) {
1007 // Always allow to forget so no need to check is_app_accessible_path()
1008 ATRACE_CALL();
1009 node* node;
1010 struct fuse* fuse = get_fuse(req);
1011
1012 do_forget(req, fuse, ino, nlookup);
1013 fuse_reply_none(req);
1014 }
1015
1016 static void pf_forget_multi(fuse_req_t req,
1017 size_t count,
1018 struct fuse_forget_data* forgets) {
1019 ATRACE_CALL();
1020 struct fuse* fuse = get_fuse(req);
1021
1022 for (int i = 0; i < count; i++) {
1023 do_forget(req, fuse, forgets[i].ino, forgets[i].nlookup);
1024 }
1025 fuse_reply_none(req);
1026 }
1027
1028 static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode, off_t offset, off_t length,
1029 fuse_file_info* fi) {
1030 ATRACE_CALL();
1031 struct fuse* fuse = get_fuse(req);
1032
1033 handle* h = reinterpret_cast<handle*>(fi->fh);
1034 auto err = fallocate(h->fd, mode, offset, length);
1035 fuse_reply_err(req, err ? errno : 0);
1036 }
1037
1038 static void pf_getattr(fuse_req_t req,
1039 fuse_ino_t ino,
1040 struct fuse_file_info* fi) {
1041 ATRACE_CALL();
1042 struct fuse* fuse = get_fuse(req);
1043 node* node = fuse->FromInode(ino);
1044 if (!node) {
1045 fuse_reply_err(req, ENOENT);
1046 return;
1047 }
1048 const string& path = get_path(node);
1049 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1050 fuse_reply_err(req, ENOENT);
1051 return;
1052 }
1053 TRACE_NODE(node, req);
1054
1055 struct stat s;
1056 memset(&s, 0, sizeof(s));
1057 if (lstat(path.c_str(), &s) < 0) {
1058 fuse_reply_err(req, errno);
1059 } else {
1060 fuse_reply_attr(req, &s,
1061 fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max());
1062 }
1063 }
1064
1065 static void pf_setattr(fuse_req_t req,
1066 fuse_ino_t ino,
1067 struct stat* attr,
1068 int to_set,
1069 struct fuse_file_info* fi) {
1070 ATRACE_CALL();
1071 struct fuse* fuse = get_fuse(req);
1072 node* node = fuse->FromInode(ino);
1073 if (!node) {
1074 fuse_reply_err(req, ENOENT);
1075 return;
1076 }
1077 const string& path = get_path(node);
1078 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1079 fuse_reply_err(req, ENOENT);
1080 return;
1081 }
1082
1083 int fd = -1;
1084 if (fi) {
1085 // If we have a file_info, setattr was called with an fd so use the fd instead of path
1086 handle* h = reinterpret_cast<handle*>(fi->fh);
1087 fd = h->fd;
1088 } else {
1089 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1090 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
1091 path, path, ctx->uid, ctx->pid, node->GetTransformsReason(), true /* for_write */,
1092 false /* redact */, false /* log_transforms_metrics */);
1093
1094 if (!result) {
1095 fuse_reply_err(req, EFAULT);
1096 return;
1097 }
1098
1099 if (result->status) {
1100 fuse_reply_err(req, EACCES);
1101 return;
1102 }
1103 }
1104 struct timespec times[2];
1105 TRACE_NODE(node, req);
1106
1107 /* XXX: incomplete implementation on purpose.
1108 * chmod/chown should NEVER be implemented.*/
1109
1110 if ((to_set & FUSE_SET_ATTR_SIZE)) {
1111 int res = 0;
1112 if (fd == -1) {
1113 res = truncate64(path.c_str(), attr->st_size);
1114 } else {
1115 res = ftruncate64(fd, attr->st_size);
1116 }
1117
1118 if (res < 0) {
1119 fuse_reply_err(req, errno);
1120 return;
1121 }
1122 }
1123
1124 /* Handle changing atime and mtime. If FATTR_ATIME and FATTR_ATIME_NOW
1125 * are both set, then set it to the current time. Else, set it to the
1126 * time specified in the request. Same goes for mtime. Use utimensat(2)
1127 * as it allows ATIME and MTIME to be changed independently, and has
1128 * nanosecond resolution which fuse also has.
1129 */
1130 if (to_set & (FATTR_ATIME | FATTR_MTIME)) {
1131 times[0].tv_nsec = UTIME_OMIT;
1132 times[1].tv_nsec = UTIME_OMIT;
1133 if (to_set & FATTR_ATIME) {
1134 if (to_set & FATTR_ATIME_NOW) {
1135 times[0].tv_nsec = UTIME_NOW;
1136 } else {
1137 times[0] = attr->st_atim;
1138 }
1139 }
1140
1141 if (to_set & FATTR_MTIME) {
1142 if (to_set & FATTR_MTIME_NOW) {
1143 times[1].tv_nsec = UTIME_NOW;
1144 } else {
1145 times[1] = attr->st_mtim;
1146 }
1147 }
1148
1149 TRACE_NODE(node, req);
1150 int res = 0;
1151 if (fd == -1) {
1152 res = utimensat(-1, path.c_str(), times, 0);
1153 } else {
1154 res = futimens(fd, times);
1155 }
1156
1157 if (res < 0) {
1158 fuse_reply_err(req, errno);
1159 return;
1160 }
1161 }
1162
1163 lstat(path.c_str(), attr);
1164 fuse_reply_attr(req, attr, fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max());
1165 }
1166
1167 static void pf_canonical_path(fuse_req_t req, fuse_ino_t ino)
1168 {
1169 struct fuse* fuse = get_fuse(req);
1170 node* node = fuse->FromInode(ino);
1171 const string& path = node ? get_path(node) : "";
1172
1173 if (node && is_app_accessible_path(fuse, path, req->ctx.uid)) {
1174 // TODO(b/147482155): Check that uid has access to |path| and its contents
1175 fuse_reply_canonical_path(req, path.c_str());
1176 return;
1177 }
1178 fuse_reply_err(req, ENOENT);
1179 }
1180
1181 static void pf_mknod(fuse_req_t req,
1182 fuse_ino_t parent,
1183 const char* name,
1184 mode_t mode,
1185 dev_t rdev) {
1186 ATRACE_CALL();
1187 struct fuse* fuse = get_fuse(req);
1188 node* parent_node = fuse->FromInode(parent);
1189 if (!parent_node) {
1190 fuse_reply_err(req, ENOENT);
1191 return;
1192 }
1193 string parent_path = parent_node->BuildPath();
1194 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
1195 fuse_reply_err(req, ENOENT);
1196 return;
1197 }
1198
1199 TRACE_NODE(parent_node, req);
1200
1201 const string child_path = parent_path + "/" + name;
1202
1203 mode = (mode & (~0777)) | 0664;
1204 if (mknod(child_path.c_str(), mode, rdev) < 0) {
1205 fuse_reply_err(req, errno);
1206 return;
1207 }
1208
1209 int error_code = 0;
1210 struct fuse_entry_param e;
1211 if (make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
1212 FuseOp::mknod)) {
1213 fuse_reply_entry(req, &e);
1214 } else {
1215 CHECK(error_code != 0);
1216 fuse_reply_err(req, error_code);
1217 }
1218 }
1219
1220 static void pf_mkdir(fuse_req_t req,
1221 fuse_ino_t parent,
1222 const char* name,
1223 mode_t mode) {
1224 ATRACE_CALL();
1225 struct fuse* fuse = get_fuse(req);
1226 node* parent_node = fuse->FromInode(parent);
1227 if (!parent_node) {
1228 fuse_reply_err(req, ENOENT);
1229 return;
1230 }
1231 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1232 const string parent_path = parent_node->BuildPath();
1233 if (!is_app_accessible_path(fuse, parent_path, ctx->uid)) {
1234 fuse_reply_err(req, ENOENT);
1235 return;
1236 }
1237
1238 TRACE_NODE(parent_node, req);
1239
1240 const string child_path = parent_path + "/" + name;
1241
1242 int status = fuse->mp->IsCreatingDirAllowed(child_path, ctx->uid);
1243 if (status) {
1244 fuse_reply_err(req, status);
1245 return;
1246 }
1247
1248 mode = (mode & (~0777)) | 0775;
1249 if (mkdir(child_path.c_str(), mode) < 0) {
1250 fuse_reply_err(req, errno);
1251 return;
1252 }
1253
1254 int error_code = 0;
1255 struct fuse_entry_param e;
1256 if (make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
1257 FuseOp::mkdir)) {
1258 fuse_reply_entry(req, &e);
1259 } else {
1260 CHECK(error_code != 0);
1261 fuse_reply_err(req, error_code);
1262 }
1263 }
1264
1265 static void pf_unlink(fuse_req_t req, fuse_ino_t parent, const char* name) {
1266 ATRACE_CALL();
1267 struct fuse* fuse = get_fuse(req);
1268 node* parent_node = fuse->FromInode(parent);
1269 if (!parent_node) {
1270 fuse_reply_err(req, ENOENT);
1271 return;
1272 }
1273 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1274 const string parent_path = parent_node->BuildPath();
1275 if (!is_app_accessible_path(fuse, parent_path, ctx->uid)) {
1276 fuse_reply_err(req, ENOENT);
1277 return;
1278 }
1279
1280 TRACE_NODE(parent_node, req);
1281
1282 const string child_path = parent_path + "/" + name;
1283
1284 int status = fuse->mp->DeleteFile(child_path, ctx->uid);
1285 if (status) {
1286 fuse_reply_err(req, status);
1287 return;
1288 }
1289
1290 // TODO(b/169306422): Log each deleted node
1291 parent_node->SetDeletedForChild(name);
1292 fuse_reply_err(req, 0);
1293 }
1294
1295 static void pf_rmdir(fuse_req_t req, fuse_ino_t parent, const char* name) {
1296 ATRACE_CALL();
1297 struct fuse* fuse = get_fuse(req);
1298 node* parent_node = fuse->FromInode(parent);
1299 if (!parent_node) {
1300 fuse_reply_err(req, ENOENT);
1301 return;
1302 }
1303 const string parent_path = parent_node->BuildPath();
1304 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
1305 fuse_reply_err(req, ENOENT);
1306 return;
1307 }
1308
1309 if (is_transforms_dir_path(parent_path, fuse)) {
1310 // .transforms is a special daemon controlled dir so apps shouldn't be able to see it via
1311 // readdir, and any dir operations attempted on it should fail
1312 fuse_reply_err(req, ENOENT);
1313 return;
1314 }
1315
1316 TRACE_NODE(parent_node, req);
1317
1318 const string child_path = parent_path + "/" + name;
1319
1320 int status = fuse->mp->IsDeletingDirAllowed(child_path, req->ctx.uid);
1321 if (status) {
1322 fuse_reply_err(req, status);
1323 return;
1324 }
1325
1326 if (rmdir(child_path.c_str()) < 0) {
1327 fuse_reply_err(req, errno);
1328 return;
1329 }
1330
1331 node* child_node = parent_node->LookupChildByName(name, false /* acquire */);
1332 TRACE_NODE(child_node, req);
1333 if (child_node) {
1334 child_node->SetDeleted();
1335 }
1336
1337 fuse_reply_err(req, 0);
1338 }
1339 /*
1340 static void pf_symlink(fuse_req_t req, const char* link, fuse_ino_t parent,
1341 const char* name)
1342 {
1343 cout << "TODO:" << __func__;
1344 }
1345 */
1346 static int do_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
1347 const char* new_name, unsigned int flags) {
1348 ATRACE_CALL();
1349 struct fuse* fuse = get_fuse(req);
1350
1351 if (flags != 0) {
1352 return EINVAL;
1353 }
1354
1355 node* old_parent_node = fuse->FromInode(parent);
1356 if (!old_parent_node) return ENOENT;
1357 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1358 const string old_parent_path = old_parent_node->BuildPath();
1359 if (!is_app_accessible_path(fuse, old_parent_path, ctx->uid)) {
1360 return ENOENT;
1361 }
1362
1363 if (is_transforms_dir_path(old_parent_path, fuse)) {
1364 // .transforms is a special daemon controlled dir so apps shouldn't be able to see it via
1365 // readdir, and any dir operations attempted on it should fail
1366 return ENOENT;
1367 }
1368
1369 node* new_parent_node;
1370 if (fuse->bpf) {
1371 new_parent_node = fuse->FromInodeNoThrow(new_parent);
1372 if (!new_parent_node) return EXDEV;
1373 } else {
1374 new_parent_node = fuse->FromInode(new_parent);
1375 if (!new_parent_node) return ENOENT;
1376 }
1377 const string new_parent_path = new_parent_node->BuildPath();
1378 if (fuse->bpf && is_bpf_backing_path(new_parent_path)) {
1379 return EXDEV;
1380 }
1381 if (!is_app_accessible_path(fuse, new_parent_path, ctx->uid)) {
1382 return ENOENT;
1383 }
1384
1385 if (!old_parent_node || !new_parent_node) {
1386 return ENOENT;
1387 } else if (parent == new_parent && name == new_name) {
1388 // No rename required.
1389 return 0;
1390 }
1391
1392 TRACE_NODE(old_parent_node, req);
1393 TRACE_NODE(new_parent_node, req);
1394
1395 const string old_child_path = old_parent_path + "/" + name;
1396 const string new_child_path = new_parent_path + "/" + new_name;
1397
1398 if (android::base::EqualsIgnoreCase(fuse->GetEffectiveRootPath() + "/android", old_child_path)) {
1399 // Prevent renaming Android/ dir since it contains bind-mounts on the primary volume
1400 return EACCES;
1401 }
1402
1403 // TODO(b/147408834): Check ENOTEMPTY & EEXIST error conditions before JNI call.
1404 const int res = fuse->mp->Rename(old_child_path, new_child_path, req->ctx.uid);
1405 // TODO(b/145663158): Lookups can go out of sync if file/directory is actually moved but
1406 // EFAULT/EIO is reported due to JNI exception.
1407 if (res == 0) {
1408 // Mark any existing destination nodes as deleted. This fixes the following edge case:
1409 // 1. New destination node is forgotten
1410 // 2. Old destination node is not forgotten because there's still an open fd ref to it
1411 // 3. Lookup for |new_name| returns old destination node with stale metadata
1412 new_parent_node->SetDeletedForChild(new_name);
1413 // TODO(b/169306422): Log each renamed node
1414 old_parent_node->RenameChild(name, new_name, new_parent_node);
1415 }
1416 return res;
1417 }
1418
1419 static void pf_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
1420 const char* new_name, unsigned int flags) {
1421 int res = do_rename(req, parent, name, new_parent, new_name, flags);
1422 fuse_reply_err(req, res);
1423 }
1424
1425 /*
1426 static void pf_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t new_parent,
1427 const char* new_name)
1428 {
1429 cout << "TODO:" << __func__;
1430 }
1431 */
1432
1433 static handle* create_handle_for_node(struct fuse* fuse, const string& path, int fd, uid_t uid,
1434 uid_t transforms_uid, node* node, const RedactionInfo* ri,
1435 const bool allow_passthrough, const bool open_info_direct_io,
1436 int* keep_cache) {
1437 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
1438
1439 bool redaction_needed = ri->isRedactionNeeded();
1440 handle* handle = nullptr;
1441 int transforms = node->GetTransforms();
1442 bool transforms_complete = node->IsTransformsComplete();
1443 if (transforms_uid > 0) {
1444 CHECK(transforms);
1445 }
1446
1447 if (fuse->passthrough && allow_passthrough) {
1448 *keep_cache = transforms_complete;
1449 // We only enable passthrough iff these 2 conditions hold
1450 // 1. Redaction is not needed
1451 // 2. Node transforms are completed, e.g. transcoding.
1452 // (2) is important because we transcode lazily (on the first read) and with passthrough,
1453 // we will never get a read into the FUSE daemon, so passthrough would have returned
1454 // arbitrary bytes the first time around. However, if we ensure that transforms are
1455 // completed, then it's safe to use passthrough. Additionally, transcoded nodes never
1456 // require redaction so (2) implies (1)
1457 handle = new struct handle(fd, ri, !open_info_direct_io /* cached */,
1458 !redaction_needed && transforms_complete /* passthrough */, uid,
1459 transforms_uid);
1460 } else {
1461 // Without fuse->passthrough, we don't want to use the FUSE VFS cache in two cases:
1462 // 1. When redaction is needed because app A with EXIF access might access
1463 // a region that should have been redacted for app B without EXIF access, but app B on
1464 // a subsequent read, will be able to see the EXIF data because the read request for
1465 // that region will be served from cache and not get to the FUSE daemon
1466 // 2. When the file has a read or write lock on it. This means that the MediaProvider
1467 // has given an fd to the lower file system to an app. There are two cases where using
1468 // the cache in this case can be a problem:
1469 // a. Writing to a FUSE fd with caching enabled will use the write-back cache and a
1470 // subsequent read from the lower fs fd will not see the write.
1471 // b. Reading from a FUSE fd with caching enabled may not see the latest writes using
1472 // the lower fs fd because those writes did not go through the FUSE layer and reads from
1473 // FUSE after that write may be served from cache
1474 bool has_redacted = node->HasRedactedCache();
1475 bool is_redaction_change =
1476 (redaction_needed && !has_redacted) || (!redaction_needed && has_redacted);
1477 bool is_cached_file_open = node->HasCachedHandle();
1478 bool direct_io = open_info_direct_io || (is_cached_file_open && is_redaction_change) ||
1479 is_file_locked(fd, path) || fuse->ShouldNotCache(path);
1480
1481 if (!is_cached_file_open && is_redaction_change) {
1482 node->SetRedactedCache(redaction_needed);
1483 // Purges stale page cache before open
1484 *keep_cache = 0;
1485 } else {
1486 *keep_cache = transforms_complete;
1487 }
1488 handle = new struct handle(fd, ri, !direct_io /* cached */, false /* passthrough */, uid,
1489 transforms_uid);
1490 }
1491
1492 node->AddHandle(handle);
1493 return handle;
1494 }
1495
1496 static bool do_passthrough_enable(fuse_req_t req, struct fuse_file_info* fi, unsigned int fd) {
1497 int passthrough_fh = fuse_passthrough_enable(req, fd);
1498
1499 if (passthrough_fh <= 0) {
1500 return false;
1501 }
1502
1503 fi->passthrough_fh = passthrough_fh;
1504 return true;
1505 }
1506
1507 static OpenInfo parse_open_flags(const string& path, const int in_flags) {
1508 const bool for_write = in_flags & (O_WRONLY | O_RDWR);
1509 int out_flags = in_flags;
1510 bool direct_io = false;
1511
1512 if (in_flags & O_DIRECT) {
1513 // Set direct IO on the FUSE fs file
1514 direct_io = true;
1515
1516 if (android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
1517 // Remove O_DIRECT because there are strict alignment requirements for direct IO and
1518 // there were some historical bugs affecting encrypted block devices.
1519 // Hence, this is only supported on public volumes.
1520 out_flags &= ~O_DIRECT;
1521 }
1522 }
1523 if (in_flags & O_WRONLY) {
1524 // Replace O_WRONLY with O_RDWR because even if the FUSE fd is opened write-only, the FUSE
1525 // driver might issue reads on the lower fs with the writeback cache enabled
1526 out_flags &= ~O_WRONLY;
1527 out_flags |= O_RDWR;
1528 }
1529 if (in_flags & O_APPEND) {
1530 // Remove O_APPEND because passing it to the lower fs can lead to file corruption when
1531 // multiple FUSE threads race with each other. With the writeback cache enabled, the FUSE
1532 // driver already handles O_APPEND itself.
1533 out_flags &= ~O_APPEND;
1534 }
1535
1536 return {.flags = out_flags, .for_write = for_write, .direct_io = direct_io};
1537 }
1538
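// Copies the per-open state into the fuse_file_info returned to the kernel: the handle pointer
// (as fh), whether the existing page cache may be kept, and whether direct IO is forced.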
fill_fuse_file_info(const handle * handle,const OpenInfo * open_info,const int keep_cache,struct fuse_file_info * fi)1539 static void fill_fuse_file_info(const handle* handle, const OpenInfo* open_info,
1540 const int keep_cache, struct fuse_file_info* fi) {
1541 fi->fh = ptr_to_id(handle);
1542 fi->keep_cache = keep_cache;
1543 fi->direct_io = !handle->cached;
1544 }
1545
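// FUSE_OPEN handler. Verifies that the calling app may access the path, asks MediaProvider
// whether the open is allowed (and whether redaction or transforms apply), opens the lower-fs
// file unless MediaProvider already supplied an fd, and then sets up the handle together with
// its caching/passthrough behaviour via create_handle_for_node().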
pf_open(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)1546 static void pf_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi) {
1547 ATRACE_CALL();
1548 struct fuse* fuse = get_fuse(req);
1549 node* node = fuse->FromInode(ino);
1550 if (!node) {
1551 fuse_reply_err(req, ENOENT);
1552 return;
1553 }
1554 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1555 const string& io_path = get_path(node);
1556 const string& build_path = node->BuildPath();
1557 if (!is_app_accessible_path(fuse, io_path, ctx->uid)) {
1558 fuse_reply_err(req, ENOENT);
1559 return;
1560 }
1561
1562 const OpenInfo open_info = parse_open_flags(io_path, fi->flags);
1563
1564 if (open_info.for_write && node->GetTransforms()) {
1565 TRACE_NODE(node, req) << "write with transforms";
1566 } else {
1567 TRACE_NODE(node, req) << (open_info.for_write ? "write" : "read");
1568 }
1569
1570 // Force permission check with the build path because the MediaProvider database might not be
1571 // aware of the io_path
1572 // We don't redact if the caller was granted write permission for this file
1573 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
1574 build_path, io_path, ctx->uid, ctx->pid, node->GetTransformsReason(),
1575 open_info.for_write, !open_info.for_write /* redact */,
1576 true /* log_transforms_metrics */);
1577 if (!result) {
1578 fuse_reply_err(req, EFAULT);
1579 return;
1580 }
1581
1582 if (result->status) {
1583 fuse_reply_err(req, result->status);
1584 return;
1585 }
1586
1587 int fd = -1;
1588 const bool is_fd_from_java = result->fd >= 0;
1589 if (is_fd_from_java) {
1590 fd = result->fd;
1591 TRACE_NODE(node, req) << "opened in Java";
1592 } else {
1593 fd = open(io_path.c_str(), open_info.flags);
1594 if (fd < 0) {
1595 fuse_reply_err(req, errno);
1596 return;
1597 }
1598 }
1599
1600 int keep_cache = 1;
1601 // If is_fd_from_java==true, we disallow passthrough because the fd could be pointing back
1602 // at the FUSE fs if it was obtained from another process
1603 const handle* h = create_handle_for_node(fuse, io_path, fd, result->uid, result->transforms_uid,
1604 node, result->redaction_info.release(),
1605 /* allow_passthrough */ !is_fd_from_java,
1606 open_info.direct_io, &keep_cache);
1607 fill_fuse_file_info(h, &open_info, keep_cache, fi);
1608
1609 // TODO(b/173190192) requiring that h->cached be enabled in order to
1610 // use FUSE passthrough is a conservative rule and might be dropped as
1611 // soon as its correctness is demonstrated.
1612 if (h->passthrough && !do_passthrough_enable(req, fi, fd)) {
1613 // TODO: Should we crash here so we can find errors easily?
1614 PLOG(ERROR) << "Passthrough OPEN failed for " << io_path;
1615 fuse_reply_err(req, EFAULT);
1616 return;
1617 }
1618
1619 fuse_reply_open(req, fi);
1620 }
1621
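// Serves a plain (non-redacted) read by replying with a single fuse_buf backed by the lower-fs
// fd at the requested offset. Splice is disabled for direct IO opens; see the comment below.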
do_read(fuse_req_t req,size_t size,off_t off,struct fuse_file_info * fi,bool direct_io)1622 static void do_read(fuse_req_t req, size_t size, off_t off, struct fuse_file_info* fi,
1623 bool direct_io) {
1624 handle* h = reinterpret_cast<handle*>(fi->fh);
1625 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size);
1626
1627 buf.buf[0].fd = h->fd;
1628 buf.buf[0].pos = off;
1629 buf.buf[0].flags =
1630 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1631 if (direct_io) {
1632 // sdcardfs does not register splice_read_file_operations and some requests fail with EFAULT
1633 // Specifically, FUSE splice is only enabled for 8KB+ buffers, hence such reads fail
1634 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags)FUSE_BUF_NO_SPLICE);
1635 } else {
1636 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags)0);
1637 }
1638 }
1639
1640 /**
1641 * Sets the parameters for a fuse_buf that reads from memory, including flags.
1642 * Makes buf->mem point to an already mapped region of zeroized memory.
1643 * This memory is read only.
1644 */
create_mem_fuse_buf(size_t size,fuse_buf * buf,struct fuse * fuse)1645 static void create_mem_fuse_buf(size_t size, fuse_buf* buf, struct fuse* fuse) {
1646 buf->size = size;
1647 buf->mem = fuse->zero_addr;
1648 buf->flags = static_cast<fuse_buf_flags>(0 /*read from fuse_buf.mem*/);
1649 buf->pos = -1;
1650 buf->fd = -1;
1651 }
1652
1653 /**
1654 * Sets the parameters for a fuse_buf that reads from file, including flags.
1655 */
create_file_fuse_buf(size_t size,off_t pos,int fd,fuse_buf * buf)1656 static void create_file_fuse_buf(size_t size, off_t pos, int fd, fuse_buf* buf) {
1657 buf->size = size;
1658 buf->fd = fd;
1659 buf->pos = pos;
1660 buf->flags = static_cast<fuse_buf_flags>(FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1661 buf->mem = nullptr;
1662 }
1663
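// Serves a read that may overlap redacted regions. The reply is assembled from a bufvec that
// alternates between file-backed ranges (read from the lower-fs fd) and memory-backed ranges
// pointing at the pre-mapped zero page, so redacted bytes are returned as zeros.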
do_read_with_redaction(fuse_req_t req,size_t size,off_t off,fuse_file_info * fi,bool direct_io)1664 static void do_read_with_redaction(fuse_req_t req, size_t size, off_t off, fuse_file_info* fi,
1665 bool direct_io) {
1666 handle* h = reinterpret_cast<handle*>(fi->fh);
1667
1668 std::vector<ReadRange> ranges;
1669 h->ri->getReadRanges(off, size, &ranges);
1670
1671 // As an optimization, return early if there are no ranges to redact.
1672 if (ranges.size() == 0) {
1673 do_read(req, size, off, fi, direct_io);
1674 return;
1675 }
1676
1677 const size_t num_bufs = ranges.size();
1678 auto bufvec_ptr = std::unique_ptr<fuse_bufvec, decltype(free)*>{
1679 reinterpret_cast<fuse_bufvec*>(
1680 malloc(sizeof(fuse_bufvec) + (num_bufs - 1) * sizeof(fuse_buf))),
1681 free};
1682 fuse_bufvec& bufvec = *bufvec_ptr;
1683
1684 // initialize bufvec
1685 bufvec.count = num_bufs;
1686 bufvec.idx = 0;
1687 bufvec.off = 0;
1688
1689 for (int i = 0; i < num_bufs; ++i) {
1690 const ReadRange& range = ranges[i];
1691 if (range.is_redaction) {
1692 create_mem_fuse_buf(range.size, &(bufvec.buf[i]), get_fuse(req));
1693 } else {
1694 create_file_fuse_buf(range.size, range.start, h->fd, &(bufvec.buf[i]));
1695 }
1696 }
1697
1698 fuse_reply_data(req, &bufvec, static_cast<fuse_buf_copy_flags>(0));
1699 }
1700
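// FUSE_READ handler. Lazily applies any pending node transform (e.g. transcoding) on the first
// read, records the read size with the fadviser, and dispatches to the redacting or plain read
// path depending on the handle's RedactionInfo.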
pf_read(fuse_req_t req,fuse_ino_t ino,size_t size,off_t off,struct fuse_file_info * fi)1701 static void pf_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1702 struct fuse_file_info* fi) {
1703 ATRACE_CALL();
1704 handle* h = reinterpret_cast<handle*>(fi->fh);
1705 if (h == nullptr) {
1706 return;
1707 }
1708 const bool direct_io = !h->cached;
1709 struct fuse* fuse = get_fuse(req);
1710
1711 node* node = fuse->FromInode(ino);
1712
1713 if (!node->IsTransformsComplete()) {
1714 if (!fuse->mp->Transform(node->BuildPath(), node->GetIoPath(), node->GetTransforms(),
1715 node->GetTransformsReason(), req->ctx.uid, h->uid,
1716 h->transforms_uid)) {
1717 fuse_reply_err(req, EFAULT);
1718 return;
1719 }
1720 node->SetTransformsComplete(true);
1721 }
1722
1723 fuse->fadviser.Record(h->fd, size);
1724
1725 if (h->ri->isRedactionNeeded()) {
1726 do_read_with_redaction(req, size, off, fi, direct_io);
1727 } else {
1728 do_read(req, size, off, fi, direct_io);
1729 }
1730 }
1731
1732 /*
1733 static void pf_write(fuse_req_t req, fuse_ino_t ino, const char* buf,
1734 size_t size, off_t off, struct fuse_file_info* fi)
1735 {
1736 cout << "TODO:" << __func__;
1737 }
1738 */
1739
pf_write_buf(fuse_req_t req,fuse_ino_t ino,struct fuse_bufvec * bufv,off_t off,struct fuse_file_info * fi)1740 static void pf_write_buf(fuse_req_t req,
1741 fuse_ino_t ino,
1742 struct fuse_bufvec* bufv,
1743 off_t off,
1744 struct fuse_file_info* fi) {
1745 ATRACE_CALL();
1746 handle* h = reinterpret_cast<handle*>(fi->fh);
1747 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(fuse_buf_size(bufv));
1748 ssize_t size;
1749 struct fuse* fuse = get_fuse(req);
1750
1751 buf.buf[0].fd = h->fd;
1752 buf.buf[0].pos = off;
1753 buf.buf[0].flags =
1754 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1755 size = fuse_buf_copy(&buf, bufv, (enum fuse_buf_copy_flags) 0);
1756
1757 if (size < 0)
1758 fuse_reply_err(req, -size);
1759 else {
1760 // Execute Record *before* fuse_reply_write to avoid the following ordering:
1761 // fuse_reply_write -> pf_release (destroy handle) -> Record (use handle after free)
1762 fuse->fadviser.Record(h->fd, size);
1763 fuse_reply_write(req, size);
1764 }
1765 }
1766 // Haven't tested this one. Not sure what calls it.
1767 #if 0
1768 static void pf_copy_file_range(fuse_req_t req, fuse_ino_t ino_in,
1769 off_t off_in, struct fuse_file_info* fi_in,
1770 fuse_ino_t ino_out, off_t off_out,
1771 struct fuse_file_info* fi_out, size_t len,
1772 int flags)
1773 {
1774 handle* h_in = reinterpret_cast<handle *>(fi_in->fh);
1775 handle* h_out = reinterpret_cast<handle *>(fi_out->fh);
1776 struct fuse_bufvec buf_in = FUSE_BUFVEC_INIT(len);
1777 struct fuse_bufvec buf_out = FUSE_BUFVEC_INIT(len);
1778 ssize_t size;
1779
1780 buf_in.buf[0].fd = h_in->fd;
1781 buf_in.buf[0].pos = off_in;
1782 buf_in.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1783
1784 buf_out.buf[0].fd = h_out->fd;
1785 buf_out.buf[0].pos = off_out;
1786 buf_out.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1787 size = fuse_buf_copy(&buf_out, &buf_in, (enum fuse_buf_copy_flags) 0);
1788
1789 if (size < 0) {
1790 fuse_reply_err(req, -size);
1791 }
1792
1793 fuse_reply_write(req, size);
1794 }
1795 #endif
1796
1797 /*
1798 * This function does nothing except serve as a placeholder to keep the FUSE
1799 * driver handling flushes on close(2).
1800 * In fact, kernels prior to 5.8 stop attempting to flush the cache on close(2)
1801 * if the .flush operation is not implemented by the FUSE daemon.
1802 * This has been fixed in the kernel by commit 614c026e8a46 ("fuse: always
1803 * flush dirty data on close(2)"), merged in Linux 5.8, but until then
1804 * userspace must mitigate this behavior by not leaving the .flush function
1805 * pointer empty.
1806 */
pf_flush(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)1807 static void pf_flush(fuse_req_t req,
1808 fuse_ino_t ino,
1809 struct fuse_file_info* fi) {
1810 ATRACE_CALL();
1811 struct fuse* fuse = get_fuse(req);
1812 TRACE_NODE(nullptr, req) << "noop";
1813 fuse_reply_err(req, 0);
1814 }
1815
pf_release(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)1816 static void pf_release(fuse_req_t req,
1817 fuse_ino_t ino,
1818 struct fuse_file_info* fi) {
1819 ATRACE_CALL();
1820 struct fuse* fuse = get_fuse(req);
1821
1822 node* node = fuse->FromInode(ino);
1823 handle* h = reinterpret_cast<handle*>(fi->fh);
1824 TRACE_NODE(node, req);
1825
1826 fuse->fadviser.Close(h->fd);
1827 if (node) {
1828 node->DestroyHandle(h);
1829 }
1830
1831 fuse_reply_err(req, 0);
1832 }
1833
do_sync_common(int fd,bool datasync)1834 static int do_sync_common(int fd, bool datasync) {
1835 int res = datasync ? fdatasync(fd) : fsync(fd);
1836
1837 if (res == -1) return errno;
1838 return 0;
1839 }
1840
pf_fsync(fuse_req_t req,fuse_ino_t ino,int datasync,struct fuse_file_info * fi)1841 static void pf_fsync(fuse_req_t req,
1842 fuse_ino_t ino,
1843 int datasync,
1844 struct fuse_file_info* fi) {
1845 ATRACE_CALL();
1846 handle* h = reinterpret_cast<handle*>(fi->fh);
1847 int err = do_sync_common(h->fd, datasync);
1848
1849 fuse_reply_err(req, err);
1850 }
1851
pf_fsyncdir(fuse_req_t req,fuse_ino_t ino,int datasync,struct fuse_file_info * fi)1852 static void pf_fsyncdir(fuse_req_t req,
1853 fuse_ino_t ino,
1854 int datasync,
1855 struct fuse_file_info* fi) {
1856 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1857 int err = do_sync_common(dirfd(h->d), datasync);
1858
1859 fuse_reply_err(req, err);
1860 }
1861
pf_opendir(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)1862 static void pf_opendir(fuse_req_t req,
1863 fuse_ino_t ino,
1864 struct fuse_file_info* fi) {
1865 ATRACE_CALL();
1866 struct fuse* fuse = get_fuse(req);
1867 node* node = fuse->FromInode(ino);
1868 if (!node) {
1869 fuse_reply_err(req, ENOENT);
1870 return;
1871 }
1872 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1873 const string path = node->BuildPath();
1874 if (!is_app_accessible_path(fuse, path, ctx->uid)) {
1875 fuse_reply_err(req, ENOENT);
1876 return;
1877 }
1878
1879 TRACE_NODE(node, req);
1880
1881 int status = fuse->mp->IsOpendirAllowed(path, ctx->uid, /* forWrite */ false);
1882 if (status) {
1883 fuse_reply_err(req, status);
1884 return;
1885 }
1886
1887 DIR* dir = opendir(path.c_str());
1888 if (!dir) {
1889 fuse_reply_err(req, errno);
1890 return;
1891 }
1892
1893 dirhandle* h = new dirhandle(dir);
1894 node->AddDirHandle(h);
1895
1896 fi->fh = ptr_to_id(h);
1897 fuse_reply_open(req, fi);
1898 }
1899
1900 #define READDIR_BUF 32768LU
1901
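// Shared implementation of readdir and readdirplus. Directory entries are fetched from
// MediaProvider once per directory handle (on the first call, when h->next_off == 0) and then
// emitted into the kernel-provided buffer across subsequent calls, resuming from h->next_off.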
do_readdir_common(fuse_req_t req,fuse_ino_t ino,size_t size,off_t off,struct fuse_file_info * fi,bool plus)1902 static void do_readdir_common(fuse_req_t req,
1903 fuse_ino_t ino,
1904 size_t size,
1905 off_t off,
1906 struct fuse_file_info* fi,
1907 bool plus) {
1908 struct fuse* fuse = get_fuse(req);
1909 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1910 size_t len = std::min<size_t>(size, READDIR_BUF);
1911 char buf[READDIR_BUF];
1912 size_t used = 0;
1913 std::shared_ptr<DirectoryEntry> de;
1914
1915 struct fuse_entry_param e;
1916 size_t entry_size = 0;
1917
1918 node* node = fuse->FromInode(ino);
1919 if (!node) {
1920 fuse_reply_err(req, ENOENT);
1921 return;
1922 }
1923 const string path = node->BuildPath();
1924 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1925 fuse_reply_err(req, ENOENT);
1926 return;
1927 }
1928
1929 TRACE_NODE(node, req);
1930
1931 // We don't return EACCES for compatibility with the previous implementation.
1932 // It just ignored entries causing EACCES.
1933 if (!is_user_accessible_path(req, fuse, path)) {
1934 fuse_reply_buf(req, buf, used);
1935 return;
1936 }
1937
1938 // Get all directory entries from MediaProvider on the first readdir() call for a
1939 // directory handle. h->next_off == 0 indicates that the current readdir() call
1940 // is the first one for this directory handle; this avoids multiple JNI calls
1941 // for a single directory handle.
1942 if (h->next_off == 0) {
1943 h->de = fuse->mp->GetDirectoryEntries(req->ctx.uid, path, h->d);
1944 }
1945 // If the last entry in the previous readdir() call was rejected due to
1946 // buffer capacity constraints, update the directory offset to start from
1947 // the previously rejected entry. The directory offset can also change if
1948 // there was a seekdir() on the given directory handle.
1949 if (off != h->next_off) {
1950 h->next_off = off;
1951 }
1952 const int num_directory_entries = h->de.size();
1953 // Check for errors. Any error/exception that occurred while obtaining directory
1954 // entries is indicated by marking the first directory entry name as an empty
1955 // string; in that case the corresponding d_type holds the error number.
1956 if (num_directory_entries && h->de[0]->d_name.empty()) {
1957 fuse_reply_err(req, h->de[0]->d_type);
1958 return;
1959 }
1960
1961 while (h->next_off < num_directory_entries) {
1962 de = h->de[h->next_off];
1963 entry_size = 0;
1964 h->next_off++;
1965 if (plus) {
1966 int error_code = 0;
1967 // Skip validating user and app access, as those checks were already performed on the parent node
1968 if (do_lookup(req, ino, de->d_name.c_str(), &e, &error_code, FuseOp::readdir, false)) {
1969 entry_size = fuse_add_direntry_plus(req, buf + used, len - used, de->d_name.c_str(),
1970 &e, h->next_off);
1971 } else {
1972 // Ignore lookup errors on
1973 // 1. non-existing files returned from the MediaProvider database.
1974 // 2. paths that don't match the FuseDaemon UID and the calling UID.
1975 if (error_code == ENOENT || error_code == EPERM || error_code == EACCES
1976 || error_code == EIO) continue;
1977 fuse_reply_err(req, error_code);
1978 return;
1979 }
1980 } else {
1981 // This should never happen because readdir_plus is enabled without adaptive
1982 // readdir_plus (FUSE_CAP_READDIRPLUS_AUTO).
1983 LOG(WARNING) << "Handling plain readdir for " << de->d_name << ". Invalid d_ino";
1984 e.attr.st_ino = FUSE_UNKNOWN_INO;
1985 e.attr.st_mode = de->d_type << 12;
1986 entry_size = fuse_add_direntry(req, buf + used, len - used, de->d_name.c_str(), &e.attr,
1987 h->next_off);
1988 }
1989 // If the buffer passed to fuse_add_direntry[_plus] is not large enough then
1990 // the entry is not added to the buffer, but the size of the entry is still
1991 // returned. Check that the used buffer size plus the returned entry size does
1992 // not exceed the total buffer size to confirm the entry was actually added.
1993 if (used + entry_size > len) {
1994 // When an entry is rejected, the lookup performed by readdir_plus will not be tracked
1995 // by the kernel. Call forget on the rejected node to decrement its reference count.
1996 if (plus) {
1997 do_forget(req, fuse, e.ino, 1);
1998 }
1999 break;
2000 }
2001 used += entry_size;
2002 }
2003 fuse_reply_buf(req, buf, used);
2004 }
2005
pf_readdir(fuse_req_t req,fuse_ino_t ino,size_t size,off_t off,struct fuse_file_info * fi)2006 static void pf_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
2007 struct fuse_file_info* fi) {
2008 ATRACE_CALL();
2009 do_readdir_common(req, ino, size, off, fi, false);
2010 }
2011
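// Rounds o up to the next multiple of s, e.g. round_up(13, 8) == 16. Used below to pad
// fuse_dirent name lengths to 8-byte alignment.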
round_up(off_t o,size_t s)2012 static off_t round_up(off_t o, size_t s) {
2013 return (o + s - 1) / s * s;
2014 }
2015
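// Post-filters a readdir reply produced by the kernel (fuse-bpf path). Each incoming dirent is
// kept only if it can be stat()ed and the caller can access it: via the relevant exec mode bit,
// via MediaProvider's data/obb path check, or because it is the special ".nomedia" entry. If
// every entry was filtered out, fro->again is set.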
pf_readdir_postfilter(fuse_req_t req,fuse_ino_t ino,uint32_t error_in,off_t off_in,off_t off_out,size_t size_out,const void * dirents_in,struct fuse_file_info * fi)2016 static void pf_readdir_postfilter(fuse_req_t req, fuse_ino_t ino, uint32_t error_in, off_t off_in,
2017 off_t off_out, size_t size_out, const void* dirents_in,
2018 struct fuse_file_info* fi) {
2019 struct fuse* fuse = get_fuse(req);
2020 char buf[READDIR_BUF];
2021 struct fuse_read_out* fro = (struct fuse_read_out*)(buf);
2022 size_t used = 0;
2023 bool redacted = false;
2024 char* dirents_out = (char*)(fro + 1);
2025
2026 ATRACE_CALL();
2027 node* node = fuse->FromInode(ino);
2028 if (!node) {
2029 fuse_reply_err(req, ENOENT);
2030 return;
2031 }
2032
2033 TRACE_NODE(node, req);
2034 const string path = node->BuildPath();
2035
2036 *fro = (struct fuse_read_out){
2037 .offset = (uint64_t)off_out,
2038 };
2039
2040 for (off_t in = 0; in < size_out;) {
2041 struct fuse_dirent* dirent_in = (struct fuse_dirent*)((char*)dirents_in + in);
2042 struct fuse_dirent* dirent_out = (struct fuse_dirent*)((char*)dirents_out + used);
2043 struct stat stats;
2044 int err;
2045
2046 std::string child_name(dirent_in->name, dirent_in->namelen);
2047 std::string child_path = path + "/" + child_name;
2048
2049 in += sizeof(*dirent_in) + round_up(dirent_in->namelen, sizeof(uint64_t));
2050 err = stat(child_path.c_str(), &stats);
2051 if (err == 0 &&
2052 ((stats.st_mode & 0001) || ((stats.st_mode & 0010) && req->ctx.gid == stats.st_gid) ||
2053 ((stats.st_mode & 0100) && req->ctx.uid == stats.st_uid) ||
2054 fuse->mp->isUidAllowedAccessToDataOrObbPath(req->ctx.uid, child_path) ||
2055 child_name == ".nomedia")) {
2056 *dirent_out = *dirent_in;
2057 strcpy(dirent_out->name, child_name.c_str());
2058 used += sizeof(*dirent_out) + round_up(dirent_out->namelen, sizeof(uint64_t));
2059 } else {
2060 redacted = true;
2061 }
2062 }
2063 if (redacted && used == 0) fro->again = 1;
2064 fuse_reply_buf(req, buf, sizeof(*fro) + used);
2065 }
2066
pf_readdirplus(fuse_req_t req,fuse_ino_t ino,size_t size,off_t off,struct fuse_file_info * fi)2067 static void pf_readdirplus(fuse_req_t req,
2068 fuse_ino_t ino,
2069 size_t size,
2070 off_t off,
2071 struct fuse_file_info* fi) {
2072 ATRACE_CALL();
2073 do_readdir_common(req, ino, size, off, fi, true);
2074 }
2075
pf_releasedir(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)2076 static void pf_releasedir(fuse_req_t req,
2077 fuse_ino_t ino,
2078 struct fuse_file_info* fi) {
2079 ATRACE_CALL();
2080 struct fuse* fuse = get_fuse(req);
2081
2082 node* node = fuse->FromInode(ino);
2083
2084 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
2085 TRACE_NODE(node, req);
2086 if (node) {
2087 node->DestroyDirHandle(h);
2088 }
2089
2090 fuse_reply_err(req, 0);
2091 }
2092
pf_statfs(fuse_req_t req,fuse_ino_t ino)2093 static void pf_statfs(fuse_req_t req, fuse_ino_t ino) {
2094 ATRACE_CALL();
2095 struct statvfs st;
2096 struct fuse* fuse = get_fuse(req);
2097
2098 if (statvfs(fuse->root->GetName().c_str(), &st))
2099 fuse_reply_err(req, errno);
2100 else
2101 fuse_reply_statfs(req, &st);
2102 }
2103 /*
2104 static void pf_setxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
2105 const char* value, size_t size, int flags)
2106 {
2107 cout << "TODO:" << __func__;
2108 }
2109
2110 static void pf_getxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
2111 size_t size)
2112 {
2113 cout << "TODO:" << __func__;
2114 }
2115
2116 static void pf_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
2117 {
2118 cout << "TODO:" << __func__;
2119 }
2120
2121 static void pf_removexattr(fuse_req_t req, fuse_ino_t ino, const char* name)
2122 {
2123 cout << "TODO:" << __func__;
2124 }*/
2125
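// FUSE_ACCESS handler. F_OK (existence) checks go straight to the lower fs; X_OK on regular
// files is always denied because the filesystem is mounted MS_NOEXEC; read/write checks on
// files and directories are delegated to MediaProvider, with a special case that lets apps
// enter (X_OK) the primary volume prefix without listing it.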
pf_access(fuse_req_t req,fuse_ino_t ino,int mask)2126 static void pf_access(fuse_req_t req, fuse_ino_t ino, int mask) {
2127 ATRACE_CALL();
2128 struct fuse* fuse = get_fuse(req);
2129
2130 node* node = fuse->FromInode(ino);
2131 if (!node) {
2132 fuse_reply_err(req, ENOENT);
2133 return;
2134 }
2135 const string path = node->BuildPath();
2136 if (path != PRIMARY_VOLUME_PREFIX && !is_app_accessible_path(fuse, path, req->ctx.uid)) {
2137 fuse_reply_err(req, ENOENT);
2138 return;
2139 }
2140 TRACE_NODE(node, req);
2141
2142 // exists() checks are always allowed.
2143 if (mask == F_OK) {
2144 int res = access(path.c_str(), F_OK);
2145 fuse_reply_err(req, res ? errno : 0);
2146 return;
2147 }
2148 struct stat stat;
2149 if (lstat(path.c_str(), &stat)) {
2150 // File doesn't exist
2151 fuse_reply_err(req, ENOENT);
2152 return;
2153 }
2154
2155 // For read and write permission checks we go to MediaProvider.
2156 int status = 0;
2157 bool for_write = mask & W_OK;
2158 bool is_directory = S_ISDIR(stat.st_mode);
2159 if (is_directory) {
2160 if (path == PRIMARY_VOLUME_PREFIX && mask == X_OK) {
2161 // Special case for this path: apps should be allowed to enter it,
2162 // but not list directory contents (which would be user numbers).
2163 int res = access(path.c_str(), X_OK);
2164 fuse_reply_err(req, res ? errno : 0);
2165 return;
2166 }
2167 status = fuse->mp->IsOpendirAllowed(path, req->ctx.uid, for_write);
2168 } else {
2169 if (mask & X_OK) {
2170 // Fuse is mounted with MS_NOEXEC.
2171 fuse_reply_err(req, EACCES);
2172 return;
2173 }
2174
2175 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
2176 path, path, req->ctx.uid, req->ctx.pid, node->GetTransformsReason(), for_write,
2177 false /* redact */, false /* log_transforms_metrics */);
2178 if (!result) {
2179 status = EFAULT;
2180 } else if (result->status) {
2181 status = EACCES;
2182 }
2183 }
2184
2185 fuse_reply_err(req, status);
2186 }
2187
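// FUSE_CREATE handler. Inserts the new file into the MediaProvider database first, then creates
// it on the lower fs (rolling back the database entry if open() fails), builds the node entry,
// and finally sets up the handle and passthrough state the same way pf_open does.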
pf_create(fuse_req_t req,fuse_ino_t parent,const char * name,mode_t mode,struct fuse_file_info * fi)2188 static void pf_create(fuse_req_t req,
2189 fuse_ino_t parent,
2190 const char* name,
2191 mode_t mode,
2192 struct fuse_file_info* fi) {
2193 ATRACE_CALL();
2194 struct fuse* fuse = get_fuse(req);
2195 node* parent_node = fuse->FromInode(parent);
2196 if (!parent_node) {
2197 fuse_reply_err(req, ENOENT);
2198 return;
2199 }
2200 const string parent_path = parent_node->BuildPath();
2201 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
2202 fuse_reply_err(req, ENOENT);
2203 return;
2204 }
2205
2206 TRACE_NODE(parent_node, req);
2207
2208 const string child_path = parent_path + "/" + name;
2209
2210 const OpenInfo open_info = parse_open_flags(child_path, fi->flags);
2211
2212 int mp_return_code = fuse->mp->InsertFile(child_path.c_str(), req->ctx.uid);
2213 if (mp_return_code) {
2214 fuse_reply_err(req, mp_return_code);
2215 return;
2216 }
2217
2218 mode = (mode & (~0777)) | 0664;
2219 int fd = open(child_path.c_str(), open_info.flags, mode);
2220 if (fd < 0) {
2221 int error_code = errno;
2222 // We've already inserted the file into the MP database before the
2223 // failed open(), so that needs to be rolled back here.
2224 fuse->mp->DeleteFile(child_path.c_str(), req->ctx.uid);
2225 fuse_reply_err(req, error_code);
2226 return;
2227 }
2228
2229 int error_code = 0;
2230 struct fuse_entry_param e;
2231 node* node = make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
2232 FuseOp::create);
2233 TRACE_NODE(node, req);
2234 if (!node) {
2235 CHECK(error_code != 0);
2236 fuse_reply_err(req, error_code);
2237 return;
2238 }
2239
2240 // Let MediaProvider know we've created a new file
2241 fuse->mp->OnFileCreated(child_path);
2242
2243 // TODO(b/147274248): Assume there will be no EXIF to redact.
2244 // This prevents crashing during reads but can be a security hole if a malicious app opens an fd
2245 // to the file before all the EXIF content is written. We could special case reads before the
2246 // first close after a file has just been created.
2247 int keep_cache = 1;
2248 const handle* h = create_handle_for_node(
2249 fuse, child_path, fd, req->ctx.uid, 0 /* transforms_uid */, node, new RedactionInfo(),
2250 /* allow_passthrough */ true, open_info.direct_io, &keep_cache);
2251 fill_fuse_file_info(h, &open_info, keep_cache, fi);
2252
2253 // TODO(b/173190192) requiring that h->cached be enabled in order to
2254 // use FUSE passthrough is a conservative rule and might be dropped as
2255 // soon as its correctness is demonstrated.
2256 if (h->passthrough && !do_passthrough_enable(req, fi, fd)) {
2257 PLOG(ERROR) << "Passthrough CREATE failed for " << child_path;
2258 fuse_reply_err(req, EFAULT);
2259 return;
2260 }
2261
2262 fuse_reply_create(req, &e, fi);
2263 }
2264 /*
2265 static void pf_getlk(fuse_req_t req, fuse_ino_t ino,
2266 struct fuse_file_info* fi, struct flock* lock)
2267 {
2268 cout << "TODO:" << __func__;
2269 }
2270
2271 static void pf_setlk(fuse_req_t req, fuse_ino_t ino,
2272 struct fuse_file_info* fi,
2273 struct flock* lock, int sleep)
2274 {
2275 cout << "TODO:" << __func__;
2276 }
2277
2278 static void pf_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
2279 uint64_t idx)
2280 {
2281 cout << "TODO:" << __func__;
2282 }
2283
2284 static void pf_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned int cmd,
2285 void* arg, struct fuse_file_info* fi, unsigned flags,
2286 const void* in_buf, size_t in_bufsz, size_t out_bufsz)
2287 {
2288 cout << "TODO:" << __func__;
2289 }
2290
2291 static void pf_poll(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi,
2292 struct fuse_pollhandle* ph)
2293 {
2294 cout << "TODO:" << __func__;
2295 }
2296
2297 static void pf_retrieve_reply(fuse_req_t req, void* cookie, fuse_ino_t ino,
2298 off_t offset, struct fuse_bufvec* bufv)
2299 {
2300 cout << "TODO:" << __func__;
2301 }
2302
2303 static void pf_flock(fuse_req_t req, fuse_ino_t ino,
2304 struct fuse_file_info* fi, int op)
2305 {
2306 cout << "TODO:" << __func__;
2307 }
2308
2309 static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
2310 off_t offset, off_t length, struct fuse_file_info* fi)
2311 {
2312 cout << "TODO:" << __func__;
2313 }
2314 */
2315
2316 static struct fuse_lowlevel_ops ops{
2317 .init = pf_init, .destroy = pf_destroy, .lookup = pf_lookup,
2318 .lookup_postfilter = pf_lookup_postfilter, .forget = pf_forget, .getattr = pf_getattr,
2319 .setattr = pf_setattr, .canonical_path = pf_canonical_path, .mknod = pf_mknod,
2320 .mkdir = pf_mkdir, .unlink = pf_unlink, .rmdir = pf_rmdir,
2321 /*.symlink = pf_symlink,*/
2322 .rename = pf_rename,
2323 /*.link = pf_link,*/
2324 .open = pf_open, .read = pf_read,
2325 /*.write = pf_write,*/
2326 .flush = pf_flush, .release = pf_release, .fsync = pf_fsync, .opendir = pf_opendir,
2327 .readdir = pf_readdir, .readdirpostfilter = pf_readdir_postfilter, .releasedir = pf_releasedir,
2328 .fsyncdir = pf_fsyncdir, .statfs = pf_statfs,
2329 /*.setxattr = pf_setxattr,
2330 .getxattr = pf_getxattr,
2331 .listxattr = pf_listxattr,
2332 .removexattr = pf_removexattr,*/
2333 .access = pf_access, .create = pf_create,
2334 /*.getlk = pf_getlk,
2335 .setlk = pf_setlk,
2336 .bmap = pf_bmap,
2337 .ioctl = pf_ioctl,
2338 .poll = pf_poll,*/
2339 .write_buf = pf_write_buf,
2340 /*.retrieve_reply = pf_retrieve_reply,*/
2341 .forget_multi = pf_forget_multi,
2342 /*.flock = pf_flock,*/
2343 .fallocate = pf_fallocate, .readdirplus = pf_readdirplus,
2344 /*.copy_file_range = pf_copy_file_range,*/
2345 };
2346
2347 static struct fuse_loop_config config = {
2348 .clone_fd = 1,
2349 .max_idle_threads = 10,
2350 };
2351
2352 static std::unordered_map<enum fuse_log_level, enum android_LogPriority> fuse_to_android_loglevel({
2353 {FUSE_LOG_EMERG, ANDROID_LOG_FATAL},
2354 {FUSE_LOG_ALERT, ANDROID_LOG_ERROR},
2355 {FUSE_LOG_CRIT, ANDROID_LOG_ERROR},
2356 {FUSE_LOG_ERR, ANDROID_LOG_ERROR},
2357 {FUSE_LOG_WARNING, ANDROID_LOG_WARN},
2358 {FUSE_LOG_NOTICE, ANDROID_LOG_INFO},
2359 {FUSE_LOG_INFO, ANDROID_LOG_DEBUG},
2360 {FUSE_LOG_DEBUG, ANDROID_LOG_VERBOSE},
2361 });
2362
fuse_logger(enum fuse_log_level level,const char * fmt,va_list ap)2363 static void fuse_logger(enum fuse_log_level level, const char* fmt, va_list ap) {
2364 __android_log_vprint(fuse_to_android_loglevel.at(level), LIBFUSE_LOG_TAG, fmt, ap);
2365 }
2366
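// Decides whether an fd that is about to be handed to an app should be (re)opened through FUSE
// instead of being used directly: always true when passthrough is enabled; otherwise the
// decision depends on whether the file already has a cached FUSE handle and whether a tracking
// file lock can be set on the lower-fs fd.
//
// Illustrative call pattern only (hypothetical caller and names, not part of this file):
//
//   int raw_fd = open(lower_path.c_str(), O_RDONLY);  // lower-fs fd obtained outside FUSE
//   if (raw_fd >= 0 && !daemon.ShouldOpenWithFuse(raw_fd, /* for_read */ true, lower_path)) {
//       // Safe to hand raw_fd to the app directly; otherwise reopen the file through FUSE.
//   }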
ShouldOpenWithFuse(int fd,bool for_read,const std::string & path)2367 bool FuseDaemon::ShouldOpenWithFuse(int fd, bool for_read, const std::string& path) {
2368 if (fuse->passthrough) {
2369 // Always open with FUSE if passthrough is enabled. This avoids the delicate file lock
2370 // acquisition below to ensure VFS cache consistency and doesn't impact filesystem
2371 // performance since read(2)/write(2) happen in the kernel
2372 return true;
2373 }
2374
2375 bool use_fuse = false;
2376
2377 if (active.load(std::memory_order_acquire)) {
2378 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2379 const node* node = node::LookupAbsolutePath(fuse->root, path);
2380 if (node && node->HasCachedHandle()) {
2381 use_fuse = true;
2382 } else {
2383 // If we are unable to set a lock, we should use fuse since we can't track
2384 // when all fd references (including dups) are closed. This can happen when
2385 // we try to set a write lock twice on the same file
2386 use_fuse = set_file_lock(fd, for_read, path);
2387 }
2388 } else {
2389 LOG(WARNING) << "FUSE daemon is inactive. Cannot open file with FUSE";
2390 }
2391
2392 return use_fuse;
2393 }
2394
UsesFusePassthrough() const2395 bool FuseDaemon::UsesFusePassthrough() const {
2396 return fuse->passthrough;
2397 }
2398
InvalidateFuseDentryCache(const std::string & path)2399 void FuseDaemon::InvalidateFuseDentryCache(const std::string& path) {
2400 LOG(VERBOSE) << "Invalidating FUSE dentry cache";
2401 if (active.load(std::memory_order_acquire)) {
2402 string name;
2403 fuse_ino_t parent;
2404 fuse_ino_t child;
2405 {
2406 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2407 const node* node = node::LookupAbsolutePath(fuse->root, path);
2408 if (node) {
2409 name = node->GetName();
2410 child = fuse->ToInode(const_cast<class node*>(node));
2411 parent = fuse->ToInode(node->GetParent());
2412 }
2413 }
2414
2415 if (!name.empty()) {
2416 std::thread t([=]() { fuse_inval(fuse->se, parent, child, name, path); });
2417 t.detach();
2418 }
2419 } else {
2420 LOG(WARNING) << "FUSE daemon is inactive. Cannot invalidate dentry";
2421 }
2422 }
2423
FuseDaemon(JNIEnv * env,jobject mediaProvider)2424 FuseDaemon::FuseDaemon(JNIEnv* env, jobject mediaProvider) : mp(env, mediaProvider),
2425 active(false), fuse(nullptr) {}
2426
IsStarted() const2427 bool FuseDaemon::IsStarted() const {
2428 return active.load(std::memory_order_acquire);
2429 }
2430
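// Reads the boolean system property |name| only if it is explicitly set. Returns whether it was
// set and, if so, stores its value in |value| and logs the resulting fuse-bpf decision.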
IsPropertySet(const char * name,bool & value)2431 static bool IsPropertySet(const char* name, bool& value) {
2432 if (android::base::GetProperty(name, "") == "") return false;
2433
2434 value = android::base::GetBoolProperty(name, false);
2435 LOG(INFO) << "fuse-bpf is " << (value ? "enabled" : "disabled") << " because of property "
2436 << name;
2437 return true;
2438 }
2439
IsFuseBpfEnabled()2440 bool IsFuseBpfEnabled() {
2441 // ro.fuse.bpf.is_running may not be set when first reading this property, so we have to
2442 // reproduce the vold/Utils.cpp:isFuseBpfEnabled() logic here
2443
2444 bool is_enabled;
2445 if (IsPropertySet("ro.fuse.bpf.is_running", is_enabled)) return is_enabled;
2446 if (IsPropertySet("persist.sys.fuse.bpf.override", is_enabled)) return is_enabled;
2447 if (IsPropertySet("ro.fuse.bpf.enabled", is_enabled)) return is_enabled;
2448
2449 // If the kernel has fuse-bpf, /sys/fs/fuse/features/fuse_bpf will exist and have the contents
2450 // 'supported\n' - see fs/fuse/inode.c in the kernel source
2451 string contents;
2452 const char* filename = "/sys/fs/fuse/features/fuse_bpf";
2453 if (!android::base::ReadFileToString(filename, &contents)) {
2454 LOG(INFO) << "fuse-bpf is disabled because " << filename << " cannot be read";
2455 return false;
2456 }
2457
2458 if (contents == "supported\n") {
2459 LOG(INFO) << "fuse-bpf is enabled because " << filename << " reads 'supported'";
2460 return true;
2461 } else {
2462 LOG(INFO) << "fuse-bpf is disabled because " << filename << " does not read 'supported'";
2463 return false;
2464 }
2465 }
2466
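// Mounts and runs the FUSE filesystem on the already-opened FUSE device fd. This call blocks in
// fuse_session_loop_mt() until the filesystem is unmounted, so it is expected to run on a
// dedicated thread; FuseDaemon#active reports whether the loop is currently running.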
Start(android::base::unique_fd fd,const std::string & path,const bool uncached_mode,const std::vector<std::string> & supported_transcoding_relative_paths,const std::vector<std::string> & supported_uncached_relative_paths)2467 void FuseDaemon::Start(android::base::unique_fd fd, const std::string& path,
2468 const bool uncached_mode,
2469 const std::vector<std::string>& supported_transcoding_relative_paths,
2470 const std::vector<std::string>& supported_uncached_relative_paths) {
2471 android::base::SetDefaultTag(LOG_TAG);
2472
2473 struct fuse_args args;
2474 struct fuse_cmdline_opts opts;
2475
2476 struct stat stat;
2477
2478 if (lstat(path.c_str(), &stat)) {
2479 PLOG(ERROR) << "ERROR: failed to stat source " << path;
2480 return;
2481 }
2482
2483 if (!S_ISDIR(stat.st_mode)) {
2484 PLOG(ERROR) << "ERROR: source is not a directory";
2485 return;
2486 }
2487
2488 args = FUSE_ARGS_INIT(0, nullptr);
2489 if (fuse_opt_add_arg(&args, path.c_str()) || fuse_opt_add_arg(&args, "-odebug") ||
2490 fuse_opt_add_arg(&args, ("-omax_read=" + std::to_string(MAX_READ_SIZE)).c_str())) {
2491 LOG(ERROR) << "ERROR: failed to set options";
2492 return;
2493 }
2494
2495 bool bpf_enabled = IsFuseBpfEnabled();
2496 android::base::unique_fd bpf_fd(-1);
2497 if (bpf_enabled) {
2498 bpf_fd.reset(android::bpf::retrieveProgram(FUSE_BPF_PROG_PATH));
2499 if (!bpf_fd.ok()) {
2500 int error = errno;
2501 PLOG(ERROR) << "Failed to fetch BPF prog fd: " << error;
2502 bpf_enabled = false;
2503 } else {
2504 LOG(INFO) << "Using FUSE BPF, BPF prog fd fetched";
2505 }
2506 }
2507
2508 if (!bpf_enabled) {
2509 LOG(INFO) << "Not using FUSE BPF";
2510 }
2511
2512 struct fuse fuse_default(path, stat.st_ino, uncached_mode, bpf_enabled, std::move(bpf_fd),
2513 supported_transcoding_relative_paths,
2514 supported_uncached_relative_paths);
2515 fuse_default.mp = &mp;
2516 // fuse_default is stack allocated, but it's safe to save it as an instance variable because
2517 // this method blocks and FuseDaemon#active tells if we are currently blocking
2518 fuse = &fuse_default;
2519
2520 // Used by pf_read: redacted ranges are represented by zeroized ranges of bytes,
2521 // so we mmap the maximum length of redacted ranges in the beginning and save memory allocations
2522 // on each read.
2523 fuse_default.zero_addr = static_cast<char*>(mmap(
2524 NULL, MAX_READ_SIZE, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, /*fd*/ -1, /*off*/ 0));
2525 if (fuse_default.zero_addr == MAP_FAILED) {
2526 LOG(FATAL) << "mmap failed - could not start fuse! errno = " << errno;
2527 }
2528
2529 // Custom logging for libfuse
2530 if (android::base::GetBoolProperty("persist.sys.fuse.log", false)) {
2531 fuse_set_log_func(fuse_logger);
2532 }
2533
2534 if (MY_USER_ID != 0 && mp.IsAppCloneUser(MY_USER_ID)) {
2535 // Disable dentry caching for the app clone user
2536 fuse->disable_dentry_cache = true;
2537 }
2538
2539 fuse->passthrough = android::base::GetBoolProperty("persist.sys.fuse.passthrough.enable", false);
2540 if (fuse->passthrough) {
2541 LOG(INFO) << "Using FUSE passthrough";
2542 }
2543
2544 struct fuse_session* se =
2545 fuse_session_new(&args, &ops, sizeof(ops), &fuse_default);
2546 if (!se) {
2547 PLOG(ERROR) << "Failed to create session ";
2548 return;
2549 }
2550 fuse_default.se = se;
2551 fuse_default.active = &active;
2552 se->fd = fd.release(); // libfuse owns the FD now
2553 se->mountpoint = strdup(path.c_str());
2554
2555 // Single thread. Useful for debugging
2556 // fuse_session_loop(se);
2557 // Multi-threaded
2558 LOG(INFO) << "Starting fuse...";
2559 fuse_session_loop_mt(se, &config);
2560 fuse->active->store(false, std::memory_order_release);
2561 LOG(INFO) << "Ending fuse...";
2562
2563 if (munmap(fuse_default.zero_addr, MAX_READ_SIZE)) {
2564 PLOG(ERROR) << "munmap failed!";
2565 }
2566
2567 fuse_opt_free_args(&args);
2568 fuse_session_destroy(se);
2569 LOG(INFO) << "Ended fuse";
2570 return;
2571 }
2572
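// Checks whether |fd| refers to a file on this FUSE device and, if so, whether the node backing
// it has an open handle belonging to |uid|; returns an FdAccessResult describing the outcome.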
CheckFdAccess(int fd,uid_t uid) const2573 std::unique_ptr<FdAccessResult> FuseDaemon::CheckFdAccess(int fd, uid_t uid) const {
2574 struct stat s;
2575 memset(&s, 0, sizeof(s));
2576 if (fstat(fd, &s) < 0) {
2577 PLOG(DEBUG) << "CheckFdAccess fstat failed.";
2578 return std::make_unique<FdAccessResult>(string(), false);
2579 }
2580
2581 ino_t ino = s.st_ino;
2582 dev_t dev = s.st_dev;
2583
2584 dev_t fuse_dev = fuse->dev.load(std::memory_order_acquire);
2585 if (dev != fuse_dev) {
2586 PLOG(DEBUG) << "CheckFdAccess FUSE device id does not match.";
2587 return std::make_unique<FdAccessResult>(string(), false);
2588 }
2589
2590 const node* node = node::LookupInode(fuse->root, ino);
2591 if (!node) {
2592 PLOG(DEBUG) << "CheckFdAccess no node found with given ino";
2593 return std::make_unique<FdAccessResult>(string(), false);
2594 }
2595
2596 return node->CheckHandleForUid(uid);
2597 }
2598
InitializeDeviceId(const std::string & path)2599 void FuseDaemon::InitializeDeviceId(const std::string& path) {
2600 struct stat stat;
2601
2602 if (lstat(path.c_str(), &stat)) {
2603 PLOG(ERROR) << "InitializeDeviceId failed to stat given path " << path;
2604 return;
2605 }
2606
2607 fuse->dev.store(stat.st_dev, std::memory_order_release);
2608 }
2609
SetupLevelDbConnection(const std::string & instance_name)2610 void FuseDaemon::SetupLevelDbConnection(const std::string& instance_name) {
2611 if (CheckLevelDbConnection(instance_name)) {
2612 LOG(DEBUG) << "Leveldb connection already exists for :" << instance_name;
2613 return;
2614 }
2615
2616 std::string leveldbPath =
2617 "/data/media/" + MY_USER_ID_STRING + "/.transforms/recovery/leveldb-" + instance_name;
2618 leveldb::Options options;
2619 options.create_if_missing = true;
2620 leveldb::DB* leveldb;
2621 leveldb::Status status = leveldb::DB::Open(options, leveldbPath, &leveldb);
2622 if (status.ok()) {
2623 fuse->level_db_connection_map.insert(
2624 std::pair<std::string, leveldb::DB*>(instance_name, leveldb));
2625 LOG(INFO) << "Leveldb connection established for :" << instance_name;
2626 } else {
2627 LOG(ERROR) << "Leveldb connection failed for :" << instance_name
2628 << " with error:" << status.ToString();
2629 }
2630 }
2631
SetupLevelDbInstances()2632 void FuseDaemon::SetupLevelDbInstances() {
2633 if (android::base::StartsWith(fuse->root->GetIoPath(), PRIMARY_VOLUME_PREFIX)) {
2634 // Setup leveldb instances for the internal volume, the external primary volume, and the ownership relation.
2635 fuse->level_db_mutex.lock();
2636 // Create level db instance for internal volume
2637 SetupLevelDbConnection(VOLUME_INTERNAL);
2638 // Create level db instance for external primary volume
2639 SetupLevelDbConnection(VOLUME_EXTERNAL_PRIMARY);
2640 // Create level db instance to store the owner id to owner package name mapping (and vice versa)
2641 SetupLevelDbConnection(OWNERSHIP_RELATION);
2642 fuse->level_db_mutex.unlock();
2643 }
2644 }
2645
SetupPublicVolumeLevelDbInstance(const std::string & volume_name)2646 void FuseDaemon::SetupPublicVolumeLevelDbInstance(const std::string& volume_name) {
2647 // Setup leveldb instance for the given public volume.
2648 fuse->level_db_mutex.lock();
2649 // Create level db instance for public volume
2650 SetupLevelDbConnection(volume_name);
2651 fuse->level_db_mutex.unlock();
2652 }
2653
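// Maps an absolute path to the leveldb instance name used for backup/recovery: internal storage,
// the external primary volume, or the lower-cased volume UUID of a public volume (taken from the
// /storage/<UUID>/ prefix of the path).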
deriveVolumeName(const std::string & path)2654 std::string deriveVolumeName(const std::string& path) {
2655 std::string volume_name;
2656 if (!android::base::StartsWith(path, STORAGE_PREFIX)) {
2657 volume_name = VOLUME_INTERNAL;
2658 } else if (android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
2659 volume_name = VOLUME_EXTERNAL_PRIMARY;
2660 } else {
2661 // Return "C58E-1702" from the path like "/storage/C58E-1702/Download/1935694997673.png"
2662 volume_name = path.substr(9, 9);
2663 // Convert to lowercase
2664 std::transform(volume_name.begin(), volume_name.end(), volume_name.begin(), ::tolower);
2665 }
2666 return volume_name;
2667 }
2668
DeleteFromLevelDb(const std::string & key)2669 void FuseDaemon::DeleteFromLevelDb(const std::string& key) {
2670 fuse->level_db_mutex.lock();
2671 std::string volume_name = deriveVolumeName(key);
2672 if (!CheckLevelDbConnection(volume_name)) {
2673 fuse->level_db_mutex.unlock();
2674 LOG(ERROR) << "DeleteFromLevelDb: Missing leveldb connection.";
2675 return;
2676 }
2677
2678 leveldb::Status status;
2679 status = fuse->level_db_connection_map[volume_name]->Delete(leveldb::WriteOptions(), key);
2680 if (!status.ok()) {
2681 LOG(ERROR) << "Failure in leveldb delete for key: " << key
2682 << " from volume:" << volume_name;
2683 }
2684 fuse->level_db_mutex.unlock();
2685 }
2686
InsertInLevelDb(const std::string & volume_name,const std::string & key,const std::string & value)2687 void FuseDaemon::InsertInLevelDb(const std::string& volume_name, const std::string& key,
2688 const std::string& value) {
2689 fuse->level_db_mutex.lock();
2690 if (!CheckLevelDbConnection(volume_name)) {
2691 fuse->level_db_mutex.unlock();
2692 LOG(ERROR) << "InsertInLevelDb: Missing leveldb connection.";
2693 return;
2694 }
2695
2696 leveldb::Status status;
2697 status = fuse->level_db_connection_map[volume_name]->Put(leveldb::WriteOptions(), key,
2698 value);
2699 fuse->level_db_mutex.unlock();
2700 if (!status.ok()) {
2701 LOG(ERROR) << "Failure in leveldb insert for key: " << key
2702 << " in volume:" << volume_name;
2703 LOG(ERROR) << status.ToString();
2704 }
2705 }
2706
ReadFilePathsFromLevelDb(const std::string & volume_name,const std::string & last_read_value,int limit)2707 std::vector<std::string> FuseDaemon::ReadFilePathsFromLevelDb(const std::string& volume_name,
2708 const std::string& last_read_value,
2709 int limit) {
2710 fuse->level_db_mutex.lock();
2711 int counter = 0;
2712 std::vector<std::string> file_paths;
2713
2714 if (!CheckLevelDbConnection(volume_name)) {
2715 fuse->level_db_mutex.unlock();
2716 LOG(ERROR) << "ReadFilePathsFromLevelDb: Missing leveldb connection";
2717 return file_paths;
2718 }
2719
2720 leveldb::Iterator* it =
2721 fuse->level_db_connection_map[volume_name]->NewIterator(leveldb::ReadOptions());
2722 if (android::base::EqualsIgnoreCase(last_read_value, "")) {
2723 it->SeekToFirst();
2724 } else {
2725 // Start after last read value
2726 leveldb::Slice slice = last_read_value;
2727 it->Seek(slice);
2728 it->Next();
2729 }
2730 for (; it->Valid() && counter < limit; it->Next()) {
2731 file_paths.push_back(it->key().ToString());
2732 counter++;
2733 }
2734 fuse->level_db_mutex.unlock();
2735 return file_paths;
2736 }
2737
ReadBackedUpDataFromLevelDb(const std::string & filePath)2738 std::string FuseDaemon::ReadBackedUpDataFromLevelDb(const std::string& filePath) {
2739 fuse->level_db_mutex.lock();
2740 std::string data = "";
2741 std::string volume_name = deriveVolumeName(filePath);
2742 if (!CheckLevelDbConnection(volume_name)) {
2743 fuse->level_db_mutex.unlock();
2744 LOG(ERROR) << "ReadBackedUpDataFromLevelDb: Missing leveldb connection.";
2745 return data;
2746 }
2747
2748 leveldb::Status status = fuse->level_db_connection_map[volume_name]->Get(
2749 leveldb::ReadOptions(), filePath, &data);
2750 fuse->level_db_mutex.unlock();
2751
2752 if (status.IsNotFound()) {
2753 LOG(VERBOSE) << "Key is not found in leveldb: " << filePath << " " << status.ToString();
2754 } else if (!status.ok()) {
2755 LOG(WARNING) << "Failure in leveldb read for key: " << filePath << " "
2756 << status.ToString();
2757 }
2758 return data;
2759 }
2760
ReadOwnership(const std::string & key)2761 std::string FuseDaemon::ReadOwnership(const std::string& key) {
2762 fuse->level_db_mutex.lock();
2763 // Return empty string if key not found
2764 std::string data = "";
2765 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2766 fuse->level_db_mutex.unlock();
2767 LOG(ERROR) << "ReadOwnership: Missing leveldb connection.";
2768 return data;
2769 }
2770
2771 leveldb::Status status = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Get(
2772 leveldb::ReadOptions(), key, &data);
2773 fuse->level_db_mutex.unlock();
2774
2775 if (status.IsNotFound()) {
2776 LOG(VERBOSE) << "Key is not found in leveldb: " << key << " " << status.ToString();
2777 } else if (!status.ok()) {
2778 LOG(WARNING) << "Failure in leveldb read for key: " << key << " " << status.ToString();
2779 }
2780
2781 return data;
2782 }
2783
CreateOwnerIdRelation(const std::string & ownerId,const std::string & ownerPackageIdentifier)2784 void FuseDaemon::CreateOwnerIdRelation(const std::string& ownerId,
2785 const std::string& ownerPackageIdentifier) {
2786 fuse->level_db_mutex.lock();
2787 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2788 fuse->level_db_mutex.unlock();
2789 LOG(ERROR) << "CreateOwnerIdRelation: Missing leveldb connection.";
2790 return;
2791 }
2792
2793 leveldb::Status status1, status2;
2794 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2795 leveldb::WriteOptions(), ownerId, ownerPackageIdentifier);
2796 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2797 leveldb::WriteOptions(), ownerPackageIdentifier, ownerId);
2798 if (!status1.ok() || !status2.ok()) {
2799 // If either insert did not go through, remove both.
2800 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2801 ownerId);
2802 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2803 ownerPackageIdentifier);
2804 LOG(ERROR) << "Failure in leveldb insert for owner_id: " << ownerId
2805 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2806 }
2807 fuse->level_db_mutex.unlock();
2808 }
2809
RemoveOwnerIdRelation(const std::string & ownerId,const std::string & ownerPackageIdentifier)2810 void FuseDaemon::RemoveOwnerIdRelation(const std::string& ownerId,
2811 const std::string& ownerPackageIdentifier) {
2812 fuse->level_db_mutex.lock();
2813 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2814 fuse->level_db_mutex.unlock();
2815 LOG(ERROR) << "RemoveOwnerIdRelation: Missing leveldb connection.";
2816 return;
2817 }
2818
2819 leveldb::Status status1, status2;
2820 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2821 ownerId);
2822 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2823 ownerPackageIdentifier);
2824 if (status1.ok() && status2.ok()) {
2825 LOG(INFO) << "Successfully deleted rows in leveldb for owner_id: " << ownerId
2826 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2827 } else {
2828 // If either delete did not go through, revert both.
2829 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2830 leveldb::WriteOptions(), ownerId, ownerPackageIdentifier);
2831 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2832 leveldb::WriteOptions(), ownerPackageIdentifier, ownerId);
2833 LOG(ERROR) << "Failure in leveldb delete for owner_id: " << ownerId
2834 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2835 }
2836 fuse->level_db_mutex.unlock();
2837 }
2838
GetOwnerRelationship()2839 std::map<std::string, std::string> FuseDaemon::GetOwnerRelationship() {
2840 fuse->level_db_mutex.lock();
2841 std::map<std::string, std::string> resultMap;
2842 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2843 fuse->level_db_mutex.unlock();
2844 LOG(ERROR) << "GetOwnerRelationship: Missing leveldb connection.";
2845 return resultMap;
2846 }
2847
2848 leveldb::Status status;
2849 // Get the key-value pairs from the database.
2850 leveldb::Iterator* it =
2851 fuse->level_db_connection_map[OWNERSHIP_RELATION]->NewIterator(leveldb::ReadOptions());
2852 for (it->SeekToFirst(); it->Valid(); it->Next()) {
2853 std::string key = it->key().ToString();
2854 std::string value = it->value().ToString();
2855 resultMap.insert(std::pair<std::string, std::string>(key, value));
2856 }
2857
2858 fuse->level_db_mutex.unlock();
2859 return resultMap;
2860 }
2861
CheckLevelDbConnection(const std::string & instance_name)2862 bool FuseDaemon::CheckLevelDbConnection(const std::string& instance_name) {
2863 if (fuse->level_db_connection_map.find(instance_name) == fuse->level_db_connection_map.end()) {
2864 LOG(ERROR) << "Leveldb setup is missing for: " << instance_name;
2865 return false;
2866 }
2867 return true;
2868 }
2869
2870 } //namespace fuse
2871 } // namespace mediaprovider
2872