1 // Copyright (C) 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #define ATRACE_TAG ATRACE_TAG_APP
16 #define LOG_TAG "FuseDaemon"
17 #define LIBFUSE_LOG_TAG "libfuse"
18
19 #include "FuseDaemon.h"
20
21 #include <android-base/logging.h>
22 #include <android-base/properties.h>
23 #include <android/log.h>
24 #include <android/trace.h>
25 #include <ctype.h>
26 #include <dirent.h>
27 #include <errno.h>
28 #include <fcntl.h>
29 #include <fuse_i.h>
30 #include <fuse_log.h>
31 #include <fuse_lowlevel.h>
32 #include <inttypes.h>
33 #include <limits.h>
34 #include <linux/fuse.h>
35 #include <stdbool.h>
36 #include <stdio.h>
37 #include <stdlib.h>
38 #include <string.h>
39 #include <sys/inotify.h>
40 #include <sys/mman.h>
41 #include <sys/mount.h>
42 #include <sys/param.h>
43 #include <sys/resource.h>
44 #include <sys/stat.h>
45 #include <sys/statfs.h>
46 #include <sys/statvfs.h>
47 #include <sys/time.h>
48 #include <sys/types.h>
49 #include <sys/uio.h>
50 #include <unistd.h>
51
52 #include <iostream>
53 #include <list>
54 #include <map>
55 #include <mutex>
56 #include <queue>
57 #include <regex>
58 #include <thread>
59 #include <unordered_map>
60 #include <unordered_set>
61 #include <vector>
62
63 #include "MediaProviderWrapper.h"
64 #include "libfuse_jni/FuseUtils.h"
65 #include "libfuse_jni/ReaddirHelper.h"
66 #include "libfuse_jni/RedactionInfo.h"
67 #include "node-inl.h"
68
69 using mediaprovider::fuse::DirectoryEntry;
70 using mediaprovider::fuse::dirhandle;
71 using mediaprovider::fuse::handle;
72 using mediaprovider::fuse::node;
73 using mediaprovider::fuse::RedactionInfo;
74 using std::list;
75 using std::string;
76 using std::vector;
77
// logging macros to avoid duplication.
// Logs (at VERBOSE) the calling function, the node expression, the node's
// printable path(s) from get_name(), and the requesting uid.
#define TRACE_NODE(__node, __req) \
    LOG(VERBOSE) << __FUNCTION__ << " : " << #__node << " = [" << get_name(__node) \
                 << "] (uid=" << __req->ctx.uid << ") "

// Scoped systrace helpers: ATRACE_CALL() traces the enclosing function via the
// RAII ScopedTrace class defined below.
#define ATRACE_NAME(name) ScopedTrace ___tracer(name)
#define ATRACE_CALL() ATRACE_NAME(__FUNCTION__)
85
86 class ScopedTrace {
87 public:
ScopedTrace(const char * name)88 explicit inline ScopedTrace(const char *name) {
89 ATrace_beginSection(name);
90 }
91
~ScopedTrace()92 inline ~ScopedTrace() {
93 ATrace_endSection();
94 }
95 };
96
// True on debuggable (userdebug/eng) builds; gates logging of real paths in get_name().
const bool IS_OS_DEBUGABLE = android::base::GetIntProperty("ro.debuggable", 0);

// Sentinel inode value for entries whose real inode cannot be resolved.
#define FUSE_UNKNOWN_INO 0xffffffff

// Stolen from: android_filesystem_config.h
// First uid assigned to regular apps; uids below this are system components.
#define AID_APP_START 10000

// Maximum read size advertised to the kernel (see pf_init's conn->max_read).
constexpr size_t MAX_READ_SIZE = 128 * 1024;
// Stolen from: UserHandle#getUserId
// Each Android user owns a contiguous block of this many uids.
constexpr int PER_USER_RANGE = 100000;

// Regex copied from FileUtils.java in MediaProvider, but without media directory.
// Matches .../Android/{data,obb,sandbox}/<package>...; capture group 1 is the package name.
const std::regex PATTERN_OWNED_PATH(
    "^/storage/[^/]+/(?:[0-9]+/)?Android/(?:data|obb|sandbox)/([^/]+)(/?.*)?",
    std::regex_constants::icase);
112
/*
 * In order to avoid double caching with fuse, call fadvise on the file handles
 * in the underlying file system. However, if this is done on every read/write,
 * the fadvises cause a very significant slowdown in tests (specifically fio
 * seq_write). So call fadvise on the file handles with the most reads/writes
 * only after a threshold is passed.
 */
class FAdviser {
  public:
    // Starts the worker thread immediately. The worker only acts on messages
    // from the queue, which is empty until Record/Close are called.
    FAdviser() : thread_(MessageLoop, this), total_size_(0) {}

    ~FAdviser() {
        // Ask the worker to exit, then wait for it.
        SendMessage(Message::quit);
        thread_.join();
    }

    // Records |size| bytes of IO on |fd|. Asynchronous: only enqueues a
    // message, so the read/write fast path never blocks on fadvise here.
    void Record(int fd, size_t size) { SendMessage(Message::record, fd, size); }

    // Drops all accounting for |fd|; call when the fd is closed.
    void Close(int fd) { SendMessage(Message::close, fd); }

  private:
    // Work item passed from callers to the worker thread.
    struct Message {
        enum Type { record, close, quit };
        Type type;
        int fd;
        size_t size;
    };

    // Worker-thread only. Accumulates |size| for |fd|; once total_size_
    // crosses threshold_, fadvises the biggest files until below target_.
    void RecordImpl(int fd, size_t size) {
        total_size_ += size;

        // Find or create record in files_
        // Remove record from sizes_ if it exists, adjusting size appropriately
        auto file = files_.find(fd);
        if (file != files_.end()) {
            auto old_size = file->second;
            size += old_size->first;
            sizes_.erase(old_size);
        } else {
            file = files_.insert(Files::value_type(fd, sizes_.end())).first;
        }

        // Now (re) insert record in sizes_
        auto new_size = sizes_.insert(Sizes::value_type(size, fd));
        file->second = new_size;

        if (total_size_ < threshold_) return;

        LOG(INFO) << "Threshold exceeded - fadvising " << total_size_;
        // Evict from the largest accumulated size downwards (sizes_ is a
        // multimap ordered by size, so --end() is the biggest entry).
        while (!sizes_.empty() && total_size_ > target_) {
            auto size = --sizes_.end();
            total_size_ -= size->first;
            posix_fadvise(size->second, 0, 0, POSIX_FADV_DONTNEED);
            files_.erase(size->second);
            sizes_.erase(size);
        }
        LOG(INFO) << "Threshold now " << total_size_;
    }

    // Worker-thread only. Removes |fd| from both indexes and the running total.
    void CloseImpl(int fd) {
        auto file = files_.find(fd);
        if (file == files_.end()) return;

        total_size_ -= file->second->first;
        sizes_.erase(file->second);
        files_.erase(file);
    }

    // Worker thread body: waits on the queue and dispatches until quit.
    void MessageLoopImpl() {
        while (1) {
            Message message;

            {
                std::unique_lock<std::mutex> lock(mutex_);
                cv_.wait(lock, [this] { return !queue_.empty(); });
                message = queue_.front();
                queue_.pop();
            }

            switch (message.type) {
                case Message::record:
                    RecordImpl(message.fd, message.size);
                    break;

                case Message::close:
                    CloseImpl(message.fd);
                    break;

                case Message::quit:
                    return;
            }
        }
    }

    // Thread entry point trampoline.
    static int MessageLoop(FAdviser* ptr) {
        ptr->MessageLoopImpl();
        return 0;
    }

    // Thread-safe: enqueues a message and wakes the worker.
    void SendMessage(Message::Type type, int fd = -1, size_t size = 0) {
        {
            std::unique_lock<std::mutex> lock(mutex_);
            Message message = {type, fd, size};
            queue_.push(message);
        }
        cv_.notify_one();
    }

    std::mutex mutex_;
    std::condition_variable cv_;
    std::queue<Message> queue_;
    std::thread thread_;

    // sizes_ orders fds by accumulated IO; files_ maps an fd back to its
    // sizes_ entry so it can be updated without a linear scan.
    typedef std::multimap<size_t, int> Sizes;
    typedef std::map<int, Sizes::iterator> Files;

    Files files_;
    Sizes sizes_;
    size_t total_size_;  // Sum of all sizes currently tracked in sizes_.

    // Start fadvising above threshold_; keep evicting until below target_.
    const size_t threshold_ = 64 * 1024 * 1024;
    const size_t target_ = 32 * 1024 * 1024;
};
236
/* Single FUSE mount */
struct fuse {
    // |lock| is shared with the tracker and every node created for this mount
    // (both receive &lock below).
    explicit fuse(const std::string& _path)
        : path(_path),
          tracker(mediaprovider::fuse::NodeTracker(&lock)),
          root(node::CreateRoot(_path, &lock, &tracker)),
          mp(0),
          zero_addr(0) {}

    // True iff |node| is this mount's root node.
    inline bool IsRoot(const node* node) const { return node == root; }

    // For emulated storage, appends the user id this daemon serves (derived
    // from the daemon's own uid); other mounts return |path| unchanged.
    inline string GetEffectiveRootPath() {
        if (path.find("/storage/emulated", 0) == 0) {
            return path + "/" + std::to_string(getuid() / PER_USER_RANGE);
        }
        return path;
    }

    // Note that these two (FromInode / ToInode) conversion wrappers are required
    // because fuse_lowlevel_ops documents that the root inode is always one
    // (see FUSE_ROOT_ID in fuse_lowlevel.h). There are no particular requirements
    // on any of the other inodes in the FS.
    inline node* FromInode(__u64 inode) {
        if (inode == FUSE_ROOT_ID) {
            return root;
        }

        return node::FromInode(inode, &tracker);
    }

    inline __u64 ToInode(node* node) const {
        if (IsRoot(node)) {
            return FUSE_ROOT_ID;
        }

        return node::ToInode(node);
    }

    // Guards the node tree; handed to the tracker and all nodes at creation.
    std::recursive_mutex lock;
    const string path;
    // The Inode tracker associated with this FUSE instance.
    mediaprovider::fuse::NodeTracker tracker;
    node* const root;
    struct fuse_session* se;

    /*
     * Used to make JNI calls to MediaProvider.
     * Responsibility of freeing this object falls on corresponding
     * FuseDaemon object.
     */
    mediaprovider::fuse::MediaProviderWrapper* mp;

    /*
     * Points to a range of zeroized bytes, used by pf_read to represent redacted ranges.
     * The memory is read only and should never be modified.
     */
    /* const */ char* zero_addr;

    // Per-fd IO accounting used to release the lower-fs page cache.
    FAdviser fadviser;

    // Set to true by pf_init once the FUSE session is up.
    std::atomic_bool* active;
};
299
get_name(node * n)300 static inline string get_name(node* n) {
301 if (n) {
302 std::string name = IS_OS_DEBUGABLE ? "real_path: " + n->BuildPath() + " " : "";
303 name += "node_path: " + n->BuildSafePath();
304 return name;
305 }
306 return "?";
307 }
308
// Converts a heap pointer into a 64-bit identifier (named casts instead of a
// C-style cast; the value is bit-identical to the original).
static inline __u64 ptr_to_id(void* ptr) {
    return static_cast<__u64>(reinterpret_cast<uintptr_t>(ptr));
}
312
313 /*
314 * Set an F_RDLCK or F_WRLCKK on fd with fcntl(2).
315 *
316 * This is called before the MediaProvider returns fd from the lower file
317 * system to an app over the ContentResolver interface. This allows us
318 * check with is_file_locked if any reference to that fd is still open.
319 */
set_file_lock(int fd,bool for_read,const std::string & path)320 static int set_file_lock(int fd, bool for_read, const std::string& path) {
321 std::string lock_str = (for_read ? "read" : "write");
322
323 struct flock fl{};
324 fl.l_type = for_read ? F_RDLCK : F_WRLCK;
325 fl.l_whence = SEEK_SET;
326
327 int res = fcntl(fd, F_OFD_SETLK, &fl);
328 if (res) {
329 PLOG(WARNING) << "Failed to set lock: " << lock_str;
330 return res;
331 }
332 return res;
333 }
334
335 /*
336 * Check if an F_RDLCK or F_WRLCK is set on fd with fcntl(2).
337 *
338 * This is used to determine if the MediaProvider has given an fd to the lower fs to an app over
339 * the ContentResolver interface. Before that happens, we always call set_file_lock on the file
340 * allowing us to know if any reference to that fd is still open here.
341 *
342 * Returns true if fd may have a lock, false otherwise
343 */
is_file_locked(int fd,const std::string & path)344 static bool is_file_locked(int fd, const std::string& path) {
345 struct flock fl{};
346 fl.l_type = F_WRLCK;
347 fl.l_whence = SEEK_SET;
348
349 int res = fcntl(fd, F_OFD_GETLK, &fl);
350 if (res) {
351 PLOG(WARNING) << "Failed to check lock";
352 // Assume worst
353 return true;
354 }
355 bool locked = fl.l_type != F_UNLCK;
356 return locked;
357 }
358
get_fuse(fuse_req_t req)359 static struct fuse* get_fuse(fuse_req_t req) {
360 return reinterpret_cast<struct fuse*>(fuse_req_userdata(req));
361 }
362
is_package_owned_path(const string & path,const string & fuse_path)363 static bool is_package_owned_path(const string& path, const string& fuse_path) {
364 if (path.rfind(fuse_path, 0) != 0) {
365 return false;
366 }
367 return std::regex_match(path, PATTERN_OWNED_PATH);
368 }
369
370 // See fuse_lowlevel.h fuse_lowlevel_notify_inval_entry for how to call this safetly without
371 // deadlocking the kernel
fuse_inval(fuse_session * se,fuse_ino_t parent_ino,fuse_ino_t child_ino,const string & child_name,const string & path)372 static void fuse_inval(fuse_session* se, fuse_ino_t parent_ino, fuse_ino_t child_ino,
373 const string& child_name, const string& path) {
374 if (mediaprovider::fuse::containsMount(path, std::to_string(getuid() / PER_USER_RANGE))) {
375 LOG(WARNING) << "Ignoring attempt to invalidate dentry for FUSE mounts";
376 return;
377 }
378
379 if (fuse_lowlevel_notify_inval_entry(se, parent_ino, child_name.c_str(), child_name.size())) {
380 // Invalidating the dentry can fail if there's no dcache entry, however, there may still
381 // be cached attributes, so attempt to invalidate those by invalidating the inode
382 fuse_lowlevel_notify_inval_inode(se, child_ino, 0, 0);
383 }
384 }
385
get_timeout(struct fuse * fuse,const string & path,bool should_inval)386 static double get_timeout(struct fuse* fuse, const string& path, bool should_inval) {
387 string media_path = fuse->GetEffectiveRootPath() + "/Android/media";
388 if (should_inval || path.find(media_path, 0) == 0 || is_package_owned_path(path, fuse->path)) {
389 // We set dentry timeout to 0 for the following reasons:
390 // 1. Case-insensitive lookups need to invalidate other case-insensitive dentry matches
391 // 2. Installd might delete Android/media/<package> dirs when app data is cleared.
392 // This can leave a stale entry in the kernel dcache, and break subsequent creation of the
393 // dir via FUSE.
394 // 3. With app data isolation enabled, app A should not guess existence of app B from the
395 // Android/{data,obb}/<package> paths, hence we prevent the kernel from caching that
396 // information.
397 return 0;
398 }
399 return std::numeric_limits<double>::max();
400 }
401
make_node_entry(fuse_req_t req,node * parent,const string & name,const string & path,struct fuse_entry_param * e,int * error_code)402 static node* make_node_entry(fuse_req_t req, node* parent, const string& name, const string& path,
403 struct fuse_entry_param* e, int* error_code) {
404 struct fuse* fuse = get_fuse(req);
405 const struct fuse_ctx* ctx = fuse_req_ctx(req);
406 node* node;
407
408 memset(e, 0, sizeof(*e));
409 if (lstat(path.c_str(), &e->attr) < 0) {
410 *error_code = errno;
411 return NULL;
412 }
413
414 bool should_inval = false;
415 node = parent->LookupChildByName(name, true /* acquire */);
416 if (!node) {
417 node = ::node::Create(parent, name, &fuse->lock, &fuse->tracker);
418 } else if (!mediaprovider::fuse::containsMount(path, std::to_string(getuid() / PER_USER_RANGE))) {
419 should_inval = true;
420 // Only invalidate a path if it does not contain mount.
421 // Invalidate both names to ensure there's no dentry left in the kernel after the following
422 // operations:
423 // 1) touch foo, touch FOO, unlink *foo*
424 // 2) touch foo, touch FOO, unlink *FOO*
425 // Invalidating lookup_name fixes (1) and invalidating node_name fixes (2)
426 // |should_inval| invalidates lookup_name by using 0 timeout below and we explicitly
427 // invalidate node_name if different case
428 // Note that we invalidate async otherwise we will deadlock the kernel
429 if (name != node->GetName()) {
430 std::thread t([=]() {
431 fuse_inval(fuse->se, fuse->ToInode(parent), fuse->ToInode(node), node->GetName(),
432 path);
433 });
434 t.detach();
435 }
436 }
437 TRACE_NODE(node, req);
438
439 // This FS is not being exported via NFS so just a fixed generation number
440 // for now. If we do need this, we need to increment the generation ID each
441 // time the fuse daemon restarts because that's what it takes for us to
442 // reuse inode numbers.
443 e->generation = 0;
444 e->ino = fuse->ToInode(node);
445 e->entry_timeout = get_timeout(fuse, path, should_inval);
446 e->attr_timeout = is_package_owned_path(path, fuse->path) || should_inval
447 ? 0
448 : std::numeric_limits<double>::max();
449
450 return node;
451 }
452
// True when |flags| request any form of write access (O_WRONLY or O_RDWR).
static inline bool is_requesting_write(int flags) {
    return (flags & (O_WRONLY | O_RDWR)) != 0;
}
456
457 namespace mediaprovider {
458 namespace fuse {
459
460 /**
461 * Function implementations
462 *
463 * These implement the various functions in fuse_lowlevel_ops
464 *
465 */
466
pf_init(void * userdata,struct fuse_conn_info * conn)467 static void pf_init(void* userdata, struct fuse_conn_info* conn) {
468 // We don't want a getattr request with every read request
469 conn->want &= ~FUSE_CAP_AUTO_INVAL_DATA & ~FUSE_CAP_READDIRPLUS_AUTO;
470 unsigned mask = (FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE | FUSE_CAP_SPLICE_READ |
471 FUSE_CAP_ASYNC_READ | FUSE_CAP_ATOMIC_O_TRUNC | FUSE_CAP_WRITEBACK_CACHE |
472 FUSE_CAP_EXPORT_SUPPORT | FUSE_CAP_FLOCK_LOCKS);
473 conn->want |= conn->capable & mask;
474 conn->max_read = MAX_READ_SIZE;
475
476 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
477 fuse->active->store(true, std::memory_order_release);
478 }
479
pf_destroy(void * userdata)480 static void pf_destroy(void* userdata) {
481 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
482 LOG(INFO) << "DESTROY " << fuse->path;
483
484 node::DeleteTree(fuse->root);
485 }
486
487 // Return true if the path is accessible for that uid.
is_app_accessible_path(MediaProviderWrapper * mp,const string & path,uid_t uid)488 static bool is_app_accessible_path(MediaProviderWrapper* mp, const string& path, uid_t uid) {
489 if (uid < AID_APP_START) {
490 return true;
491 }
492
493 if (path == "/storage/emulated") {
494 // Apps should never refer to /storage/emulated - they should be using the user-spcific
495 // subdirs, eg /storage/emulated/0
496 return false;
497 }
498
499 std::smatch match;
500 if (std::regex_match(path, match, PATTERN_OWNED_PATH)) {
501 const std::string& pkg = match[1];
502 // .nomedia is not a valid package. .nomedia always exists in /Android/data directory,
503 // and it's not an external file/directory of any package
504 if (pkg == ".nomedia") {
505 return true;
506 }
507 if (!mp->IsUidForPackage(pkg, uid)) {
508 PLOG(WARNING) << "Invalid other package file access from " << pkg << "(: " << path;
509 return false;
510 }
511 }
512 return true;
513 }
514
// Extracts the numeric user id from /storage/emulated/<userid>/... paths
// (capture group 1 is the user id).
static std::regex storage_emulated_regex("^\\/storage\\/emulated\\/([0-9]+)");

// Resolves |name| under the |parent| inode and fills |e| for a
// fuse_reply_entry. Returns the looked-up node (reference acquired), or
// nullptr with *error_code set: ENOENT for unknown/inaccessible parents,
// EPERM when the path's user id doesn't match this daemon's user.
static node* do_lookup(fuse_req_t req, fuse_ino_t parent, const char* name,
                       struct fuse_entry_param* e, int* error_code) {
    struct fuse* fuse = get_fuse(req);
    node* parent_node = fuse->FromInode(parent);
    if (!parent_node) {
        *error_code = ENOENT;
        return nullptr;
    }
    string parent_path = parent_node->BuildPath();
    // We should always allow lookups on the root, because failing them could cause
    // bind mounts to be invalidated.
    if (!fuse->IsRoot(parent_node) && !is_app_accessible_path(fuse->mp, parent_path, req->ctx.uid)) {
        *error_code = ENOENT;
        return nullptr;
    }

    string child_path = parent_path + "/" + name;

    TRACE_NODE(parent_node, req);

    // Reject lookups whose embedded user id differs from the user this daemon
    // instance serves (daemon uid / PER_USER_RANGE).
    std::smatch match;
    std::regex_search(child_path, match, storage_emulated_regex);
    if (match.size() == 2 && std::to_string(getuid() / PER_USER_RANGE) != match[1].str()) {
        // Ensure the FuseDaemon user id matches the user id in requested path
        *error_code = EPERM;
        return nullptr;
    }
    return make_node_entry(req, parent_node, name, child_path, e, error_code);
}
545
pf_lookup(fuse_req_t req,fuse_ino_t parent,const char * name)546 static void pf_lookup(fuse_req_t req, fuse_ino_t parent, const char* name) {
547 ATRACE_CALL();
548 struct fuse_entry_param e;
549
550 int error_code = 0;
551 if (do_lookup(req, parent, name, &e, &error_code)) {
552 fuse_reply_entry(req, &e);
553 } else {
554 CHECK(error_code != 0);
555 fuse_reply_err(req, error_code);
556 }
557 }
558
do_forget(fuse_req_t req,struct fuse * fuse,fuse_ino_t ino,uint64_t nlookup)559 static void do_forget(fuse_req_t req, struct fuse* fuse, fuse_ino_t ino, uint64_t nlookup) {
560 node* node = fuse->FromInode(ino);
561 TRACE_NODE(node, req);
562 if (node) {
563 // This is a narrowing conversion from an unsigned 64bit to a 32bit value. For
564 // some reason we only keep 32 bit refcounts but the kernel issues
565 // forget requests with a 64 bit counter.
566 node->Release(static_cast<uint32_t>(nlookup));
567 }
568 }
569
pf_forget(fuse_req_t req,fuse_ino_t ino,uint64_t nlookup)570 static void pf_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) {
571 // Always allow to forget so no need to check is_app_accessible_path()
572 ATRACE_CALL();
573 node* node;
574 struct fuse* fuse = get_fuse(req);
575
576 do_forget(req, fuse, ino, nlookup);
577 fuse_reply_none(req);
578 }
579
pf_forget_multi(fuse_req_t req,size_t count,struct fuse_forget_data * forgets)580 static void pf_forget_multi(fuse_req_t req,
581 size_t count,
582 struct fuse_forget_data* forgets) {
583 ATRACE_CALL();
584 struct fuse* fuse = get_fuse(req);
585
586 for (int i = 0; i < count; i++) {
587 do_forget(req, fuse, forgets[i].ino, forgets[i].nlookup);
588 }
589 fuse_reply_none(req);
590 }
591
pf_getattr(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)592 static void pf_getattr(fuse_req_t req,
593 fuse_ino_t ino,
594 struct fuse_file_info* fi) {
595 ATRACE_CALL();
596 struct fuse* fuse = get_fuse(req);
597 node* node = fuse->FromInode(ino);
598 if (!node) {
599 fuse_reply_err(req, ENOENT);
600 return;
601 }
602 string path = node->BuildPath();
603 if (!is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
604 fuse_reply_err(req, ENOENT);
605 return;
606 }
607 TRACE_NODE(node, req);
608
609 struct stat s;
610 memset(&s, 0, sizeof(s));
611 if (lstat(path.c_str(), &s) < 0) {
612 fuse_reply_err(req, errno);
613 } else {
614 fuse_reply_attr(req, &s, is_package_owned_path(path, fuse->path) ?
615 0 : std::numeric_limits<double>::max());
616 }
617 }
618
// FUSE "setattr" handler. Supports truncation (FUSE_SET_ATTR_SIZE) and
// atime/mtime updates only; mode/owner changes are deliberately not handled.
// Uses the open fd from |fi| when present, otherwise operates by path after a
// MediaProvider write-permission check.
static void pf_setattr(fuse_req_t req,
                       fuse_ino_t ino,
                       struct stat* attr,
                       int to_set,
                       struct fuse_file_info* fi) {
    ATRACE_CALL();
    struct fuse* fuse = get_fuse(req);
    node* node = fuse->FromInode(ino);
    if (!node) {
        fuse_reply_err(req, ENOENT);
        return;
    }
    string path = node->BuildPath();
    if (!is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
        fuse_reply_err(req, ENOENT);
        return;
    }

    int fd = -1;
    if (fi) {
        // If we have a file_info, setattr was called with an fd so use the fd instead of path
        handle* h = reinterpret_cast<handle*>(fi->fh);
        fd = h->fd;
    } else {
        // Path-based setattr: ask MediaProvider whether this uid may open the
        // file for writing (third arg true = for_write) before touching it.
        const struct fuse_ctx* ctx = fuse_req_ctx(req);
        int status = fuse->mp->IsOpenAllowed(path, ctx->uid, true);
        if (status) {
            fuse_reply_err(req, EACCES);
            return;
        }
    }
    struct timespec times[2];
    TRACE_NODE(node, req);

    /* XXX: incomplete implementation on purpose.
     * chmod/chown should NEVER be implemented.*/

    // Truncate via the fd when we have one, otherwise by path.
    if ((to_set & FUSE_SET_ATTR_SIZE)) {
        int res = 0;
        if (fd == -1) {
            res = truncate64(path.c_str(), attr->st_size);
        } else {
            res = ftruncate64(fd, attr->st_size);
        }

        if (res < 0) {
            fuse_reply_err(req, errno);
            return;
        }
    }

    /* Handle changing atime and mtime. If FATTR_ATIME_and FATTR_ATIME_NOW
     * are both set, then set it to the current time. Else, set it to the
     * time specified in the request. Same goes for mtime. Use utimensat(2)
     * as it allows ATIME and MTIME to be changed independently, and has
     * nanosecond resolution which fuse also has.
     */
    if (to_set & (FATTR_ATIME | FATTR_MTIME)) {
        // UTIME_OMIT leaves the corresponding timestamp untouched.
        times[0].tv_nsec = UTIME_OMIT;
        times[1].tv_nsec = UTIME_OMIT;
        if (to_set & FATTR_ATIME) {
            if (to_set & FATTR_ATIME_NOW) {
                times[0].tv_nsec = UTIME_NOW;
            } else {
                times[0] = attr->st_atim;
            }
        }

        if (to_set & FATTR_MTIME) {
            if (to_set & FATTR_MTIME_NOW) {
                times[1].tv_nsec = UTIME_NOW;
            } else {
                times[1] = attr->st_mtim;
            }
        }

        TRACE_NODE(node, req);
        int res = 0;
        if (fd == -1) {
            res = utimensat(-1, path.c_str(), times, 0);
        } else {
            res = futimens(fd, times);
        }

        if (res < 0) {
            fuse_reply_err(req, errno);
            return;
        }
    }

    // Re-stat to report the post-update attributes.
    // NOTE(review): the lstat return value is ignored; on failure the caller's
    // |attr| is echoed back unchanged — confirm this is intended.
    lstat(path.c_str(), attr);
    fuse_reply_attr(req, attr, is_package_owned_path(path, fuse->path) ?
            0 : std::numeric_limits<double>::max());
}
713
pf_canonical_path(fuse_req_t req,fuse_ino_t ino)714 static void pf_canonical_path(fuse_req_t req, fuse_ino_t ino)
715 {
716 struct fuse* fuse = get_fuse(req);
717 node* node = fuse->FromInode(ino);
718 string path = node ? node->BuildPath() : "";
719
720 if (node && is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
721 // TODO(b/147482155): Check that uid has access to |path| and its contents
722 fuse_reply_canonical_path(req, path.c_str());
723 return;
724 }
725 fuse_reply_err(req, ENOENT);
726 }
727
pf_mknod(fuse_req_t req,fuse_ino_t parent,const char * name,mode_t mode,dev_t rdev)728 static void pf_mknod(fuse_req_t req,
729 fuse_ino_t parent,
730 const char* name,
731 mode_t mode,
732 dev_t rdev) {
733 ATRACE_CALL();
734 struct fuse* fuse = get_fuse(req);
735 node* parent_node = fuse->FromInode(parent);
736 if (!parent_node) {
737 fuse_reply_err(req, ENOENT);
738 return;
739 }
740 string parent_path = parent_node->BuildPath();
741 if (!is_app_accessible_path(fuse->mp, parent_path, req->ctx.uid)) {
742 fuse_reply_err(req, ENOENT);
743 return;
744 }
745
746 TRACE_NODE(parent_node, req);
747
748 const string child_path = parent_path + "/" + name;
749
750 mode = (mode & (~0777)) | 0664;
751 if (mknod(child_path.c_str(), mode, rdev) < 0) {
752 fuse_reply_err(req, errno);
753 return;
754 }
755
756 int error_code = 0;
757 struct fuse_entry_param e;
758 if (make_node_entry(req, parent_node, name, child_path, &e, &error_code)) {
759 fuse_reply_entry(req, &e);
760 } else {
761 CHECK(error_code != 0);
762 fuse_reply_err(req, error_code);
763 }
764 }
765
pf_mkdir(fuse_req_t req,fuse_ino_t parent,const char * name,mode_t mode)766 static void pf_mkdir(fuse_req_t req,
767 fuse_ino_t parent,
768 const char* name,
769 mode_t mode) {
770 ATRACE_CALL();
771 struct fuse* fuse = get_fuse(req);
772 node* parent_node = fuse->FromInode(parent);
773 if (!parent_node) {
774 fuse_reply_err(req, ENOENT);
775 return;
776 }
777 const struct fuse_ctx* ctx = fuse_req_ctx(req);
778 const string parent_path = parent_node->BuildPath();
779 if (!is_app_accessible_path(fuse->mp, parent_path, ctx->uid)) {
780 fuse_reply_err(req, ENOENT);
781 return;
782 }
783
784 TRACE_NODE(parent_node, req);
785
786 const string child_path = parent_path + "/" + name;
787
788 int status = fuse->mp->IsCreatingDirAllowed(child_path, ctx->uid);
789 if (status) {
790 fuse_reply_err(req, status);
791 return;
792 }
793
794 mode = (mode & (~0777)) | 0775;
795 if (mkdir(child_path.c_str(), mode) < 0) {
796 fuse_reply_err(req, errno);
797 return;
798 }
799
800 int error_code = 0;
801 struct fuse_entry_param e;
802 if (make_node_entry(req, parent_node, name, child_path, &e, &error_code)) {
803 fuse_reply_entry(req, &e);
804 } else {
805 CHECK(error_code != 0);
806 fuse_reply_err(req, error_code);
807 }
808 }
809
pf_unlink(fuse_req_t req,fuse_ino_t parent,const char * name)810 static void pf_unlink(fuse_req_t req, fuse_ino_t parent, const char* name) {
811 ATRACE_CALL();
812 struct fuse* fuse = get_fuse(req);
813 node* parent_node = fuse->FromInode(parent);
814 if (!parent_node) {
815 fuse_reply_err(req, ENOENT);
816 return;
817 }
818 const struct fuse_ctx* ctx = fuse_req_ctx(req);
819 const string parent_path = parent_node->BuildPath();
820 if (!is_app_accessible_path(fuse->mp, parent_path, ctx->uid)) {
821 fuse_reply_err(req, ENOENT);
822 return;
823 }
824
825 TRACE_NODE(parent_node, req);
826
827 const string child_path = parent_path + "/" + name;
828
829 int status = fuse->mp->DeleteFile(child_path, ctx->uid);
830 if (status) {
831 fuse_reply_err(req, status);
832 return;
833 }
834
835 node* child_node = parent_node->LookupChildByName(name, false /* acquire */);
836 TRACE_NODE(child_node, req);
837 if (child_node) {
838 child_node->SetDeleted();
839 }
840
841 fuse_reply_err(req, 0);
842 }
843
pf_rmdir(fuse_req_t req,fuse_ino_t parent,const char * name)844 static void pf_rmdir(fuse_req_t req, fuse_ino_t parent, const char* name) {
845 ATRACE_CALL();
846 struct fuse* fuse = get_fuse(req);
847 node* parent_node = fuse->FromInode(parent);
848 if (!parent_node) {
849 fuse_reply_err(req, ENOENT);
850 return;
851 }
852 const string parent_path = parent_node->BuildPath();
853 if (!is_app_accessible_path(fuse->mp, parent_path, req->ctx.uid)) {
854 fuse_reply_err(req, ENOENT);
855 return;
856 }
857 TRACE_NODE(parent_node, req);
858
859 const string child_path = parent_path + "/" + name;
860
861 int status = fuse->mp->IsDeletingDirAllowed(child_path, req->ctx.uid);
862 if (status) {
863 fuse_reply_err(req, status);
864 return;
865 }
866
867 if (rmdir(child_path.c_str()) < 0) {
868 fuse_reply_err(req, errno);
869 return;
870 }
871
872 node* child_node = parent_node->LookupChildByName(name, false /* acquire */);
873 TRACE_NODE(child_node, req);
874 if (child_node) {
875 child_node->SetDeleted();
876 }
877
878 fuse_reply_err(req, 0);
879 }
880 /*
881 static void pf_symlink(fuse_req_t req, const char* link, fuse_ino_t parent,
882 const char* name)
883 {
884 cout << "TODO:" << __func__;
885 }
886 */
do_rename(fuse_req_t req,fuse_ino_t parent,const char * name,fuse_ino_t new_parent,const char * new_name,unsigned int flags)887 static int do_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
888 const char* new_name, unsigned int flags) {
889 ATRACE_CALL();
890 struct fuse* fuse = get_fuse(req);
891
892 if (flags != 0) {
893 return EINVAL;
894 }
895
896 node* old_parent_node = fuse->FromInode(parent);
897 if (!old_parent_node) return ENOENT;
898 const struct fuse_ctx* ctx = fuse_req_ctx(req);
899 const string old_parent_path = old_parent_node->BuildPath();
900 if (!is_app_accessible_path(fuse->mp, old_parent_path, ctx->uid)) {
901 return ENOENT;
902 }
903
904 node* new_parent_node = fuse->FromInode(new_parent);
905 if (!new_parent_node) return ENOENT;
906 const string new_parent_path = new_parent_node->BuildPath();
907 if (!is_app_accessible_path(fuse->mp, new_parent_path, ctx->uid)) {
908 return ENOENT;
909 }
910
911 if (!old_parent_node || !new_parent_node) {
912 return ENOENT;
913 } else if (parent == new_parent && name == new_name) {
914 // No rename required.
915 return 0;
916 }
917
918 TRACE_NODE(old_parent_node, req);
919 TRACE_NODE(new_parent_node, req);
920
921 node* child_node = old_parent_node->LookupChildByName(name, true /* acquire */);
922 TRACE_NODE(child_node, req) << "old_child";
923
924 const string old_child_path = child_node->BuildPath();
925 const string new_child_path = new_parent_path + "/" + new_name;
926
927 // TODO(b/147408834): Check ENOTEMPTY & EEXIST error conditions before JNI call.
928 const int res = fuse->mp->Rename(old_child_path, new_child_path, req->ctx.uid);
929 // TODO(b/145663158): Lookups can go out of sync if file/directory is actually moved but
930 // EFAULT/EIO is reported due to JNI exception.
931 if (res == 0) {
932 child_node->Rename(new_name, new_parent_node);
933 }
934 TRACE_NODE(child_node, req) << "new_child";
935
936 child_node->Release(1);
937 return res;
938 }
939
pf_rename(fuse_req_t req,fuse_ino_t parent,const char * name,fuse_ino_t new_parent,const char * new_name,unsigned int flags)940 static void pf_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
941 const char* new_name, unsigned int flags) {
942 int res = do_rename(req, parent, name, new_parent, new_name, flags);
943 fuse_reply_err(req, res);
944 }
945
946 /*
947 static void pf_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t new_parent,
948 const char* new_name)
949 {
950 cout << "TODO:" << __func__;
951 }
952 */
953
create_handle_for_node(struct fuse * fuse,const string & path,int fd,node * node,const RedactionInfo * ri)954 static handle* create_handle_for_node(struct fuse* fuse, const string& path, int fd, node* node,
955 const RedactionInfo* ri) {
956 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
957 // We don't want to use the FUSE VFS cache in two cases:
958 // 1. When redaction is needed because app A with EXIF access might access
959 // a region that should have been redacted for app B without EXIF access, but app B on
960 // a subsequent read, will be able to see the EXIF data because the read request for
961 // that region will be served from cache and not get to the FUSE daemon
962 // 2. When the file has a read or write lock on it. This means that the MediaProvider
963 // has given an fd to the lower file system to an app. There are two cases where using
964 // the cache in this case can be a problem:
965 // a. Writing to a FUSE fd with caching enabled will use the write-back cache and a
966 // subsequent read from the lower fs fd will not see the write.
967 // b. Reading from a FUSE fd with caching enabled may not see the latest writes using
968 // the lower fs fd because those writes did not go through the FUSE layer and reads from
969 // FUSE after that write may be served from cache
970 bool direct_io = ri->isRedactionNeeded() || is_file_locked(fd, path);
971
972 handle* h = new handle(fd, ri, !direct_io);
973 node->AddHandle(h);
974 return h;
975 }
976
// Handles FUSE_OPEN: resolves the node, checks access with MediaProvider,
// opens the backing file on the lower filesystem, computes redaction info,
// and registers a handle on the node before replying.
static void pf_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi) {
    ATRACE_CALL();
    struct fuse* fuse = get_fuse(req);
    node* node = fuse->FromInode(ino);
    if (!node) {
        fuse_reply_err(req, ENOENT);
        return;
    }
    const struct fuse_ctx* ctx = fuse_req_ctx(req);
    const string path = node->BuildPath();
    if (!is_app_accessible_path(fuse->mp, path, ctx->uid)) {
        // Inaccessible paths are reported as nonexistent, not as EACCES.
        fuse_reply_err(req, ENOENT);
        return;
    }

    TRACE_NODE(node, req) << (is_requesting_write(fi->flags) ? "write" : "read");

    if (fi->flags & O_DIRECT) {
        // Translate the caller's O_DIRECT into FUSE-level direct I/O rather
        // than passing the flag down to the lower filesystem.
        fi->flags &= ~O_DIRECT;
        fi->direct_io = true;
    }

    // MediaProvider decides whether this uid may open the file for the
    // requested access mode; a non-zero result is an errno-style failure.
    int status = fuse->mp->IsOpenAllowed(path, ctx->uid, is_requesting_write(fi->flags));
    if (status) {
        fuse_reply_err(req, status);
        return;
    }

    // With the writeback cache enabled, FUSE may generate READ requests even for files that
    // were opened O_WRONLY; so make sure we open it O_RDWR instead.
    int open_flags = fi->flags;
    if (open_flags & O_WRONLY) {
        open_flags &= ~O_WRONLY;
        open_flags |= O_RDWR;
    }

    // Drop O_APPEND on the lower fd — presumably the kernel computes append
    // offsets itself with the write-back cache enabled (TODO confirm against
    // libfuse writeback_cache documentation).
    if (open_flags & O_APPEND) {
        open_flags &= ~O_APPEND;
    }

    const int fd = open(path.c_str(), open_flags);
    if (fd < 0) {
        fuse_reply_err(req, errno);
        return;
    }

    // We don't redact if the caller was granted write permission for this file
    std::unique_ptr<RedactionInfo> ri;
    if (is_requesting_write(fi->flags)) {
        // Empty RedactionInfo == no redaction.
        ri = std::make_unique<RedactionInfo>();
    } else {
        ri = fuse->mp->GetRedactionInfo(path, req->ctx.uid, req->ctx.pid);
    }

    if (!ri) {
        // Redaction info could not be obtained (e.g. JNI failure): don't
        // leak the fd, and fail the open.
        close(fd);
        fuse_reply_err(req, EFAULT);
        return;
    }

    // create_handle_for_node takes ownership of the RedactionInfo.
    handle* h = create_handle_for_node(fuse, path, fd, node, ri.release());
    fi->fh = ptr_to_id(h);
    fi->keep_cache = 1;
    // Uncached handles (redaction needed or file locked) must bypass the
    // page cache entirely.
    fi->direct_io = !h->cached;
    fuse_reply_open(req, fi);
}
1043
do_read(fuse_req_t req,size_t size,off_t off,struct fuse_file_info * fi)1044 static void do_read(fuse_req_t req, size_t size, off_t off, struct fuse_file_info* fi) {
1045 handle* h = reinterpret_cast<handle*>(fi->fh);
1046 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size);
1047
1048 buf.buf[0].fd = h->fd;
1049 buf.buf[0].pos = off;
1050 buf.buf[0].flags =
1051 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1052
1053 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags) 0);
1054 }
1055
range_contains(const RedactionRange & rr,off_t off)1056 static bool range_contains(const RedactionRange& rr, off_t off) {
1057 return rr.first <= off && off <= rr.second;
1058 }
1059
1060 /**
1061 * Sets the parameters for a fuse_buf that reads from memory, including flags.
1062 * Makes buf->mem point to an already mapped region of zeroized memory.
1063 * This memory is read only.
1064 */
create_mem_fuse_buf(size_t size,fuse_buf * buf,struct fuse * fuse)1065 static void create_mem_fuse_buf(size_t size, fuse_buf* buf, struct fuse* fuse) {
1066 buf->size = size;
1067 buf->mem = fuse->zero_addr;
1068 buf->flags = static_cast<fuse_buf_flags>(0 /*read from fuse_buf.mem*/);
1069 buf->pos = -1;
1070 buf->fd = -1;
1071 }
1072
1073 /**
1074 * Sets the parameters for a fuse_buf that reads from file, including flags.
1075 */
create_file_fuse_buf(size_t size,off_t pos,int fd,fuse_buf * buf)1076 static void create_file_fuse_buf(size_t size, off_t pos, int fd, fuse_buf* buf) {
1077 buf->size = size;
1078 buf->fd = fd;
1079 buf->pos = pos;
1080 buf->flags = static_cast<fuse_buf_flags>(FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1081 buf->mem = nullptr;
1082 }
1083
/**
 * Replies to a read that overlaps at least one redaction range by building a
 * scatter list that alternates between the backing file (clear ranges) and
 * the shared zero-filled mapping (redacted ranges), then handing the whole
 * vector to fuse_reply_data() in one call.
 */
static void do_read_with_redaction(fuse_req_t req, size_t size, off_t off, fuse_file_info* fi) {
    handle* h = reinterpret_cast<handle*>(fi->fh);
    auto overlapping_rr = h->ri->getOverlappingRedactionRanges(size, off);

    if (overlapping_rr->size() <= 0) {
        // no relevant redaction ranges for this request
        do_read(req, size, off, fi);
        return;
    }
    // the number of buffers we need, if the read doesn't start or end with
    // a redaction range.
    int num_bufs = overlapping_rr->size() * 2 + 1;
    if (overlapping_rr->front().first <= off) {
        // the beginning of the read request is redacted
        num_bufs--;
    }
    if (overlapping_rr->back().second >= off + size) {
        // the end of the read request is redacted
        num_bufs--;
    }
    // fuse_bufvec declares a single trailing fuse_buf, so allocate room for
    // the remaining (num_bufs - 1) entries immediately behind it; the
    // unique_ptr frees the malloc'd block on every exit path.
    auto bufvec_ptr = std::unique_ptr<fuse_bufvec, decltype(free)*>{
            reinterpret_cast<fuse_bufvec*>(
                    malloc(sizeof(fuse_bufvec) + (num_bufs - 1) * sizeof(fuse_buf))),
            free};
    fuse_bufvec& bufvec = *bufvec_ptr;

    // initialize bufvec
    bufvec.count = num_bufs;
    bufvec.idx = 0;
    bufvec.off = 0;

    int rr_idx = 0;
    off_t start = off;
    // Add a dummy redaction range to make sure we don't go out of vector
    // limits when computing the end of the last non-redacted range.
    // This ranges is invalid because its starting point is larger than it's ending point.
    overlapping_rr->push_back(RedactionRange(LLONG_MAX, LLONG_MAX - 1));

    for (int i = 0; i < num_bufs; ++i) {
        off_t end;
        if (range_contains(overlapping_rr->at(rr_idx), start)) {
            // Handle a redacted range
            // end should be the end of the redacted range, but can't be out of
            // the read request bounds
            end = std::min(static_cast<off_t>(off + size - 1), overlapping_rr->at(rr_idx).second);
            create_mem_fuse_buf(/*size*/ end - start + 1, &(bufvec.buf[i]), get_fuse(req));
            ++rr_idx;
        } else {
            // Handle a non-redacted range
            // end should be right before the next redaction range starts or
            // the end of the read request
            end = std::min(static_cast<off_t>(off + size - 1),
                           overlapping_rr->at(rr_idx).first - 1);
            create_file_fuse_buf(/*size*/ end - start + 1, start, h->fd, &(bufvec.buf[i]));
        }
        start = end + 1;
    }

    fuse_reply_data(req, &bufvec, static_cast<fuse_buf_copy_flags>(0));
}
1144
pf_read(fuse_req_t req,fuse_ino_t ino,size_t size,off_t off,struct fuse_file_info * fi)1145 static void pf_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1146 struct fuse_file_info* fi) {
1147 ATRACE_CALL();
1148 handle* h = reinterpret_cast<handle*>(fi->fh);
1149 struct fuse* fuse = get_fuse(req);
1150
1151 fuse->fadviser.Record(h->fd, size);
1152
1153 if (h->ri->isRedactionNeeded()) {
1154 do_read_with_redaction(req, size, off, fi);
1155 } else {
1156 do_read(req, size, off, fi);
1157 }
1158 }
1159
1160 /*
1161 static void pf_write(fuse_req_t req, fuse_ino_t ino, const char* buf,
1162 size_t size, off_t off, struct fuse_file_info* fi)
1163 {
1164 cout << "TODO:" << __func__;
1165 }
1166 */
1167
pf_write_buf(fuse_req_t req,fuse_ino_t ino,struct fuse_bufvec * bufv,off_t off,struct fuse_file_info * fi)1168 static void pf_write_buf(fuse_req_t req,
1169 fuse_ino_t ino,
1170 struct fuse_bufvec* bufv,
1171 off_t off,
1172 struct fuse_file_info* fi) {
1173 ATRACE_CALL();
1174 handle* h = reinterpret_cast<handle*>(fi->fh);
1175 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(fuse_buf_size(bufv));
1176 ssize_t size;
1177 struct fuse* fuse = get_fuse(req);
1178
1179 buf.buf[0].fd = h->fd;
1180 buf.buf[0].pos = off;
1181 buf.buf[0].flags =
1182 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1183 size = fuse_buf_copy(&buf, bufv, (enum fuse_buf_copy_flags) 0);
1184
1185 if (size < 0)
1186 fuse_reply_err(req, -size);
1187 else {
1188 fuse_reply_write(req, size);
1189 fuse->fadviser.Record(h->fd, size);
1190 }
1191 }
// Haven't tested this one. Not sure what calls it.
#if 0
// Would handle FUSE_COPY_FILE_RANGE: copy `len` bytes between the two open
// handles' backing fds via fuse_buf_copy(). Currently compiled out.
static void pf_copy_file_range(fuse_req_t req, fuse_ino_t ino_in,
                               off_t off_in, struct fuse_file_info* fi_in,
                               fuse_ino_t ino_out, off_t off_out,
                               struct fuse_file_info* fi_out, size_t len,
                               int flags)
{
    handle* h_in = reinterpret_cast<handle *>(fi_in->fh);
    handle* h_out = reinterpret_cast<handle *>(fi_out->fh);
    struct fuse_bufvec buf_in = FUSE_BUFVEC_INIT(len);
    struct fuse_bufvec buf_out = FUSE_BUFVEC_INIT(len);
    ssize_t size;

    buf_in.buf[0].fd = h_in->fd;
    buf_in.buf[0].pos = off_in;
    buf_in.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);

    buf_out.buf[0].fd = h_out->fd;
    buf_out.buf[0].pos = off_out;
    buf_out.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
    size = fuse_buf_copy(&buf_out, &buf_in, (enum fuse_buf_copy_flags) 0);

    if (size < 0) {
        // fuse_buf_copy() returns a negated errno on failure. Return after
        // replying: falling through would send a second (invalid) reply for
        // the same request.
        fuse_reply_err(req, -size);
        return;
    }

    fuse_reply_write(req, size);
}
#endif
pf_flush(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)1222 static void pf_flush(fuse_req_t req,
1223 fuse_ino_t ino,
1224 struct fuse_file_info* fi) {
1225 ATRACE_CALL();
1226 struct fuse* fuse = get_fuse(req);
1227 TRACE_NODE(nullptr, req) << "noop";
1228 fuse_reply_err(req, 0);
1229 }
1230
pf_release(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)1231 static void pf_release(fuse_req_t req,
1232 fuse_ino_t ino,
1233 struct fuse_file_info* fi) {
1234 ATRACE_CALL();
1235 struct fuse* fuse = get_fuse(req);
1236
1237 node* node = fuse->FromInode(ino);
1238 handle* h = reinterpret_cast<handle*>(fi->fh);
1239 TRACE_NODE(node, req);
1240
1241 fuse->fadviser.Close(h->fd);
1242 if (node) {
1243 node->DestroyHandle(h);
1244 }
1245
1246 fuse_reply_err(req, 0);
1247 }
1248
// Flushes `fd` to storage: fdatasync() when `datasync` is set (file data
// only), fsync() otherwise (data and metadata). Returns 0 on success or the
// errno from the failed sync call.
static int do_sync_common(int fd, bool datasync) {
    const int rc = datasync ? fdatasync(fd) : fsync(fd);
    return rc == -1 ? errno : 0;
}
1255
pf_fsync(fuse_req_t req,fuse_ino_t ino,int datasync,struct fuse_file_info * fi)1256 static void pf_fsync(fuse_req_t req,
1257 fuse_ino_t ino,
1258 int datasync,
1259 struct fuse_file_info* fi) {
1260 ATRACE_CALL();
1261 handle* h = reinterpret_cast<handle*>(fi->fh);
1262 int err = do_sync_common(h->fd, datasync);
1263
1264 fuse_reply_err(req, err);
1265 }
1266
pf_fsyncdir(fuse_req_t req,fuse_ino_t ino,int datasync,struct fuse_file_info * fi)1267 static void pf_fsyncdir(fuse_req_t req,
1268 fuse_ino_t ino,
1269 int datasync,
1270 struct fuse_file_info* fi) {
1271 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1272 int err = do_sync_common(dirfd(h->d), datasync);
1273
1274 fuse_reply_err(req, err);
1275 }
1276
// Handles FUSE_OPENDIR: checks directory read access with MediaProvider,
// opens the directory on the lower filesystem, and registers a dirhandle on
// the node.
static void pf_opendir(fuse_req_t req,
                       fuse_ino_t ino,
                       struct fuse_file_info* fi) {
    ATRACE_CALL();
    struct fuse* fuse = get_fuse(req);
    node* node = fuse->FromInode(ino);
    if (!node) {
        fuse_reply_err(req, ENOENT);
        return;
    }
    const struct fuse_ctx* ctx = fuse_req_ctx(req);
    const string path = node->BuildPath();
    if (!is_app_accessible_path(fuse->mp, path, ctx->uid)) {
        // Inaccessible paths are reported as nonexistent, not as EACCES.
        fuse_reply_err(req, ENOENT);
        return;
    }

    TRACE_NODE(node, req);

    // Opening a directory here is always a read; writes inside the directory
    // are checked per file operation.
    int status = fuse->mp->IsOpendirAllowed(path, ctx->uid, /* forWrite */ false);
    if (status) {
        fuse_reply_err(req, status);
        return;
    }

    DIR* dir = opendir(path.c_str());
    if (!dir) {
        fuse_reply_err(req, errno);
        return;
    }

    // The node owns the dirhandle; pf_releasedir destroys it.
    dirhandle* h = new dirhandle(dir);
    node->AddDirHandle(h);

    fi->fh = ptr_to_id(h);
    fuse_reply_open(req, fi);
}
1314
1315 #define READDIR_BUF 8192LU
1316
// Shared implementation for FUSE_READDIR and FUSE_READDIRPLUS. Directory
// entries come from MediaProvider (fetched once per handle, on the first
// call) and are packed into a response buffer until either the entries or
// the buffer space run out. `plus` selects readdirplus behavior, where each
// entry also carries full lookup/attr data.
static void do_readdir_common(fuse_req_t req,
                              fuse_ino_t ino,
                              size_t size,
                              off_t off,
                              struct fuse_file_info* fi,
                              bool plus) {
    struct fuse* fuse = get_fuse(req);
    dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
    size_t len = std::min<size_t>(size, READDIR_BUF);
    char buf[READDIR_BUF];
    size_t used = 0;
    std::shared_ptr<DirectoryEntry> de;

    struct fuse_entry_param e;
    size_t entry_size = 0;

    node* node = fuse->FromInode(ino);
    if (!node) {
        fuse_reply_err(req, ENOENT);
        return;
    }
    const string path = node->BuildPath();
    if (!is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
        fuse_reply_err(req, ENOENT);
        return;
    }

    TRACE_NODE(node, req);
    // Get all directory entries from MediaProvider on first readdir() call of
    // directory handle. h->next_off = 0 indicates that current readdir() call
    // is first readdir() call for the directory handle, Avoid multiple JNI calls
    // for single directory handle.
    if (h->next_off == 0) {
        h->de = fuse->mp->GetDirectoryEntries(req->ctx.uid, path, h->d);
    }
    // If the last entry in the previous readdir() call was rejected due to
    // buffer capacity constraints, update directory offset to start from
    // previously rejected entry. Directory offset can also change if there was
    // a seekdir() on the given directory handle.
    if (off != h->next_off) {
        h->next_off = off;
    }
    const int num_directory_entries = h->de.size();
    // Check for errors. Any error/exception occurred while obtaining directory
    // entries will be indicated by marking first directory entry name as empty
    // string. In the erroneous case corresponding d_type will hold error number.
    if (num_directory_entries && h->de[0]->d_name.empty()) {
        fuse_reply_err(req, h->de[0]->d_type);
        return;
    }

    while (h->next_off < num_directory_entries) {
        de = h->de[h->next_off];
        entry_size = 0;
        h->next_off++;
        if (plus) {
            int error_code = 0;
            if (do_lookup(req, ino, de->d_name.c_str(), &e, &error_code)) {
                entry_size = fuse_add_direntry_plus(req, buf + used, len - used, de->d_name.c_str(),
                                                    &e, h->next_off);
            } else {
                // Ignore lookup errors on
                // 1. non-existing files returned from MediaProvider database.
                // 2. path that doesn't match FuseDaemon UID and calling uid.
                if (error_code == ENOENT || error_code == EPERM || error_code == EACCES) continue;
                fuse_reply_err(req, error_code);
                return;
            }
        } else {
            // This should never happen because we have readdir_plus enabled without adaptive
            // readdir_plus, FUSE_CAP_READDIRPLUS_AUTO
            LOG(WARNING) << "Handling plain readdir for " << de->d_name << ". Invalid d_ino";
            e.attr.st_ino = FUSE_UNKNOWN_INO;
            e.attr.st_mode = de->d_type << 12;
            entry_size = fuse_add_direntry(req, buf + used, len - used, de->d_name.c_str(), &e.attr,
                                           h->next_off);
        }
        // If buffer in fuse_add_direntry[_plus] is not large enough then
        // the entry is not added to buffer but the size of the entry is still
        // returned. Check available buffer size + returned entry size is less
        // than actual buffer size to confirm entry is added to buffer.
        if (used + entry_size > len) {
            // When an entry is rejected, lookup called by readdir_plus will not be tracked by
            // kernel. Call forget on the rejected node to decrement the reference count.
            if (plus) {
                do_forget(req, fuse, e.ino, 1);
            }
            break;
        }
        used += entry_size;
    }
    fuse_reply_buf(req, buf, used);
}
1410
pf_readdir(fuse_req_t req,fuse_ino_t ino,size_t size,off_t off,struct fuse_file_info * fi)1411 static void pf_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1412 struct fuse_file_info* fi) {
1413 ATRACE_CALL();
1414 do_readdir_common(req, ino, size, off, fi, false);
1415 }
1416
pf_readdirplus(fuse_req_t req,fuse_ino_t ino,size_t size,off_t off,struct fuse_file_info * fi)1417 static void pf_readdirplus(fuse_req_t req,
1418 fuse_ino_t ino,
1419 size_t size,
1420 off_t off,
1421 struct fuse_file_info* fi) {
1422 ATRACE_CALL();
1423 do_readdir_common(req, ino, size, off, fi, true);
1424 }
1425
pf_releasedir(fuse_req_t req,fuse_ino_t ino,struct fuse_file_info * fi)1426 static void pf_releasedir(fuse_req_t req,
1427 fuse_ino_t ino,
1428 struct fuse_file_info* fi) {
1429 ATRACE_CALL();
1430 struct fuse* fuse = get_fuse(req);
1431
1432 node* node = fuse->FromInode(ino);
1433
1434 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1435 TRACE_NODE(node, req);
1436 if (node) {
1437 node->DestroyDirHandle(h);
1438 }
1439
1440 fuse_reply_err(req, 0);
1441 }
1442
pf_statfs(fuse_req_t req,fuse_ino_t ino)1443 static void pf_statfs(fuse_req_t req, fuse_ino_t ino) {
1444 ATRACE_CALL();
1445 struct statvfs st;
1446 struct fuse* fuse = get_fuse(req);
1447
1448 if (statvfs(fuse->root->GetName().c_str(), &st))
1449 fuse_reply_err(req, errno);
1450 else
1451 fuse_reply_statfs(req, &st);
1452 }
1453 /*
1454 static void pf_setxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
1455 const char* value, size_t size, int flags)
1456 {
1457 cout << "TODO:" << __func__;
1458 }
1459
1460 static void pf_getxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
1461 size_t size)
1462 {
1463 cout << "TODO:" << __func__;
1464 }
1465
1466 static void pf_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
1467 {
1468 cout << "TODO:" << __func__;
1469 }
1470
1471 static void pf_removexattr(fuse_req_t req, fuse_ino_t ino, const char* name)
1472 {
1473 cout << "TODO:" << __func__;
1474 }*/
1475
pf_access(fuse_req_t req,fuse_ino_t ino,int mask)1476 static void pf_access(fuse_req_t req, fuse_ino_t ino, int mask) {
1477 ATRACE_CALL();
1478 struct fuse* fuse = get_fuse(req);
1479
1480 node* node = fuse->FromInode(ino);
1481 if (!node) {
1482 fuse_reply_err(req, ENOENT);
1483 return;
1484 }
1485 const string path = node->BuildPath();
1486 if (!is_app_accessible_path(fuse->mp, path, req->ctx.uid)) {
1487 fuse_reply_err(req, ENOENT);
1488 return;
1489 }
1490 TRACE_NODE(node, req);
1491
1492 // exists() checks are always allowed.
1493 if (mask == F_OK) {
1494 int res = access(path.c_str(), F_OK);
1495 fuse_reply_err(req, res ? errno : 0);
1496 return;
1497 }
1498 struct stat stat;
1499 if (lstat(path.c_str(), &stat)) {
1500 // File doesn't exist
1501 fuse_reply_err(req, ENOENT);
1502 return;
1503 }
1504
1505 // For read and write permission checks we go to MediaProvider.
1506 int status = 0;
1507 bool for_write = mask & W_OK;
1508 bool is_directory = S_ISDIR(stat.st_mode);
1509 if (is_directory) {
1510 status = fuse->mp->IsOpendirAllowed(path, req->ctx.uid, for_write);
1511 } else {
1512 if (mask & X_OK) {
1513 // Fuse is mounted with MS_NOEXEC.
1514 fuse_reply_err(req, EACCES);
1515 return;
1516 }
1517
1518 status = fuse->mp->IsOpenAllowed(path, req->ctx.uid, for_write);
1519 }
1520
1521 fuse_reply_err(req, status);
1522 }
1523
// Handles FUSE_CREATE: registers the new file with MediaProvider (which also
// acts as the permission check), creates and opens it on the lower
// filesystem, builds the node entry and a non-redacted handle, and replies
// with fuse_reply_create(). Rolls back the database insert if open() fails.
static void pf_create(fuse_req_t req,
                      fuse_ino_t parent,
                      const char* name,
                      mode_t mode,
                      struct fuse_file_info* fi) {
    ATRACE_CALL();
    struct fuse* fuse = get_fuse(req);
    node* parent_node = fuse->FromInode(parent);
    if (!parent_node) {
        fuse_reply_err(req, ENOENT);
        return;
    }
    const string parent_path = parent_node->BuildPath();
    if (!is_app_accessible_path(fuse->mp, parent_path, req->ctx.uid)) {
        fuse_reply_err(req, ENOENT);
        return;
    }

    TRACE_NODE(parent_node, req);

    const string child_path = parent_path + "/" + name;

    // Insert into the MediaProvider database first; a non-zero result is an
    // errno-style refusal.
    int mp_return_code = fuse->mp->InsertFile(child_path.c_str(), req->ctx.uid);
    if (mp_return_code) {
        fuse_reply_err(req, mp_return_code);
        return;
    }

    // With the writeback cache enabled, FUSE may generate READ requests even for files that
    // were opened O_WRONLY; so make sure we open it O_RDWR instead.
    int open_flags = fi->flags;
    if (open_flags & O_WRONLY) {
        open_flags &= ~O_WRONLY;
        open_flags |= O_RDWR;
    }

    if (open_flags & O_APPEND) {
        open_flags &= ~O_APPEND;
    }

    // Force group/world-readable 0664 permissions regardless of the
    // caller-requested mode bits.
    mode = (mode & (~0777)) | 0664;
    int fd = open(child_path.c_str(), open_flags, mode);
    if (fd < 0) {
        int error_code = errno;
        // We've already inserted the file into the MP database before the
        // failed open(), so that needs to be rolled back here.
        fuse->mp->DeleteFile(child_path.c_str(), req->ctx.uid);
        fuse_reply_err(req, error_code);
        return;
    }

    int error_code = 0;
    struct fuse_entry_param e;
    node* node = make_node_entry(req, parent_node, name, child_path, &e, &error_code);
    TRACE_NODE(node, req);
    if (!node) {
        // NOTE(review): `fd` is not closed and the DB insert is not rolled
        // back on this failure path — confirm whether that is intentional.
        CHECK(error_code != 0);
        fuse_reply_err(req, error_code);
        return;
    }

    // Let MediaProvider know we've created a new file
    fuse->mp->OnFileCreated(child_path);

    // TODO(b/147274248): Assume there will be no EXIF to redact.
    // This prevents crashing during reads but can be a security hole if a malicious app opens an fd
    // to the file before all the EXIF content is written. We could special case reads before the
    // first close after a file has just been created.
    handle* h = create_handle_for_node(fuse, child_path, fd, node, new RedactionInfo());
    fi->fh = ptr_to_id(h);
    fi->keep_cache = 1;
    fi->direct_io = !h->cached;
    fuse_reply_create(req, &e, fi);
}
1598 /*
1599 static void pf_getlk(fuse_req_t req, fuse_ino_t ino,
1600 struct fuse_file_info* fi, struct flock* lock)
1601 {
1602 cout << "TODO:" << __func__;
1603 }
1604
1605 static void pf_setlk(fuse_req_t req, fuse_ino_t ino,
1606 struct fuse_file_info* fi,
1607 struct flock* lock, int sleep)
1608 {
1609 cout << "TODO:" << __func__;
1610 }
1611
1612 static void pf_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
1613 uint64_t idx)
1614 {
1615 cout << "TODO:" << __func__;
1616 }
1617
1618 static void pf_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned int cmd,
1619 void* arg, struct fuse_file_info* fi, unsigned flags,
1620 const void* in_buf, size_t in_bufsz, size_t out_bufsz)
1621 {
1622 cout << "TODO:" << __func__;
1623 }
1624
1625 static void pf_poll(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi,
1626 struct fuse_pollhandle* ph)
1627 {
1628 cout << "TODO:" << __func__;
1629 }
1630
1631 static void pf_retrieve_reply(fuse_req_t req, void* cookie, fuse_ino_t ino,
1632 off_t offset, struct fuse_bufvec* bufv)
1633 {
1634 cout << "TODO:" << __func__;
1635 }
1636
1637 static void pf_flock(fuse_req_t req, fuse_ino_t ino,
1638 struct fuse_file_info* fi, int op)
1639 {
1640 cout << "TODO:" << __func__;
1641 }
1642
1643 static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
1644 off_t offset, off_t length, struct fuse_file_info* fi)
1645 {
1646 cout << "TODO:" << __func__;
1647 }
1648 */
1649
// Lowlevel FUSE callback table handed to fuse_session_new(). Commented-out
// entries are intentionally unimplemented; libfuse applies its default
// behavior for those opcodes.
static struct fuse_lowlevel_ops ops{
        .init = pf_init, .destroy = pf_destroy, .lookup = pf_lookup, .forget = pf_forget,
        .getattr = pf_getattr, .setattr = pf_setattr, .canonical_path = pf_canonical_path,
        .mknod = pf_mknod, .mkdir = pf_mkdir, .unlink = pf_unlink, .rmdir = pf_rmdir,
        /*.symlink = pf_symlink,*/
        .rename = pf_rename,
        /*.link = pf_link,*/
        .open = pf_open, .read = pf_read,
        /*.write = pf_write,*/
        .flush = pf_flush,
        .release = pf_release, .fsync = pf_fsync, .opendir = pf_opendir, .readdir = pf_readdir,
        .releasedir = pf_releasedir, .fsyncdir = pf_fsyncdir, .statfs = pf_statfs,
        /*.setxattr = pf_setxattr,
        .getxattr = pf_getxattr,
        .listxattr = pf_listxattr,
        .removexattr = pf_removexattr,*/
        .access = pf_access, .create = pf_create,
        /*.getlk = pf_getlk,
        .setlk = pf_setlk,
        .bmap = pf_bmap,
        .ioctl = pf_ioctl,
        .poll = pf_poll,*/
        .write_buf = pf_write_buf,
        /*.retrieve_reply = pf_retrieve_reply,*/
        .forget_multi = pf_forget_multi,
        /*.flock = pf_flock,
        .fallocate = pf_fallocate,*/
        .readdirplus = pf_readdirplus,
        /*.copy_file_range = pf_copy_file_range,*/
};
1680
// Multi-threaded session loop configuration used by fuse_session_loop_mt():
// clone the /dev/fuse fd per worker and keep at most 10 idle workers alive.
static struct fuse_loop_config config = {
        .clone_fd = 1,
        .max_idle_threads = 10,
};
1685
// Maps libfuse severities onto Android log priorities. INFO/DEBUG are
// demoted one level (INFO->DEBUG, DEBUG->VERBOSE) to keep logcat quieter.
static std::unordered_map<enum fuse_log_level, enum android_LogPriority> fuse_to_android_loglevel({
        {FUSE_LOG_EMERG, ANDROID_LOG_FATAL},
        {FUSE_LOG_ALERT, ANDROID_LOG_ERROR},
        {FUSE_LOG_CRIT, ANDROID_LOG_ERROR},
        {FUSE_LOG_ERR, ANDROID_LOG_ERROR},
        {FUSE_LOG_WARNING, ANDROID_LOG_WARN},
        {FUSE_LOG_NOTICE, ANDROID_LOG_INFO},
        {FUSE_LOG_INFO, ANDROID_LOG_DEBUG},
        {FUSE_LOG_DEBUG, ANDROID_LOG_VERBOSE},
});
1696
// libfuse log callback: forwards libfuse messages to logcat under
// LIBFUSE_LOG_TAG, translating severities via fuse_to_android_loglevel.
// NOTE(review): .at() throws for a level missing from the map — the map
// above covers all levels libfuse defines today; confirm on libfuse upgrades.
static void fuse_logger(enum fuse_log_level level, const char* fmt, va_list ap) {
    __android_log_vprint(fuse_to_android_loglevel.at(level), LIBFUSE_LOG_TAG, fmt, ap);
}
1700
ShouldOpenWithFuse(int fd,bool for_read,const std::string & path)1701 bool FuseDaemon::ShouldOpenWithFuse(int fd, bool for_read, const std::string& path) {
1702 bool use_fuse = false;
1703
1704 if (active.load(std::memory_order_acquire)) {
1705 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
1706 const node* node = node::LookupAbsolutePath(fuse->root, path);
1707 if (node && node->HasCachedHandle()) {
1708 use_fuse = true;
1709 } else {
1710 // If we are unable to set a lock, we should use fuse since we can't track
1711 // when all fd references (including dups) are closed. This can happen when
1712 // we try to set a write lock twice on the same file
1713 use_fuse = set_file_lock(fd, for_read, path);
1714 }
1715 } else {
1716 LOG(WARNING) << "FUSE daemon is inactive. Cannot open file with FUSE";
1717 }
1718
1719 return use_fuse;
1720 }
1721
// Asks the kernel to drop its dentry cache entry for `path`. Resolves the
// node, its inode and its parent's inode under the fuse lock, then issues
// the invalidation outside the lock. No-op when the daemon is inactive or
// the path is unknown.
void FuseDaemon::InvalidateFuseDentryCache(const std::string& path) {
    LOG(VERBOSE) << "Invalidating FUSE dentry cache";
    if (active.load(std::memory_order_acquire)) {
        string name;
        fuse_ino_t parent;
        fuse_ino_t child;
        {
            std::lock_guard<std::recursive_mutex> guard(fuse->lock);
            const node* node = node::LookupAbsolutePath(fuse->root, path);
            if (node) {
                name = node->GetName();
                child = fuse->ToInode(const_cast<class node*>(node));
                parent = fuse->ToInode(node->GetParent());
            }
        }

        // `name` stays empty when the path is unknown; `parent`/`child` are
        // only assigned — and only read — when it is non-empty.
        if (!name.empty()) {
            fuse_inval(fuse->se, parent, child, name, path);
        }
    } else {
        LOG(WARNING) << "FUSE daemon is inactive. Cannot invalidate dentry";
    }
}
1745
// Constructs an inactive daemon bound to the given Java MediaProvider
// instance; `fuse` stays null until Start() runs.
FuseDaemon::FuseDaemon(JNIEnv* env, jobject mediaProvider) : mp(env, mediaProvider),
                                                             active(false), fuse(nullptr) {}
1748
// Returns true while Start()'s FUSE session loop is running.
bool FuseDaemon::IsStarted() const {
    return active.load(std::memory_order_acquire);
}
1752
// Runs the FUSE filesystem: validates `path` (used as both source and mount
// point), builds the session over the already-open /dev/fuse `fd`, and
// blocks in the multi-threaded session loop until the filesystem is
// unmounted or aborted.
void FuseDaemon::Start(android::base::unique_fd fd, const std::string& path) {
    android::base::SetDefaultTag(LOG_TAG);

    struct fuse_args args;
    // NOTE(review): `opts` is declared but never populated or read below.
    struct fuse_cmdline_opts opts;

    struct stat stat;

    if (lstat(path.c_str(), &stat)) {
        PLOG(ERROR) << "ERROR: failed to stat source " << path;
        return;
    }

    if (!S_ISDIR(stat.st_mode)) {
        PLOG(ERROR) << "ERROR: source is not a directory";
        return;
    }

    args = FUSE_ARGS_INIT(0, nullptr);
    if (fuse_opt_add_arg(&args, path.c_str()) || fuse_opt_add_arg(&args, "-odebug") ||
        fuse_opt_add_arg(&args, ("-omax_read=" + std::to_string(MAX_READ_SIZE)).c_str())) {
        // NOTE(review): early returns from here on leak `args` (and, below,
        // the zero-page mapping) — confirm whether cleanup is intended.
        LOG(ERROR) << "ERROR: failed to set options";
        return;
    }

    struct fuse fuse_default(path);
    fuse_default.mp = &mp;
    // fuse_default is stack allocated, but it's safe to save it as an instance variable because
    // this method blocks and FuseDaemon#active tells if we are currently blocking
    fuse = &fuse_default;

    // Used by pf_read: redacted ranges are represented by zeroized ranges of bytes,
    // so we mmap the maximum length of redacted ranges in the beginning and save memory allocations
    // on each read.
    fuse_default.zero_addr = static_cast<char*>(mmap(
            NULL, MAX_READ_SIZE, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, /*fd*/ -1, /*off*/ 0));
    if (fuse_default.zero_addr == MAP_FAILED) {
        // LOG(FATAL) aborts the process, so no cleanup is needed here.
        LOG(FATAL) << "mmap failed - could not start fuse! errno = " << errno;
    }

    // Custom logging for libfuse
    if (android::base::GetBoolProperty("persist.sys.fuse.log", false)) {
        fuse_set_log_func(fuse_logger);
    }

    struct fuse_session
            * se = fuse_session_new(&args, &ops, sizeof(ops), &fuse_default);
    if (!se) {
        PLOG(ERROR) << "Failed to create session ";
        return;
    }
    fuse_default.se = se;
    fuse_default.active = &active;
    se->fd = fd.release(); // libfuse owns the FD now
    se->mountpoint = strdup(path.c_str());

    // Single thread. Useful for debugging
    // fuse_session_loop(se);
    // Multi-threaded
    LOG(INFO) << "Starting fuse...";
    // Blocks until the session ends (unmount/abort).
    fuse_session_loop_mt(se, &config);
    fuse->active->store(false, std::memory_order_release);
    LOG(INFO) << "Ending fuse...";

    if (munmap(fuse_default.zero_addr, MAX_READ_SIZE)) {
        PLOG(ERROR) << "munmap failed!";
    }

    fuse_opt_free_args(&args);
    fuse_session_destroy(se);
    LOG(INFO) << "Ended fuse";
    return;
}
1826 } //namespace fuse
1827 } // namespace mediaprovider
1828