1 // Copyright (C) 2019 The Android Open Source Project
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #define ATRACE_TAG ATRACE_TAG_APP
16 #define LOG_TAG "FuseDaemon"
17 #define LIBFUSE_LOG_TAG "libfuse"
18
19 #include "FuseDaemon.h"
20
21 #include <android-base/file.h>
22 #include <android-base/logging.h>
23 #include <android-base/properties.h>
24 #include <android-base/strings.h>
25 #include <android/log.h>
26 #include <android/trace.h>
27 #include <ctype.h>
28 #include <dirent.h>
29 #include <errno.h>
30 #include <fcntl.h>
31 #include <fuse_i.h>
32 #include <fuse_kernel.h>
33 #include <fuse_log.h>
34 #include <fuse_lowlevel.h>
35 #include <inttypes.h>
36 #include <limits.h>
37 #include <stdbool.h>
38 #include <stdio.h>
39 #include <stdlib.h>
40 #include <string.h>
41 #include <sys/inotify.h>
42 #include <sys/mman.h>
43 #include <sys/mount.h>
44 #include <sys/param.h>
45 #include <sys/resource.h>
46 #include <sys/stat.h>
47 #include <sys/statfs.h>
48 #include <sys/statvfs.h>
49 #include <sys/time.h>
50 #include <sys/types.h>
51 #include <sys/uio.h>
52 #include <unistd.h>
53
54 #include <iostream>
55 #include <map>
56 #include <mutex>
57 #include <queue>
58 #include <regex>
59 #include <thread>
60 #include <unordered_map>
61 #include <unordered_set>
62 #include <vector>
63
64 #include "BpfSyscallWrappers.h"
65 #include "MediaProviderWrapper.h"
66 #include "leveldb/db.h"
67 #include "libfuse_jni/FuseUtils.h"
68 #include "libfuse_jni/ReaddirHelper.h"
69 #include "libfuse_jni/RedactionInfo.h"
70
71 using mediaprovider::fuse::DirectoryEntry;
72 using mediaprovider::fuse::dirhandle;
73 using mediaprovider::fuse::handle;
74 using mediaprovider::fuse::node;
75 using mediaprovider::fuse::RedactionInfo;
76 using std::string;
77 using std::vector;
78
79 // logging macros to avoid duplication.
80 #define TRACE_NODE(__node, __req) \
81 LOG(VERBOSE) << __FUNCTION__ << " : " << #__node << " = [" << get_name(__node) \
82 << "] (uid=" << (__req)->ctx.uid << ") "
83
84 #define ATRACE_NAME(name) ScopedTrace ___tracer(name)
85 #define ATRACE_CALL() ATRACE_NAME(__FUNCTION__)
86
87 class ScopedTrace {
88 public:
89     explicit inline ScopedTrace(const char *name) {
90 ATrace_beginSection(name);
91 }
92
93     inline ~ScopedTrace() {
94 ATrace_endSection();
95 }
96 };
97
98 const bool IS_OS_DEBUGABLE = android::base::GetIntProperty("ro.debuggable", 0);
99
100 #define FUSE_UNKNOWN_INO 0xffffffff
101
102 // Stolen from: android_filesystem_config.h
103 #define AID_APP_START 10000
104
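// Upper bound on the number of pages the kernel accepts in a single FUSE request; used below to
// size max_read (256 pages is 1 MiB with 4 KiB pages).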
105 #define FUSE_MAX_MAX_PAGES 256
106
107 const size_t MAX_READ_SIZE = FUSE_MAX_MAX_PAGES * getpagesize();
108 // Stolen from: UserHandle#getUserId
109 constexpr int PER_USER_RANGE = 100000;
110
111 // Stolen from: UserManagerService
112 constexpr int MAX_USER_ID = UINT32_MAX / PER_USER_RANGE;
113
114 const int MY_UID = getuid();
115 const int MY_USER_ID = MY_UID / PER_USER_RANGE;
116 const std::string MY_USER_ID_STRING(std::to_string(MY_UID / PER_USER_RANGE));
117
118 // Regex copied from FileUtils.java in MediaProvider, but without media directory.
119 const std::regex PATTERN_OWNED_PATH(
120 "^/storage/[^/]+/(?:[0-9]+/)?Android/(?:data|obb)/([^/]+)(/?.*)?",
121 std::regex_constants::icase);
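// e.g. "/storage/emulated/0/Android/data/com.example.app/files" matches with capture group 1 =
// "com.example.app" (illustrative package name).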
122 const std::regex PATTERN_BPF_BACKING_PATH("^/storage/[^/]+/[0-9]+/Android/(data|obb)$",
123 std::regex_constants::icase);
124
125 static constexpr char TRANSFORM_SYNTHETIC_DIR[] = "synthetic";
126 static constexpr char TRANSFORM_TRANSCODE_DIR[] = "transcode";
127
128 static constexpr char OWNERSHIP_RELATION[] = "ownership";
129
130 static constexpr char FUSE_BPF_PROG_PATH[] = "/sys/fs/bpf/prog_fuseMedia_fuse_media";
131
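// Sentinel passed in place of a real bpf program fd: fuse_bpf_fill_entries() maps BpfFd::REMOVE
// to FUSE_ACTION_REMOVE, detaching any bpf program from the backing entry.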
132 enum class BpfFd { REMOVE = -2 };
133
134 /*
135 * In order to avoid double caching with fuse, call fadvise on the file handles
136 * in the underlying file system. However, if this is done on every read/write,
137 * the fadvises cause a very significant slowdown in tests (specifically fio
138 * seq_write). So call fadvise on the file handles with the most reads/writes
139 * only after a threshold is passed.
140 */
141 class FAdviser {
142 public:
143     FAdviser() : thread_(MessageLoop, this), total_size_(0) {}
144
145     ~FAdviser() {
146 SendMessage(Message::quit);
147 thread_.join();
148 }
149
150     void Record(int fd, size_t size) { SendMessage(Message::record, fd, size); }
151
152     void Close(int fd) { SendMessage(Message::close, fd); }
153
154 private:
155 struct Message {
156 enum Type { record, close, quit };
157 Type type;
158 int fd;
159 size_t size;
160 };
161
162     void RecordImpl(int fd, size_t size) {
163 total_size_ += size;
164
165 // Find or create record in files_
166 // Remove record from sizes_ if it exists, adjusting size appropriately
167 auto file = files_.find(fd);
168 if (file != files_.end()) {
169 auto old_size = file->second;
170 size += old_size->first;
171 sizes_.erase(old_size);
172 } else {
173 file = files_.insert(Files::value_type(fd, sizes_.end())).first;
174 }
175
176 // Now (re) insert record in sizes_
177 auto new_size = sizes_.insert(Sizes::value_type(size, fd));
178 file->second = new_size;
179
180 if (total_size_ < threshold_) return;
181
182 LOG(INFO) << "Threshold exceeded - fadvising " << total_size_;
183 while (!sizes_.empty() && total_size_ > target_) {
184 auto size = --sizes_.end();
185 total_size_ -= size->first;
186 posix_fadvise(size->second, 0, 0, POSIX_FADV_DONTNEED);
187 files_.erase(size->second);
188 sizes_.erase(size);
189 }
190 LOG(INFO) << "Threshold now " << total_size_;
191 }
192
193     void CloseImpl(int fd) {
194 auto file = files_.find(fd);
195 if (file == files_.end()) return;
196
197 total_size_ -= file->second->first;
198 sizes_.erase(file->second);
199 files_.erase(file);
200 }
201
202     void MessageLoopImpl() {
203 while (1) {
204 Message message;
205
206 {
207 std::unique_lock<std::mutex> lock(mutex_);
208 cv_.wait(lock, [this] { return !queue_.empty(); });
209 message = queue_.front();
210 queue_.pop();
211 }
212
213 switch (message.type) {
214 case Message::record:
215 RecordImpl(message.fd, message.size);
216 break;
217
218 case Message::close:
219 CloseImpl(message.fd);
220 break;
221
222 case Message::quit:
223 return;
224 }
225 }
226 }
227
228     static int MessageLoop(FAdviser* ptr) {
229 ptr->MessageLoopImpl();
230 return 0;
231 }
232
233     void SendMessage(Message::Type type, int fd = -1, size_t size = 0) {
234 {
235 std::unique_lock<std::mutex> lock(mutex_);
236 Message message = {type, fd, size};
237 queue_.push(message);
238 }
239 cv_.notify_one();
240 }
241
242 std::mutex mutex_;
243 std::condition_variable cv_;
244 std::queue<Message> queue_;
245 std::thread thread_;
246
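// sizes_ orders recorded fds by accumulated bytes so the biggest consumers are fadvised first;
// files_ maps each fd to its entry in sizes_ so Record() can update it in O(log n).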
247 typedef std::multimap<size_t, int> Sizes;
248 typedef std::map<int, Sizes::iterator> Files;
249
250 Files files_;
251 Sizes sizes_;
252 size_t total_size_;
253
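// Once the running total exceeds threshold_, fadvise(POSIX_FADV_DONTNEED) the largest files
// until the total drops back below target_.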
254 const size_t threshold_ = 64 * 1024 * 1024;
255 const size_t target_ = 32 * 1024 * 1024;
256 };
257
258 /* Single FUSE mount */
259 struct fuse {
260     explicit fuse(const std::string& _path, const ino_t _ino, const bool _uncached_mode,
261 const bool _bpf, android::base::unique_fd&& _bpf_fd,
262 const std::vector<string>& _supported_transcoding_relative_paths,
263 const std::vector<string>& _supported_uncached_relative_paths)
264 : path(_path),
265 tracker(mediaprovider::fuse::NodeTracker(&lock)),
266 root(node::CreateRoot(_path, &lock, _ino, &tracker)),
267 uncached_mode(_uncached_mode),
268 mp(0),
269 zero_addr(0),
270 disable_dentry_cache(false),
271 passthrough(false),
272 upstream_passthrough(false),
273 bpf(_bpf),
274 bpf_fd(std::move(_bpf_fd)),
275 supported_transcoding_relative_paths(_supported_transcoding_relative_paths),
276 supported_uncached_relative_paths(_supported_uncached_relative_paths) {}
277
278     inline bool IsRoot(const node* node) const { return node == root; }
279
280     inline string GetEffectiveRootPath() {
281 if (android::base::StartsWith(path, mediaprovider::fuse::PRIMARY_VOLUME_PREFIX)) {
282 return path + "/" + MY_USER_ID_STRING;
283 }
284 return path;
285 }
286
287     inline string GetTransformsDir() { return GetEffectiveRootPath() + "/.transforms"; }
288
289 // Note that these two (FromInode / ToInode) conversion wrappers are required
290 // because fuse_lowlevel_ops documents that the root inode is always one
291 // (see FUSE_ROOT_ID in fuse_lowlevel.h). There are no particular requirements
292 // on any of the other inodes in the FS.
293     inline node* FromInode(__u64 inode) {
294 if (inode == FUSE_ROOT_ID) {
295 return root;
296 }
297
298 return node::FromInode(inode, &tracker);
299 }
300
301     inline node* FromInodeNoThrow(__u64 inode) {
302 if (inode == FUSE_ROOT_ID) {
303 return root;
304 }
305
306 return node::FromInodeNoThrow(inode, &tracker);
307 }
308
309     inline __u64 ToInode(node* node) const {
310 if (IsRoot(node)) {
311 return FUSE_ROOT_ID;
312 }
313
314 return node::ToInode(node);
315 }
316
317     inline bool IsTranscodeSupportedPath(const string& path) {
318 // Keep in sync with MediaProvider#supportsTranscode
319 if (!android::base::EndsWithIgnoreCase(path, ".mp4")) {
320 return false;
321 }
322
323 const std::string& base_path = GetEffectiveRootPath() + "/";
324 for (const std::string& relative_path : supported_transcoding_relative_paths) {
325 if (android::base::StartsWithIgnoreCase(path, base_path + relative_path)) {
326 return true;
327 }
328 }
329
330 return false;
331 }
332
333     inline bool IsUncachedPath(const std::string& path) {
334 const std::string base_path = GetEffectiveRootPath() + "/";
335 for (const std::string& relative_path : supported_uncached_relative_paths) {
336 if (android::base::StartsWithIgnoreCase(path, base_path + relative_path)) {
337 return true;
338 }
339 }
340
341 return false;
342 }
343
344     inline bool ShouldNotCache(const std::string& path) {
345 if (uncached_mode) {
346 // Cache is disabled for the entire volume.
347 return true;
348 }
349
350 if (supported_uncached_relative_paths.empty()) {
351 // By default there is no supported uncached path. Just return early in this case.
352 return false;
353 }
354
355 if (!android::base::StartsWithIgnoreCase(path, mediaprovider::fuse::PRIMARY_VOLUME_PREFIX)) {
356 // Uncached path config applies only to primary volumes.
357 return false;
358 }
359
360 if (android::base::EndsWith(path, "/")) {
361 return IsUncachedPath(path);
362 } else {
363 // Append a slash at the end to make sure that the exact match is picked up.
364 return IsUncachedPath(path + "/");
365 }
366 }
367
368 std::recursive_mutex lock;
369 const string path;
370 // The Inode tracker associated with this FUSE instance.
371 mediaprovider::fuse::NodeTracker tracker;
372 node* const root;
373 struct fuse_session* se;
374
375 const bool uncached_mode;
376
377 /*
378 * Used to make JNI calls to MediaProvider.
379 * Responsibility of freeing this object falls on corresponding
380 * FuseDaemon object.
381 */
382 mediaprovider::fuse::MediaProviderWrapper* mp;
383
384 /*
385 * Points to a range of zeroized bytes, used by pf_read to represent redacted ranges.
386 * The memory is read only and should never be modified.
387 */
388 /* const */ char* zero_addr;
389
390 FAdviser fadviser;
391
392 std::atomic_bool* active;
393 std::atomic_bool disable_dentry_cache;
394 std::atomic_bool passthrough;
395 std::atomic_bool upstream_passthrough;
396 std::atomic_bool bpf;
397
398 const android::base::unique_fd bpf_fd;
399
400 // FUSE device id.
401 std::atomic_uint dev;
402 const std::vector<string> supported_transcoding_relative_paths;
403 const std::vector<string> supported_uncached_relative_paths;
404
405 // LevelDb Connection Map
406 std::map<std::string, leveldb::DB*> level_db_connection_map;
407 std::recursive_mutex level_db_mutex;
408 };
409
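// Result of parse_open_flags(): the flags to pass to the lower filesystem open, whether the file
// is opened for writing, and whether the caller asked for O_DIRECT.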
410 struct OpenInfo {
411 int flags;
412 bool for_write;
413 bool direct_io;
414 };
415
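// The operation on whose behalf a node entry is being built; validate_node_path() and do_lookup()
// relax or tighten their checks (e.g. for .transforms/synthetic and FUSE BPF) depending on this.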
416 enum class FuseOp { lookup, readdir, mknod, mkdir, create };
417
418 static inline string get_name(node* n) {
419 if (n) {
420 std::string name = IS_OS_DEBUGABLE ? "real_path: " + n->BuildPath() + " " : "";
421 name += "node_path: " + n->BuildSafePath();
422 return name;
423 }
424 return "?";
425 }
426
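// FUSE file handles (fuse_file_info.fh) are opaque 64-bit values; we store the raw handle
// pointer in them.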
427 static inline __u64 ptr_to_id(const void* ptr) {
428 return (__u64)(uintptr_t) ptr;
429 }
430
431 /*
432  * Set an F_RDLCK or F_WRLCK on fd with fcntl(2).
433 *
434 * This is called before the MediaProvider returns fd from the lower file
435  * system to an app over the ContentResolver interface. This allows us to
436  * check with is_file_locked whether any reference to that fd is still open.
437 */
438 static int set_file_lock(int fd, bool for_read, const std::string& path) {
439 std::string lock_str = (for_read ? "read" : "write");
440
441 struct flock fl{};
442 fl.l_type = for_read ? F_RDLCK : F_WRLCK;
443 fl.l_whence = SEEK_SET;
444
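    // l_start/l_len are left 0, so the lock covers the whole file. F_OFD_SETLK ties the lock to
    // this open file description rather than the process, so it lasts until the last dup of the
    // fd is closed.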
445 int res = fcntl(fd, F_OFD_SETLK, &fl);
446 if (res) {
447 PLOG(WARNING) << "Failed to set lock: " << lock_str;
448 return res;
449 }
450 return res;
451 }
452
453 /*
454 * Check if an F_RDLCK or F_WRLCK is set on fd with fcntl(2).
455 *
456 * This is used to determine if the MediaProvider has given an fd to the lower fs to an app over
457 * the ContentResolver interface. Before that happens, we always call set_file_lock on the file
458 * allowing us to know if any reference to that fd is still open here.
459 *
460 * Returns true if fd may have a lock, false otherwise
461 */
462 static bool is_file_locked(int fd, const std::string& path) {
463 struct flock fl{};
464 fl.l_type = F_WRLCK;
465 fl.l_whence = SEEK_SET;
466
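    // Probe with F_OFD_GETLK: if any other open file description holds a conflicting lock,
    // fl.l_type is rewritten to something other than F_UNLCK.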
467 int res = fcntl(fd, F_OFD_GETLK, &fl);
468 if (res) {
469 PLOG(WARNING) << "Failed to check lock";
470 // Assume worst
471 return true;
472 }
473 bool locked = fl.l_type != F_UNLCK;
474 return locked;
475 }
476
477 static struct fuse* get_fuse(fuse_req_t req) {
478 return reinterpret_cast<struct fuse*>(fuse_req_userdata(req));
479 }
480
481 static bool is_package_owned_path(const string& path, const string& fuse_path) {
482 if (path.rfind(fuse_path, 0) != 0) {
483 return false;
484 }
485 return std::regex_match(path, PATTERN_OWNED_PATH);
486 }
487
488 static bool is_bpf_backing_path(const string& path) {
489 return std::regex_match(path, PATTERN_BPF_BACKING_PATH);
490 }
491
492 // See fuse_lowlevel.h fuse_lowlevel_notify_inval_entry for how to call this safely without
493 // deadlocking the kernel
494 static void fuse_inval(fuse_session* se, fuse_ino_t parent_ino, fuse_ino_t child_ino,
495 const string& child_name, const string& path) {
496 if (mediaprovider::fuse::containsMount(path)) {
497 LOG(WARNING) << "Ignoring attempt to invalidate dentry for FUSE mounts";
498 return;
499 }
500
501 if (fuse_lowlevel_notify_inval_entry(se, parent_ino, child_name.c_str(), child_name.size())) {
502 // Invalidating the dentry can fail if there's no dcache entry, however, there may still
503 // be cached attributes, so attempt to invalidate those by invalidating the inode
504 fuse_lowlevel_notify_inval_inode(se, child_ino, 0, 0);
505 }
506 }
507
508 static double get_entry_timeout(const string& path, bool should_inval, struct fuse* fuse) {
509 if (fuse->disable_dentry_cache || should_inval || is_package_owned_path(path, fuse->path) ||
510 fuse->ShouldNotCache(path)) {
511 // We set dentry timeout to 0 for the following reasons:
512 // 1. The dentry cache was completely disabled for the entire volume.
513 // 2.1 Case-insensitive lookups need to invalidate other case-insensitive dentry matches
514 // 2.2 Nodes supporting transforms need to be invalidated, so that subsequent lookups by a
515 // uid requiring a transform is guaranteed to come to the FUSE daemon.
516 // 3. With app data isolation enabled, app A should not guess existence of app B from the
517 // Android/{data,obb}/<package> paths, hence we prevent the kernel from caching that
518 // information.
519 // 4. The dentry cache was completely disabled for the given path.
520 return 0;
521 }
522 return std::numeric_limits<double>::max();
523 }
524
525 static std::string get_path(node* node) {
526 const string& io_path = node->GetIoPath();
527 return io_path.empty() ? node->BuildPath() : io_path;
528 }
529
530 // Returns true if the path resides under .transforms/synthetic.
531 // NOTE: currently only file paths corresponding to redacted URIs reside under this folder. The path
532 // itself never exists and is just a link for transformation.
533 static inline bool is_synthetic_path(const string& path, struct fuse* fuse) {
534 return android::base::StartsWithIgnoreCase(
535 path, fuse->GetTransformsDir() + "/" + TRANSFORM_SYNTHETIC_DIR);
536 }
537
538 static inline bool is_transforms_dir_path(const string& path, struct fuse* fuse) {
539 return android::base::StartsWithIgnoreCase(path, fuse->GetTransformsDir());
540 }
541
542 static std::unique_ptr<mediaprovider::fuse::FileLookupResult> validate_node_path(
543 const std::string& path, const std::string& name, fuse_req_t req, int* error_code,
544 struct fuse_entry_param* e, const FuseOp op) {
545 struct fuse* fuse = get_fuse(req);
546 const struct fuse_ctx* ctx = fuse_req_ctx(req);
547 memset(e, 0, sizeof(*e));
548
549 const bool synthetic_path = is_synthetic_path(path, fuse);
550 if (lstat(path.c_str(), &e->attr) < 0 && !(op == FuseOp::lookup && synthetic_path)) {
551 *error_code = errno;
552 return nullptr;
553 }
554
555 if (is_transforms_dir_path(path, fuse)) {
556 if (op == FuseOp::lookup) {
557 // Lookups are only allowed under .transforms/synthetic dir
558 if (!(android::base::EqualsIgnoreCase(path, fuse->GetTransformsDir()) ||
559 android::base::StartsWithIgnoreCase(
560 path, fuse->GetTransformsDir() + "/" + TRANSFORM_SYNTHETIC_DIR))) {
561 *error_code = ENONET;
562 return nullptr;
563 }
564 } else {
565 // user-code is only allowed to make lookups under .transforms dir, and that too only
566 // under .transforms/synthetic dir
567 *error_code = ENOENT;
568 return nullptr;
569 }
570 }
571
572 if (S_ISDIR(e->attr.st_mode)) {
573 // now that we have reached this point, ops on directories are safe and require no
574 // transformation.
575 return std::make_unique<mediaprovider::fuse::FileLookupResult>(0, 0, 0, true, false, "");
576 }
577
578 if (!synthetic_path && !fuse->IsTranscodeSupportedPath(path)) {
579 // Transforms are only supported for synthetic or transcode-supported paths
580 return std::make_unique<mediaprovider::fuse::FileLookupResult>(0, 0, 0, true, false, "");
581 }
582
583 // Handle potential file transforms
584 std::unique_ptr<mediaprovider::fuse::FileLookupResult> file_lookup_result =
585 fuse->mp->FileLookup(path, req->ctx.uid, req->ctx.pid);
586
587 if (!file_lookup_result) {
588 // Fail lookup if we can't fetch FileLookupResult for path
589 LOG(WARNING) << "Failed to fetch FileLookupResult for " << path;
590 *error_code = EFAULT;
591 return nullptr;
592 }
593
594 const string& io_path = file_lookup_result->io_path;
595 // Update size with io_path iff there's an io_path
596 if (!io_path.empty() && (lstat(io_path.c_str(), &e->attr) < 0)) {
597 *error_code = errno;
598 return nullptr;
599 }
600
601 return file_lookup_result;
602 }
603
604 static node* make_node_entry(fuse_req_t req, node* parent, const string& name,
605 const string& parent_path, const string& path,
606 struct fuse_entry_param* e, int* error_code, const FuseOp op) {
607 struct fuse* fuse = get_fuse(req);
608 const struct fuse_ctx* ctx = fuse_req_ctx(req);
609 node* node;
610
611 memset(e, 0, sizeof(*e));
612
613 std::unique_ptr<mediaprovider::fuse::FileLookupResult> file_lookup_result =
614 validate_node_path(path, name, req, error_code, e, op);
615 if (!file_lookup_result) {
616         // Fail lookup if we can't validate |path|; |errno| would have already been set
617 return nullptr;
618 }
619
620 bool should_invalidate = file_lookup_result->transforms_supported;
621 const bool transforms_complete = file_lookup_result->transforms_complete;
622 const int transforms = file_lookup_result->transforms;
623 const int transforms_reason = file_lookup_result->transforms_reason;
624 const string& io_path = file_lookup_result->io_path;
625 if (transforms) {
626 // If the node requires transforms, we MUST never cache it in the VFS
627 CHECK(should_invalidate);
628 }
629
630 node = parent->LookupChildByName(name, true /* acquire */, transforms);
631 if (!node) {
632 ino_t ino = e->attr.st_ino;
633 node = ::node::Create(parent, name, io_path, transforms_complete, transforms,
634 transforms_reason, &fuse->lock, ino, &fuse->tracker);
635 } else if (!mediaprovider::fuse::containsMount(path)) {
636 // Only invalidate a path if it does not contain mount and |name| != node->GetName.
637 // Invalidate both names to ensure there's no dentry left in the kernel after the following
638 // operations:
639 // 1) touch foo, touch FOO, unlink *foo*
640 // 2) touch foo, touch FOO, unlink *FOO*
641 // Invalidating lookup_name fixes (1) and invalidating node_name fixes (2)
642 // -Set |should_invalidate| to true to invalidate lookup_name by using 0 timeout below
643 // -Explicitly invalidate node_name. Note that we invalidate async otherwise we will
644 // deadlock the kernel
645 if (name != node->GetName()) {
646 // Force node invalidation to fix the kernel dentry cache for case (1) above
647 should_invalidate = true;
648 // Make copies of the node name and path so we're not attempting to acquire
649 // any node locks from the invalidation thread. Depending on timing, we may end
650 // up invalidating the wrong inode but that shouldn't result in correctness issues.
651 const fuse_ino_t parent_ino = fuse->ToInode(parent);
652 const fuse_ino_t child_ino = fuse->ToInode(node);
653 const std::string& node_name = node->GetName();
654 std::thread t([=]() { fuse_inval(fuse->se, parent_ino, child_ino, node_name, path); });
655 t.detach();
656 // Update the name after |node_name| reference above has been captured in lambda
657 // This avoids invalidating the node again on subsequent accesses with |name|
658 node->SetName(name);
659 }
660
661         // This updated value allows us to correctly decide whether to keep_cache and use direct_io during
662 // FUSE_OPEN. Between the last lookup and this lookup, we might have deleted a cached
663 // transcoded file on the lower fs. A subsequent transcode at FUSE_READ should ensure we
664 // don't reuse any stale transcode page cache content.
665 node->SetTransformsComplete(transforms_complete);
666 }
667 TRACE_NODE(node, req);
668
669 if (should_invalidate && fuse->IsTranscodeSupportedPath(path)) {
670 // Some components like the MTP stack need an efficient mechanism to determine if a file
671         // supports transcoding. This allows them to work around an issue with MTP clients on Windows
672 // where those clients incorrectly use the original file size instead of the transcoded file
673 // size to copy files from the device. This size misuse causes transcoded files to be
674 // truncated to the original file size, hence corrupting the transcoded file.
675 //
676 // We expose the transcode bit via the st_nlink stat field. This should be safe because the
677 // field is not supported on FAT filesystems which FUSE is emulating.
678 // WARNING: Apps should never rely on this behavior as it is NOT supported API and will be
679 // removed in a future release when the MTP stack has better support for transcoded files on
680 // Windows OS.
681 e->attr.st_nlink = 2;
682 }
683
684 // This FS is not being exported via NFS so just a fixed generation number
685 // for now. If we do need this, we need to increment the generation ID each
686 // time the fuse daemon restarts because that's what it takes for us to
687 // reuse inode numbers.
688 e->generation = 0;
689 e->ino = fuse->ToInode(node);
690
691 // When FUSE BPF is used, the caching of node attributes and lookups is
692 // disabled to avoid possible inconsistencies between the FUSE cache and
693 // the lower file system state.
694 // With FUSE BPF the file system requests are forwarded to the lower file
695 // system bypassing the FUSE daemon, so dropping the caching does not
696 // introduce a performance regression.
697 // Currently FUSE BPF is limited to the Android/data and Android/obb
698 // directories.
699 if (!fuse->bpf || !is_bpf_backing_path(parent_path)) {
700 e->entry_timeout = get_entry_timeout(path, should_invalidate, fuse);
701 e->attr_timeout = fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max();
702 }
703 return node;
704 }
705
706 namespace mediaprovider {
707 namespace fuse {
708
709 /**
710 * Function implementations
711 *
712 * These implement the various functions in fuse_lowlevel_ops
713 *
714 */
715
716 static void pf_init(void* userdata, struct fuse_conn_info* conn) {
717 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
718
719 // Check the same property as android.os.Build.IS_ARC.
720 const bool is_arc = android::base::GetBoolProperty("ro.boot.container", false);
721
722 // We don't want a getattr request with every read request
723 conn->want &= ~FUSE_CAP_AUTO_INVAL_DATA & ~FUSE_CAP_READDIRPLUS_AUTO;
724 uint64_t mask = (FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE | FUSE_CAP_SPLICE_READ |
725 FUSE_CAP_ASYNC_READ | FUSE_CAP_ATOMIC_O_TRUNC | FUSE_CAP_WRITEBACK_CACHE |
726 FUSE_CAP_EXPORT_SUPPORT | FUSE_CAP_FLOCK_LOCKS);
727 // Disable writeback cache if it's uncached mode or if it's ARC. In ARC, due to the Downloads
728 // bind-mount, we need to disable it on the primary emulated volume as well as on StubVolumes.
729 if (fuse->uncached_mode || is_arc) {
730 mask &= ~FUSE_CAP_WRITEBACK_CACHE;
731 }
732
733 bool disable_splice_write = false;
734 if (fuse->passthrough) {
735 if (conn->capable & FUSE_CAP_PASSTHROUGH) {
736 mask |= FUSE_CAP_PASSTHROUGH;
737
738 // SPLICE_WRITE seems to cause linux kernel cache corruption with passthrough enabled.
739 // It is still under investigation but while running
740 // ScopedStorageDeviceTest#testAccessMediaLocationInvalidation, we notice test flakes
741 // of about 1/20 for the following reason:
742 // 1. App without ACCESS_MEDIA_LOCATION permission reads redacted bytes via FUSE cache
743 // 2. App with ACCESS_MEDIA_LOCATION permission reads non-redacted bytes via passthrough
744 // cache
745 // (2) fails because bytes from (1) sneak into the passthrough cache??
746 // To workaround, we disable splice for write when passthrough is enabled.
747 // This shouldn't have any performance regression if comparing passthrough devices to
748 // no-passthrough devices for the following reasons:
749 // 1. No-op for no-passthrough devices
750 // 2. Passthrough devices
751 // a. Files not requiring redaction use passthrough which bypasses FUSE_READ entirely
752 // b. Files requiring redaction are still faster than no-passthrough devices that use
753 // direct_io
754 disable_splice_write = true;
755 } else if (conn->capable & FUSE_CAP_PASSTHROUGH_UPSTREAM) {
756 mask |= FUSE_CAP_PASSTHROUGH_UPSTREAM;
757 disable_splice_write = true;
758 fuse->upstream_passthrough = true;
759 } else {
760 LOG(WARNING) << "Passthrough feature not supported by the kernel";
761 fuse->passthrough = false;
762 }
763 }
764
765 conn->want |= conn->capable & mask;
766 if (disable_splice_write) {
767 conn->want &= ~FUSE_CAP_SPLICE_WRITE;
768 }
769
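    // Cap reads at MAX_READ_SIZE (FUSE_MAX_MAX_PAGES pages) so a single request never exceeds
    // the kernel's per-request page limit.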
770 conn->max_read = MAX_READ_SIZE;
771
772 fuse->active->store(true, std::memory_order_release);
773 }
774
775 static void removeInstance(struct fuse* fuse, std::string instance_name) {
776 if (fuse->level_db_connection_map.find(instance_name) != fuse->level_db_connection_map.end()) {
777 delete fuse->level_db_connection_map[instance_name];
778 (fuse->level_db_connection_map).erase(instance_name);
779 LOG(INFO) << "Removed leveldb connection for " << instance_name;
780 }
781 }
782
783 static void removeLevelDbConnection(struct fuse* fuse) {
784 fuse->level_db_mutex.lock();
785 if (android::base::StartsWith(fuse->path, PRIMARY_VOLUME_PREFIX)) {
786 removeInstance(fuse, VOLUME_INTERNAL);
787 removeInstance(fuse, OWNERSHIP_RELATION);
788 removeInstance(fuse, VOLUME_EXTERNAL_PRIMARY);
789 } else {
790 // Return "C58E-1702" from the path like "/storage/C58E-1702"
791 std::string volume_name = (fuse->path).substr(9);
792 // Convert to lowercase
793 std::transform(volume_name.begin(), volume_name.end(), volume_name.begin(), ::tolower);
794 removeInstance(fuse, volume_name);
795 }
796 fuse->level_db_mutex.unlock();
797 }
798
799 static void pf_destroy(void* userdata) {
800 struct fuse* fuse = reinterpret_cast<struct fuse*>(userdata);
801 removeLevelDbConnection(fuse);
802 LOG(INFO) << "DESTROY " << fuse->path;
803
804 node::DeleteTree(fuse->root);
805 }
806
807 // Return true if the path is accessible for that uid.
808 static bool is_app_accessible_path(struct fuse* fuse, const string& path, uid_t uid) {
809 MediaProviderWrapper* mp = fuse->mp;
810
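    // Requests from system UIDs (below the first app UID) and from the daemon's own UID are
    // always allowed.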
811 if (uid < AID_APP_START || uid == MY_UID) {
812 return true;
813 }
814
815 if (path == PRIMARY_VOLUME_PREFIX) {
816         // Apps should never refer to /storage/emulated - they should be using the user-specific
817 // subdirs, eg /storage/emulated/0
818 return false;
819 }
820
821 std::smatch match;
822 if (std::regex_match(path, match, PATTERN_OWNED_PATH)) {
823 const std::string& pkg = match[1];
824 // .nomedia is not a valid package. .nomedia always exists in /Android/data directory,
825 // and it's not an external file/directory of any package
826 if (pkg == ".nomedia") {
827 return true;
828 }
829 if (!fuse->bpf && android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
830 // Emulated storage bind-mounts app-private data directories, and so these
831 // should not be accessible through FUSE anyway.
832 LOG(WARNING) << "Rejected access to app-private dir on FUSE: " << path
833 << " from uid: " << uid;
834 return false;
835 }
836 if (!mp->isUidAllowedAccessToDataOrObbPath(uid, path)) {
837 PLOG(WARNING) << "Invalid other package file access from " << uid << "(: " << path;
838 return false;
839 }
840 }
841 return true;
842 }
843
844 void fuse_bpf_fill_entries(const string& path, const int bpf_fd, struct fuse_entry_param* e,
845 int& backing_fd) {
846 /*
847 * The file descriptor `backing_fd` must not be closed as it is closed
848 * automatically by the kernel as soon as it consumes the FUSE reply. This
849 * mechanism is necessary because userspace doesn't know when the kernel
850 * will consume the FUSE response containing `backing_fd`, thus it may close
851 * the `backing_fd` too soon, with the risk of assigning a backing file
852 * which is either invalid or corresponds to the wrong file in the lower
853 * file system.
854 */
855 backing_fd = open(path.c_str(), O_CLOEXEC | O_DIRECTORY | O_RDONLY);
856 if (backing_fd < 0) {
857 PLOG(ERROR) << "Failed to open: " << path;
858 return;
859 }
860
861 e->backing_action = FUSE_ACTION_REPLACE;
862 e->backing_fd = backing_fd;
863
864 if (bpf_fd >= 0) {
865 e->bpf_action = FUSE_ACTION_REPLACE;
866 e->bpf_fd = bpf_fd;
867 } else if (bpf_fd == static_cast<int>(BpfFd::REMOVE)) {
868 e->bpf_action = FUSE_ACTION_REMOVE;
869 } else {
870 e->bpf_action = FUSE_ACTION_KEEP;
871 }
872 }
873
874 void fuse_bpf_install(struct fuse* fuse, struct fuse_entry_param* e, const string& child_path,
875 int& backing_fd) {
876 // TODO(b/211873756) Enable only for the primary volume. Must be
877 // extended for other media devices.
878 if (android::base::StartsWith(child_path, PRIMARY_VOLUME_PREFIX)) {
879 if (is_bpf_backing_path(child_path)) {
880 fuse_bpf_fill_entries(child_path, fuse->bpf_fd.get(), e, backing_fd);
881 } else if (is_package_owned_path(child_path, fuse->path)) {
882 fuse_bpf_fill_entries(child_path, static_cast<int>(BpfFd::REMOVE), e, backing_fd);
883 }
884 }
885 }
886
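// Matches /storage/emulated/<userId> prefixes and captures the numeric user id.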
887 static std::regex storage_emulated_regex("^\\/storage\\/emulated\\/([0-9]+)");
888
889 static bool is_user_accessible_path(fuse_req_t req, const struct fuse* fuse, const string& path) {
890 std::smatch match;
891 std::regex_search(path, match, storage_emulated_regex);
892
893 // Ensure the FuseDaemon user id matches the user id or cross-user lookups are allowed in
894 // requested path
895 if (match.size() == 2 && std::to_string(getuid() / PER_USER_RANGE) != match[1].str()) {
896 // If user id mismatch, check cross-user lookups
897 long userId = strtol(match[1].str().c_str(), nullptr, 10);
898 if (userId < 0 || userId > MAX_USER_ID ||
899 !fuse->mp->ShouldAllowLookup(req->ctx.uid, userId)) {
900 return false;
901 }
902 }
903 return true;
904 }
905
906 static node* do_lookup(fuse_req_t req, fuse_ino_t parent, const char* name,
907 struct fuse_entry_param* e, int* error_code, const FuseOp op,
908 const bool validate_access, int* backing_fd = NULL) {
909 struct fuse* fuse = get_fuse(req);
910 node* parent_node = fuse->FromInode(parent);
911 if (!parent_node) {
912 *error_code = ENOENT;
913 return nullptr;
914 }
915 string parent_path = parent_node->BuildPath();
916
917 // We should always allow lookups on the root, because failing them could cause
918 // bind mounts to be invalidated.
919 if (validate_access && !fuse->IsRoot(parent_node) &&
920 !is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
921 *error_code = ENOENT;
922 return nullptr;
923 }
924
925 TRACE_NODE(parent_node, req);
926
927 const string child_path = parent_path + "/" + name;
928
929 if (validate_access && !is_user_accessible_path(req, fuse, child_path)) {
930 *error_code = EACCES;
931 return nullptr;
932 }
933
934 auto node = make_node_entry(req, parent_node, name, parent_path, child_path, e, error_code, op);
935
936 if (fuse->bpf) {
937 if (op == FuseOp::lookup) {
938 // Only direct lookup calls support setting backing_fd and bpf program
939 fuse_bpf_install(fuse, e, child_path, *backing_fd);
940 } else if (is_bpf_backing_path(child_path) && op == FuseOp::readdir) {
941 // Fuse-bpf driver implementation doesn’t support providing backing_fd
942 // and bpf program as a part of readdirplus lookup. So we make sure
943 // here we're not making any lookups on backed files because we want
944 // to receive separate lookup calls for them later to set backing_fd and bpf.
945 e->ino = 0;
946 }
947 }
948
949 return node;
950 }
951
952 static void pf_lookup(fuse_req_t req, fuse_ino_t parent, const char* name) {
953 ATRACE_CALL();
954 struct fuse_entry_param e;
955 int backing_fd = -1;
956
957 int error_code = 0;
958 if (do_lookup(req, parent, name, &e, &error_code, FuseOp::lookup, true, &backing_fd)) {
959 fuse_reply_entry(req, &e);
960 } else {
961 CHECK(error_code != 0);
962 fuse_reply_err(req, error_code);
963 }
964
965 if (backing_fd != -1) close(backing_fd);
966 }
967
968 static void pf_lookup_postfilter(fuse_req_t req, fuse_ino_t parent, uint32_t error_in,
969 const char* name, struct fuse_entry_out* feo,
970 struct fuse_entry_bpf_out* febo) {
971 struct fuse* fuse = get_fuse(req);
972
973 ATRACE_CALL();
974 node* parent_node = fuse->FromInode(parent);
975 if (!parent_node) {
976 fuse_reply_err(req, ENOENT);
977 return;
978 }
979
980 TRACE_NODE(parent_node, req);
981 const string path = parent_node->BuildPath() + "/" + name;
982 if (strcmp(name, ".nomedia") != 0 &&
983 !fuse->mp->isUidAllowedAccessToDataOrObbPath(req->ctx.uid, path)) {
984 fuse_reply_err(req, ENOENT);
985 return;
986 }
987
988 struct {
989 struct fuse_entry_out feo;
990 struct fuse_entry_bpf_out febo;
991 } buf = {*feo, *febo};
992
993 fuse_reply_buf(req, (const char*)&buf, sizeof(buf));
994 }
995
996 static void do_forget(fuse_req_t req, struct fuse* fuse, fuse_ino_t ino, uint64_t nlookup) {
997 node* node = fuse->FromInode(ino);
998 TRACE_NODE(node, req);
999 if (node) {
1000 // This is a narrowing conversion from an unsigned 64bit to a 32bit value. For
1001 // some reason we only keep 32 bit refcounts but the kernel issues
1002 // forget requests with a 64 bit counter.
1003 int backing_id = node->GetBackingId();
1004 if (node->Release(static_cast<uint32_t>(nlookup))) {
1005 if (backing_id) fuse_passthrough_close(req, backing_id);
1006 }
1007 }
1008 }
1009
1010 static void pf_forget(fuse_req_t req, fuse_ino_t ino, uint64_t nlookup) {
1011 // Always allow to forget so no need to check is_app_accessible_path()
1012 ATRACE_CALL();
1013 node* node;
1014 struct fuse* fuse = get_fuse(req);
1015
1016 do_forget(req, fuse, ino, nlookup);
1017 fuse_reply_none(req);
1018 }
1019
1020 static void pf_forget_multi(fuse_req_t req,
1021 size_t count,
1022 struct fuse_forget_data* forgets) {
1023 ATRACE_CALL();
1024 struct fuse* fuse = get_fuse(req);
1025
1026 for (int i = 0; i < count; i++) {
1027 do_forget(req, fuse, forgets[i].ino, forgets[i].nlookup);
1028 }
1029 fuse_reply_none(req);
1030 }
1031
1032 static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode, off_t offset, off_t length,
1033 fuse_file_info* fi) {
1034 ATRACE_CALL();
1035 struct fuse* fuse = get_fuse(req);
1036
1037 handle* h = reinterpret_cast<handle*>(fi->fh);
1038 auto err = fallocate(h->fd, mode, offset, length);
1039 fuse_reply_err(req, err ? errno : 0);
1040 }
1041
1042 static void pf_getattr(fuse_req_t req,
1043 fuse_ino_t ino,
1044 struct fuse_file_info* fi) {
1045 ATRACE_CALL();
1046 struct fuse* fuse = get_fuse(req);
1047 node* node = fuse->FromInode(ino);
1048 if (!node) {
1049 fuse_reply_err(req, ENOENT);
1050 return;
1051 }
1052 const string& path = get_path(node);
1053 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1054 fuse_reply_err(req, ENOENT);
1055 return;
1056 }
1057 TRACE_NODE(node, req);
1058
1059 struct stat s;
1060 memset(&s, 0, sizeof(s));
1061 if (lstat(path.c_str(), &s) < 0) {
1062 fuse_reply_err(req, errno);
1063 } else {
1064 fuse_reply_attr(req, &s,
1065 fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max());
1066 }
1067 }
1068
1069 static void pf_setattr(fuse_req_t req,
1070 fuse_ino_t ino,
1071 struct stat* attr,
1072 int to_set,
1073 struct fuse_file_info* fi) {
1074 ATRACE_CALL();
1075 struct fuse* fuse = get_fuse(req);
1076 node* node = fuse->FromInode(ino);
1077 if (!node) {
1078 fuse_reply_err(req, ENOENT);
1079 return;
1080 }
1081 const string& path = get_path(node);
1082 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1083 fuse_reply_err(req, ENOENT);
1084 return;
1085 }
1086
1087 int fd = -1;
1088 if (fi) {
1089 // If we have a file_info, setattr was called with an fd so use the fd instead of path
1090 handle* h = reinterpret_cast<handle*>(fi->fh);
1091 fd = h->fd;
1092 } else {
1093 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1094 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
1095 path, path, ctx->uid, ctx->pid, node->GetTransformsReason(), true /* for_write */,
1096 false /* redact */, false /* log_transforms_metrics */);
1097
1098 if (!result) {
1099 fuse_reply_err(req, EFAULT);
1100 return;
1101 }
1102
1103 if (result->status) {
1104 fuse_reply_err(req, EACCES);
1105 return;
1106 }
1107 }
1108 struct timespec times[2];
1109 TRACE_NODE(node, req);
1110
1111 /* XXX: incomplete implementation on purpose.
1112 * chmod/chown should NEVER be implemented.*/
1113
1114 if ((to_set & FUSE_SET_ATTR_SIZE)) {
1115 int res = 0;
1116 if (fd == -1) {
1117 res = truncate64(path.c_str(), attr->st_size);
1118 } else {
1119 res = ftruncate64(fd, attr->st_size);
1120 }
1121
1122 if (res < 0) {
1123 fuse_reply_err(req, errno);
1124 return;
1125 }
1126 }
1127
1128     /* Handle changing atime and mtime. If FATTR_ATIME and FATTR_ATIME_NOW
1129 * are both set, then set it to the current time. Else, set it to the
1130 * time specified in the request. Same goes for mtime. Use utimensat(2)
1131 * as it allows ATIME and MTIME to be changed independently, and has
1132 * nanosecond resolution which fuse also has.
1133 */
1134 if (to_set & (FATTR_ATIME | FATTR_MTIME)) {
1135 times[0].tv_nsec = UTIME_OMIT;
1136 times[1].tv_nsec = UTIME_OMIT;
1137 if (to_set & FATTR_ATIME) {
1138 if (to_set & FATTR_ATIME_NOW) {
1139 times[0].tv_nsec = UTIME_NOW;
1140 } else {
1141 times[0] = attr->st_atim;
1142 }
1143 }
1144
1145 if (to_set & FATTR_MTIME) {
1146 if (to_set & FATTR_MTIME_NOW) {
1147 times[1].tv_nsec = UTIME_NOW;
1148 } else {
1149 times[1] = attr->st_mtim;
1150 }
1151 }
1152
1153 TRACE_NODE(node, req);
1154 int res = 0;
1155 if (fd == -1) {
1156 res = utimensat(-1, path.c_str(), times, 0);
1157 } else {
1158 res = futimens(fd, times);
1159 }
1160
1161 if (res < 0) {
1162 fuse_reply_err(req, errno);
1163 return;
1164 }
1165 }
1166
1167 lstat(path.c_str(), attr);
1168 fuse_reply_attr(req, attr, fuse->ShouldNotCache(path) ? 0 : std::numeric_limits<double>::max());
1169 }
1170
1171 static void pf_canonical_path(fuse_req_t req, fuse_ino_t ino)
1172 {
1173 struct fuse* fuse = get_fuse(req);
1174 node* node = fuse->FromInode(ino);
1175 const string& path = node ? get_path(node) : "";
1176
1177 if (node && is_app_accessible_path(fuse, path, req->ctx.uid)) {
1178 // TODO(b/147482155): Check that uid has access to |path| and its contents
1179 fuse_reply_canonical_path(req, path.c_str());
1180 return;
1181 }
1182 fuse_reply_err(req, ENOENT);
1183 }
1184
1185 static void pf_mknod(fuse_req_t req,
1186 fuse_ino_t parent,
1187 const char* name,
1188 mode_t mode,
1189 dev_t rdev) {
1190 ATRACE_CALL();
1191 struct fuse* fuse = get_fuse(req);
1192 node* parent_node = fuse->FromInode(parent);
1193 if (!parent_node) {
1194 fuse_reply_err(req, ENOENT);
1195 return;
1196 }
1197 string parent_path = parent_node->BuildPath();
1198 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
1199 fuse_reply_err(req, ENOENT);
1200 return;
1201 }
1202
1203 TRACE_NODE(parent_node, req);
1204
1205 const string child_path = parent_path + "/" + name;
1206
1207 mode = (mode & (~0777)) | 0664;
1208 if (mknod(child_path.c_str(), mode, rdev) < 0) {
1209 fuse_reply_err(req, errno);
1210 return;
1211 }
1212
1213 int error_code = 0;
1214 struct fuse_entry_param e;
1215 if (make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
1216 FuseOp::mknod)) {
1217 fuse_reply_entry(req, &e);
1218 } else {
1219 CHECK(error_code != 0);
1220 fuse_reply_err(req, error_code);
1221 }
1222 }
1223
1224 static void pf_mkdir(fuse_req_t req,
1225 fuse_ino_t parent,
1226 const char* name,
1227 mode_t mode) {
1228 ATRACE_CALL();
1229 struct fuse* fuse = get_fuse(req);
1230 node* parent_node = fuse->FromInode(parent);
1231 if (!parent_node) {
1232 fuse_reply_err(req, ENOENT);
1233 return;
1234 }
1235 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1236 const string parent_path = parent_node->BuildPath();
1237 if (!is_app_accessible_path(fuse, parent_path, ctx->uid)) {
1238 fuse_reply_err(req, ENOENT);
1239 return;
1240 }
1241
1242 TRACE_NODE(parent_node, req);
1243
1244 const string child_path = parent_path + "/" + name;
1245
1246 int status = fuse->mp->IsCreatingDirAllowed(child_path, ctx->uid);
1247 if (status) {
1248 fuse_reply_err(req, status);
1249 return;
1250 }
1251
1252 mode = (mode & (~0777)) | 0775;
1253 if (mkdir(child_path.c_str(), mode) < 0) {
1254 fuse_reply_err(req, errno);
1255 return;
1256 }
1257
1258 int error_code = 0;
1259 struct fuse_entry_param e;
1260 if (make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
1261 FuseOp::mkdir)) {
1262 fuse_reply_entry(req, &e);
1263 } else {
1264 CHECK(error_code != 0);
1265 fuse_reply_err(req, error_code);
1266 }
1267 }
1268
1269 static void pf_unlink(fuse_req_t req, fuse_ino_t parent, const char* name) {
1270 ATRACE_CALL();
1271 struct fuse* fuse = get_fuse(req);
1272 node* parent_node = fuse->FromInode(parent);
1273 if (!parent_node) {
1274 fuse_reply_err(req, ENOENT);
1275 return;
1276 }
1277 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1278 const string parent_path = parent_node->BuildPath();
1279 if (!is_app_accessible_path(fuse, parent_path, ctx->uid)) {
1280 fuse_reply_err(req, ENOENT);
1281 return;
1282 }
1283
1284 TRACE_NODE(parent_node, req);
1285
1286 const string child_path = parent_path + "/" + name;
1287
1288 int status = fuse->mp->DeleteFile(child_path, ctx->uid);
1289 if (status) {
1290 fuse_reply_err(req, status);
1291 return;
1292 }
1293
1294 // TODO(b/169306422): Log each deleted node
1295 parent_node->SetDeletedForChild(name);
1296 fuse_reply_err(req, 0);
1297 }
1298
1299 static void pf_rmdir(fuse_req_t req, fuse_ino_t parent, const char* name) {
1300 ATRACE_CALL();
1301 struct fuse* fuse = get_fuse(req);
1302 node* parent_node = fuse->FromInode(parent);
1303 if (!parent_node) {
1304 fuse_reply_err(req, ENOENT);
1305 return;
1306 }
1307 const string parent_path = parent_node->BuildPath();
1308 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
1309 fuse_reply_err(req, ENOENT);
1310 return;
1311 }
1312
1313 if (is_transforms_dir_path(parent_path, fuse)) {
1314 // .transforms is a special daemon controlled dir so apps shouldn't be able to see it via
1315 // readdir, and any dir operations attempted on it should fail
1316 fuse_reply_err(req, ENOENT);
1317 return;
1318 }
1319
1320 TRACE_NODE(parent_node, req);
1321
1322 const string child_path = parent_path + "/" + name;
1323
1324 int status = fuse->mp->IsDeletingDirAllowed(child_path, req->ctx.uid);
1325 if (status) {
1326 fuse_reply_err(req, status);
1327 return;
1328 }
1329
1330 if (rmdir(child_path.c_str()) < 0) {
1331 fuse_reply_err(req, errno);
1332 return;
1333 }
1334
1335 node* child_node = parent_node->LookupChildByName(name, false /* acquire */);
1336 TRACE_NODE(child_node, req);
1337 if (child_node) {
1338 child_node->SetDeleted();
1339 }
1340
1341 fuse_reply_err(req, 0);
1342 }
1343 /*
1344 static void pf_symlink(fuse_req_t req, const char* link, fuse_ino_t parent,
1345 const char* name)
1346 {
1347 cout << "TODO:" << __func__;
1348 }
1349 */
1350 static int do_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
1351 const char* new_name, unsigned int flags) {
1352 ATRACE_CALL();
1353 struct fuse* fuse = get_fuse(req);
1354
1355 // VFS handles request with RENAME_NOREPLACE by ensuring that new file does not exist
1356 // before redirecting the call to FuseDaemon.
1357 if (flags & ~RENAME_NOREPLACE) {
1358 return EINVAL;
1359 }
1360
1361 node* old_parent_node = fuse->FromInode(parent);
1362 if (!old_parent_node) return ENOENT;
1363 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1364 const string old_parent_path = old_parent_node->BuildPath();
1365 if (!is_app_accessible_path(fuse, old_parent_path, ctx->uid)) {
1366 return ENOENT;
1367 }
1368
1369 if (is_transforms_dir_path(old_parent_path, fuse)) {
1370 // .transforms is a special daemon controlled dir so apps shouldn't be able to see it via
1371 // readdir, and any dir operations attempted on it should fail
1372 return ENOENT;
1373 }
1374
1375 node* new_parent_node;
1376 if (fuse->bpf) {
1377 new_parent_node = fuse->FromInodeNoThrow(new_parent);
1378 if (!new_parent_node) return EXDEV;
1379 } else {
1380 new_parent_node = fuse->FromInode(new_parent);
1381 if (!new_parent_node) return ENOENT;
1382 }
1383 const string new_parent_path = new_parent_node->BuildPath();
1384 if (fuse->bpf && is_bpf_backing_path(new_parent_path)) {
1385 return EXDEV;
1386 }
1387 if (!is_app_accessible_path(fuse, new_parent_path, ctx->uid)) {
1388 return ENOENT;
1389 }
1390
1391 if (!old_parent_node || !new_parent_node) {
1392 return ENOENT;
1393 } else if (parent == new_parent && name == new_name) {
1394 // No rename required.
1395 return 0;
1396 }
1397
1398 TRACE_NODE(old_parent_node, req);
1399 TRACE_NODE(new_parent_node, req);
1400
1401 const string old_child_path = old_parent_path + "/" + name;
1402 const string new_child_path = new_parent_path + "/" + new_name;
1403
1404 if (android::base::EqualsIgnoreCase(fuse->GetEffectiveRootPath() + "/android", old_child_path)) {
1405 // Prevent renaming Android/ dir since it contains bind-mounts on the primary volume
1406 return EACCES;
1407 }
1408
1409 // TODO(b/147408834): Check ENOTEMPTY & EEXIST error conditions before JNI call.
1410 const int res = fuse->mp->Rename(old_child_path, new_child_path, req->ctx.uid);
1411 // TODO(b/145663158): Lookups can go out of sync if file/directory is actually moved but
1412 // EFAULT/EIO is reported due to JNI exception.
1413 if (res == 0) {
1414 // Mark any existing destination nodes as deleted. This fixes the following edge case:
1415 // 1. New destination node is forgotten
1416 // 2. Old destination node is not forgotten because there's still an open fd ref to it
1417 // 3. Lookup for |new_name| returns old destination node with stale metadata
1418 new_parent_node->SetDeletedForChild(new_name);
1419 // TODO(b/169306422): Log each renamed node
1420 old_parent_node->RenameChild(name, new_name, new_parent_node);
1421 }
1422 return res;
1423 }
1424
1425 static void pf_rename(fuse_req_t req, fuse_ino_t parent, const char* name, fuse_ino_t new_parent,
1426 const char* new_name, unsigned int flags) {
1427 int res = do_rename(req, parent, name, new_parent, new_name, flags);
1428 fuse_reply_err(req, res);
1429 }
1430
1431 /*
1432 static void pf_link(fuse_req_t req, fuse_ino_t ino, fuse_ino_t new_parent,
1433 const char* new_name)
1434 {
1435 cout << "TODO:" << __func__;
1436 }
1437 */
1438
1439 static handle* create_handle_for_node(struct fuse* fuse, const string& path, int fd, uid_t uid,
1440 uid_t transforms_uid, node* node, const RedactionInfo* ri,
1441 const bool allow_passthrough, const bool open_info_direct_io,
1442 int* keep_cache) {
1443 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
1444
1445 bool redaction_needed = ri->isRedactionNeeded();
1446 handle* handle = nullptr;
1447 int transforms = node->GetTransforms();
1448 bool transforms_complete = node->IsTransformsComplete();
1449 if (transforms_uid > 0) {
1450 CHECK(transforms);
1451 }
1452
1453 if (fuse->passthrough && allow_passthrough) {
1454 *keep_cache = transforms_complete && !fuse->upstream_passthrough;
1455         // We only enable passthrough iff these 2 conditions hold
1456 // 1. Redaction is not needed
1457 // 2. Node transforms are completed, e.g transcoding.
1458 // (2) is important because we transcode lazily (on the first read) and with passthrough,
1459 // we will never get a read into the FUSE daemon, so passthrough would have returned
1460 // arbitrary bytes the first time around. However, if we ensure that transforms are
1461 // completed, then it's safe to use passthrough. Additionally, transcoded nodes never
1462 // require redaction so (2) implies (1)
1463 handle = new struct handle(fd, ri, !open_info_direct_io /* cached */,
1464 !redaction_needed && transforms_complete /* passthrough */, uid,
1465 transforms_uid);
1466 } else {
1467 // Without fuse->passthrough, we don't want to use the FUSE VFS cache in two cases:
1468 // 1. When redaction is needed because app A with EXIF access might access
1469 // a region that should have been redacted for app B without EXIF access, but app B on
1470 // a subsequent read, will be able to see the EXIF data because the read request for
1471 // that region will be served from cache and not get to the FUSE daemon
1472 // 2. When the file has a read or write lock on it. This means that the MediaProvider
1473 // has given an fd to the lower file system to an app. There are two cases where using
1474 // the cache in this case can be a problem:
1475 // a. Writing to a FUSE fd with caching enabled will use the write-back cache and a
1476 // subsequent read from the lower fs fd will not see the write.
1477 // b. Reading from a FUSE fd with caching enabled may not see the latest writes using
1478 // the lower fs fd because those writes did not go through the FUSE layer and reads from
1479 // FUSE after that write may be served from cache
1480 bool has_redacted = node->HasRedactedCache();
1481 bool is_redaction_change =
1482 (redaction_needed && !has_redacted) || (!redaction_needed && has_redacted);
1483 bool is_cached_file_open = node->HasCachedHandle();
1484 bool direct_io = open_info_direct_io || (is_cached_file_open && is_redaction_change) ||
1485 is_file_locked(fd, path) || fuse->ShouldNotCache(path);
1486
1487 if (!is_cached_file_open && is_redaction_change) {
1488 node->SetRedactedCache(redaction_needed);
1489 // Purges stale page cache before open
1490 *keep_cache = 0;
1491 } else {
1492 *keep_cache = transforms_complete;
1493 }
1494 handle = new struct handle(fd, ri, !direct_io /* cached */, false /* passthrough */, uid,
1495 transforms_uid);
1496 }
1497
1498 node->AddHandle(handle);
1499 return handle;
1500 }
1501
1502 static bool do_passthrough_enable(fuse_req_t req, struct fuse_file_info* fi, unsigned int fd,
1503 node* node) {
1504 struct fuse* fuse = get_fuse(req);
1505
1506 if (fuse->upstream_passthrough) {
1507 int backing_id = node->GetBackingId();
1508 if (!backing_id) {
1509 backing_id = fuse_passthrough_open(req, fd);
1510 if (!backing_id) return false;
1511 // We only ever want one backing id per backed file
1512 if (!node->SetBackingId(backing_id)) {
1513 fuse_passthrough_close(req, backing_id);
1514 backing_id = node->GetBackingId();
1515 if (!backing_id) return false;
1516 }
1517 }
1518
1519 fi->backing_id = backing_id;
1520 } else {
1521 int passthrough_fh = fuse_passthrough_enable(req, fd);
1522
1523 if (passthrough_fh <= 0) {
1524 return false;
1525 }
1526
1527 fi->passthrough_fh = passthrough_fh;
1528 }
1529 return true;
1530 }
1531
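// Sanitizes the open(2) flags from the FUSE request before they are used on the lower
// filesystem: O_DIRECT still forces direct IO on the FUSE file but is stripped on the primary
// volume, O_WRONLY is widened to O_RDWR, and O_APPEND is dropped (see per-flag comments below).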
1532 static OpenInfo parse_open_flags(const string& path, const int in_flags) {
1533 const bool for_write = in_flags & (O_WRONLY | O_RDWR);
1534 int out_flags = in_flags;
1535 bool direct_io = false;
1536
1537 if (in_flags & O_DIRECT) {
1538 // Set direct IO on the FUSE fs file
1539 direct_io = true;
1540
1541 if (android::base::StartsWith(path, PRIMARY_VOLUME_PREFIX)) {
1542 // Remove O_DIRECT because there are strict alignment requirements for direct IO and
1543 // there were some historical bugs affecting encrypted block devices.
1544 // Hence, this is only supported on public volumes.
1545 out_flags &= ~O_DIRECT;
1546 }
1547 }
1548 if (in_flags & O_WRONLY) {
1549 // Replace O_WRONLY with O_RDWR because even if the FUSE fd is opened write-only, the FUSE
1550 // driver might issue reads on the lower fs with the writeback cache enabled
1551 out_flags &= ~O_WRONLY;
1552 out_flags |= O_RDWR;
1553 }
1554 if (in_flags & O_APPEND) {
1555 // Remove O_APPEND because passing it to the lower fs can lead to file corruption when
1556 // multiple FUSE threads race with one another. With the writeback cache enabled, the FUSE
1557 // driver already handles O_APPEND
1558 out_flags &= ~O_APPEND;
1559 }
1560
1561 return {.flags = out_flags, .for_write = for_write, .direct_io = direct_io};
1562 }
1563
1564 static void fill_fuse_file_info(const handle* handle, const OpenInfo* open_info,
1565 const int keep_cache, struct fuse_file_info* fi) {
1566 fi->fh = ptr_to_id(handle);
1567 fi->keep_cache = keep_cache;
1568 fi->direct_io = !handle->cached;
1569 }
1570
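// FUSE open handler: checks that the path is visible to the calling app, asks MediaProvider
// for an open decision (permissions, redaction, transforms), opens the lower file unless
// MediaProvider already supplied an fd, and creates the handle used by later read/write calls.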
1571 static void pf_open(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi) {
1572 ATRACE_CALL();
1573 struct fuse* fuse = get_fuse(req);
1574 node* node = fuse->FromInode(ino);
1575 if (!node) {
1576 fuse_reply_err(req, ENOENT);
1577 return;
1578 }
1579 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1580 const string& io_path = get_path(node);
1581 const string& build_path = node->BuildPath();
1582 if (!is_app_accessible_path(fuse, io_path, ctx->uid)) {
1583 fuse_reply_err(req, ENOENT);
1584 return;
1585 }
1586
1587 const OpenInfo open_info = parse_open_flags(io_path, fi->flags);
1588
1589 if (open_info.for_write && node->GetTransforms()) {
1590 TRACE_NODE(node, req) << "write with transforms";
1591 } else {
1592 TRACE_NODE(node, req) << (open_info.for_write ? "write" : "read");
1593 }
1594
1595 // Force permission check with the build path because the MediaProvider database might not be
1596 // aware of the io_path
1597 // We don't redact if the caller was granted write permission for this file
1598 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
1599 build_path, io_path, ctx->uid, ctx->pid, node->GetTransformsReason(),
1600 open_info.for_write, !open_info.for_write /* redact */,
1601 true /* log_transforms_metrics */);
1602 if (!result) {
1603 fuse_reply_err(req, EFAULT);
1604 return;
1605 }
1606
1607 if (result->status) {
1608 fuse_reply_err(req, result->status);
1609 return;
1610 }
1611
1612 int fd = -1;
1613 const bool is_fd_from_java = result->fd >= 0;
1614 if (is_fd_from_java) {
1615 fd = result->fd;
1616 TRACE_NODE(node, req) << "opened in Java";
1617 } else {
1618 fd = open(io_path.c_str(), open_info.flags);
1619 if (fd < 0) {
1620 fuse_reply_err(req, errno);
1621 return;
1622 }
1623 }
1624
1625 int keep_cache = 1;
1626 // If is_fd_from_java==true, we disallow passthrough because the fd may point back into the
1627 // FUSE fs if it was obtained from another process
1628 const handle* h = create_handle_for_node(fuse, io_path, fd, result->uid, result->transforms_uid,
1629 node, result->redaction_info.release(),
1630 /* allow_passthrough */ !is_fd_from_java,
1631 open_info.direct_io, &keep_cache);
1632 fill_fuse_file_info(h, &open_info, keep_cache, fi);
1633
1634 // TODO(b/173190192) requiring that h->cached be enabled in order to
1635 // use FUSE passthrough is a conservative rule and might be dropped as
1636 // soon as its correctness is demonstrated.
1637 if (h->passthrough && !do_passthrough_enable(req, fi, fd, node)) {
1638 // TODO: Should we crash here so we can find errors easily?
1639 PLOG(ERROR) << "Passthrough OPEN failed for " << io_path;
1640 fuse_reply_err(req, EFAULT);
1641 return;
1642 }
1643
1644 fuse_reply_open(req, fi);
1645 }
1646
1647 static void do_read(fuse_req_t req, size_t size, off_t off, struct fuse_file_info* fi,
1648 bool direct_io) {
1649 handle* h = reinterpret_cast<handle*>(fi->fh);
1650 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size);
1651
1652 buf.buf[0].fd = h->fd;
1653 buf.buf[0].pos = off;
1654 buf.buf[0].flags =
1655 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1656 if (direct_io) {
1657 // sdcardfs does not register splice_read_file_operations and some requests fail with EFAULT
1658 // Specifically, FUSE splice is only enabled for 8KB+ buffers, hence such reads fail
1659 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags)FUSE_BUF_NO_SPLICE);
1660 } else {
1661 fuse_reply_data(req, &buf, (enum fuse_buf_copy_flags)0);
1662 }
1663 }
1664
1665 /**
1666 * Sets the parameters for a fuse_buf that reads from memory, including flags.
1667 * Makes buf->mem point to an already mapped region of zeroized memory.
1668 * This memory is read only.
1669 */
1670 static void create_mem_fuse_buf(size_t size, fuse_buf* buf, struct fuse* fuse) {
1671 buf->size = size;
1672 buf->mem = fuse->zero_addr;
1673 buf->flags = static_cast<fuse_buf_flags>(0 /*read from fuse_buf.mem*/);
1674 buf->pos = -1;
1675 buf->fd = -1;
1676 }
1677
1678 /**
1679 * Sets the parameters for a fuse_buf that reads from file, including flags.
1680 */
1681 static void create_file_fuse_buf(size_t size, off_t pos, int fd, fuse_buf* buf) {
1682 buf->size = size;
1683 buf->fd = fd;
1684 buf->pos = pos;
1685 buf->flags = static_cast<fuse_buf_flags>(FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1686 buf->mem = nullptr;
1687 }
1688
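// Serves a read on a file with redaction ranges. The requested window is split into alternating
// fuse_bufs: bytes outside redaction ranges come from the lower file's fd, bytes inside them
// come from the pre-mapped zero page at fuse->zero_addr. Illustrative example with hypothetical
// ranges: a read of [0, 100) on a file whose RedactionInfo redacts [10, 20) yields three bufs:
// file [0, 10), zeros [10, 20), file [20, 100).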
1689 static void do_read_with_redaction(fuse_req_t req, size_t size, off_t off, fuse_file_info* fi,
1690 bool direct_io) {
1691 handle* h = reinterpret_cast<handle*>(fi->fh);
1692
1693 std::vector<ReadRange> ranges;
1694 h->ri->getReadRanges(off, size, &ranges);
1695
1696 // As an optimization, return early if there are no ranges to redact.
1697 if (ranges.size() == 0) {
1698 do_read(req, size, off, fi, direct_io);
1699 return;
1700 }
1701
1702 const size_t num_bufs = ranges.size();
1703 auto bufvec_ptr = std::unique_ptr<fuse_bufvec, decltype(free)*>{
1704 reinterpret_cast<fuse_bufvec*>(
1705 malloc(sizeof(fuse_bufvec) + (num_bufs - 1) * sizeof(fuse_buf))),
1706 free};
1707 fuse_bufvec& bufvec = *bufvec_ptr;
1708
1709 // initialize bufvec
1710 bufvec.count = num_bufs;
1711 bufvec.idx = 0;
1712 bufvec.off = 0;
1713
1714 for (size_t i = 0; i < num_bufs; ++i) {
1715 const ReadRange& range = ranges[i];
1716 if (range.is_redaction) {
1717 create_mem_fuse_buf(range.size, &(bufvec.buf[i]), get_fuse(req));
1718 } else {
1719 create_file_fuse_buf(range.size, range.start, h->fd, &(bufvec.buf[i]));
1720 }
1721 }
1722
1723 fuse_reply_data(req, &bufvec, static_cast<fuse_buf_copy_flags>(0));
1724 }
1725
1726 static void pf_read(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
1727 struct fuse_file_info* fi) {
1728 ATRACE_CALL();
1729 handle* h = reinterpret_cast<handle*>(fi->fh);
1730 if (h == nullptr) {
1731 return;
1732 }
1733 const bool direct_io = !h->cached;
1734 struct fuse* fuse = get_fuse(req);
1735
1736 node* node = fuse->FromInode(ino);
1737
1738 if (!node->IsTransformsComplete()) {
1739 if (!fuse->mp->Transform(node->BuildPath(), node->GetIoPath(), node->GetTransforms(),
1740 node->GetTransformsReason(), req->ctx.uid, h->uid,
1741 h->transforms_uid)) {
1742 fuse_reply_err(req, EFAULT);
1743 return;
1744 }
1745 node->SetTransformsComplete(true);
1746 }
1747
1748 fuse->fadviser.Record(h->fd, size);
1749
1750 if (h->ri->isRedactionNeeded()) {
1751 do_read_with_redaction(req, size, off, fi, direct_io);
1752 } else {
1753 do_read(req, size, off, fi, direct_io);
1754 }
1755 }
1756
1757 /*
1758 static void pf_write(fuse_req_t req, fuse_ino_t ino, const char* buf,
1759 size_t size, off_t off, struct fuse_file_info* fi)
1760 {
1761 cout << "TODO:" << __func__;
1762 }
1763 */
1764
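// FUSE write handler: copies the incoming buffer vector straight to the lower fd and records
// the write size with fuse->fadviser before replying (see the ordering note below).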
1765 static void pf_write_buf(fuse_req_t req,
1766 fuse_ino_t ino,
1767 struct fuse_bufvec* bufv,
1768 off_t off,
1769 struct fuse_file_info* fi) {
1770 ATRACE_CALL();
1771 handle* h = reinterpret_cast<handle*>(fi->fh);
1772 struct fuse_bufvec buf = FUSE_BUFVEC_INIT(fuse_buf_size(bufv));
1773 ssize_t size;
1774 struct fuse* fuse = get_fuse(req);
1775
1776 buf.buf[0].fd = h->fd;
1777 buf.buf[0].pos = off;
1778 buf.buf[0].flags =
1779 (enum fuse_buf_flags) (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK);
1780 size = fuse_buf_copy(&buf, bufv, (enum fuse_buf_copy_flags) 0);
1781
1782 if (size < 0)
1783 fuse_reply_err(req, -size);
1784 else {
1785 // Execute Record *before* fuse_reply_write to avoid the following ordering:
1786 // fuse_reply_write -> pf_release (destroy handle) -> Record (use handle after free)
1787 fuse->fadviser.Record(h->fd, size);
1788 fuse_reply_write(req, size);
1789 }
1790 }
1791 // Haven't tested this one. Not sure what calls it.
1792 #if 0
1793 static void pf_copy_file_range(fuse_req_t req, fuse_ino_t ino_in,
1794 off_t off_in, struct fuse_file_info* fi_in,
1795 fuse_ino_t ino_out, off_t off_out,
1796 struct fuse_file_info* fi_out, size_t len,
1797 int flags)
1798 {
1799 handle* h_in = reinterpret_cast<handle *>(fi_in->fh);
1800 handle* h_out = reinterpret_cast<handle *>(fi_out->fh);
1801 struct fuse_bufvec buf_in = FUSE_BUFVEC_INIT(len);
1802 struct fuse_bufvec buf_out = FUSE_BUFVEC_INIT(len);
1803 ssize_t size;
1804
1805 buf_in.buf[0].fd = h_in->fd;
1806 buf_in.buf[0].pos = off_in;
1807 buf_in.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1808
1809 buf_out.buf[0].fd = h_out->fd;
1810 buf_out.buf[0].pos = off_out;
1811 buf_out.buf[0].flags = (enum fuse_buf_flags)(FUSE_BUF_IS_FD|FUSE_BUF_FD_SEEK);
1812 size = fuse_buf_copy(&buf_out, &buf_in, (enum fuse_buf_copy_flags) 0);
1813
1814 if (size < 0) {
1815 fuse_reply_err(req, -size);
1816 }
1817
1818 fuse_reply_write(req, size);
1819 }
1820 #endif
1821
1822 /*
1823  * This function does nothing except serve as a placeholder to keep the FUSE
1824  * driver handling flushes on close(2).
1825  * In fact, kernels prior to 5.8 stop attempting to flush the cache on close(2)
1826 * if the .flush operation is not implemented by the FUSE daemon.
1827 * This has been fixed in the kernel by commit 614c026e8a46 ("fuse: always
1828 * flush dirty data on close(2)"), merged in Linux 5.8, but until then
1829 * userspace must mitigate this behavior by not leaving the .flush function
1830 * pointer empty.
1831 */
1832 static void pf_flush(fuse_req_t req,
1833 fuse_ino_t ino,
1834 struct fuse_file_info* fi) {
1835 ATRACE_CALL();
1836 struct fuse* fuse = get_fuse(req);
1837 TRACE_NODE(nullptr, req) << "noop";
1838 fuse_reply_err(req, 0);
1839 }
1840
1841 static void pf_release(fuse_req_t req,
1842 fuse_ino_t ino,
1843 struct fuse_file_info* fi) {
1844 ATRACE_CALL();
1845 struct fuse* fuse = get_fuse(req);
1846
1847 node* node = fuse->FromInode(ino);
1848 handle* h = reinterpret_cast<handle*>(fi->fh);
1849 TRACE_NODE(node, req);
1850
1851 fuse->fadviser.Close(h->fd);
1852 if (node) {
1853 node->DestroyHandle(h);
1854 }
1855
1856 fuse_reply_err(req, 0);
1857 }
1858
1859 static int do_sync_common(int fd, bool datasync) {
1860 int res = datasync ? fdatasync(fd) : fsync(fd);
1861
1862 if (res == -1) return errno;
1863 return 0;
1864 }
1865
1866 static void pf_fsync(fuse_req_t req,
1867 fuse_ino_t ino,
1868 int datasync,
1869 struct fuse_file_info* fi) {
1870 ATRACE_CALL();
1871 handle* h = reinterpret_cast<handle*>(fi->fh);
1872 int err = do_sync_common(h->fd, datasync);
1873
1874 fuse_reply_err(req, err);
1875 }
1876
1877 static void pf_fsyncdir(fuse_req_t req,
1878 fuse_ino_t ino,
1879 int datasync,
1880 struct fuse_file_info* fi) {
1881 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1882 int err = do_sync_common(dirfd(h->d), datasync);
1883
1884 fuse_reply_err(req, err);
1885 }
1886
1887 static void pf_opendir(fuse_req_t req,
1888 fuse_ino_t ino,
1889 struct fuse_file_info* fi) {
1890 ATRACE_CALL();
1891 struct fuse* fuse = get_fuse(req);
1892 node* node = fuse->FromInode(ino);
1893 if (!node) {
1894 fuse_reply_err(req, ENOENT);
1895 return;
1896 }
1897 const struct fuse_ctx* ctx = fuse_req_ctx(req);
1898 const string path = node->BuildPath();
1899 if (!is_app_accessible_path(fuse, path, ctx->uid)) {
1900 fuse_reply_err(req, ENOENT);
1901 return;
1902 }
1903
1904 TRACE_NODE(node, req);
1905
1906 int status = fuse->mp->IsOpendirAllowed(path, ctx->uid, /* forWrite */ false);
1907 if (status) {
1908 fuse_reply_err(req, status);
1909 return;
1910 }
1911
1912 DIR* dir = opendir(path.c_str());
1913 if (!dir) {
1914 fuse_reply_err(req, errno);
1915 return;
1916 }
1917
1918 dirhandle* h = new dirhandle(dir);
1919 node->AddDirHandle(h);
1920
1921 fi->fh = ptr_to_id(h);
1922 fuse_reply_open(req, fi);
1923 }
1924
1925 #define READDIR_BUF 32768LU
1926
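// Shared implementation of readdir and readdirplus. Directory entries are fetched from
// MediaProvider once per directory handle (on its first readdir() call) and then paged into
// the caller's buffer across successive calls, with progress tracked via h->next_off.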
1927 static void do_readdir_common(fuse_req_t req,
1928 fuse_ino_t ino,
1929 size_t size,
1930 off_t off,
1931 struct fuse_file_info* fi,
1932 bool plus) {
1933 struct fuse* fuse = get_fuse(req);
1934 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
1935 size_t len = std::min<size_t>(size, READDIR_BUF);
1936 char buf[READDIR_BUF];
1937 size_t used = 0;
1938 std::shared_ptr<DirectoryEntry> de;
1939
1940 struct fuse_entry_param e;
1941 size_t entry_size = 0;
1942
1943 node* node = fuse->FromInode(ino);
1944 if (!node) {
1945 fuse_reply_err(req, ENOENT);
1946 return;
1947 }
1948 const string path = node->BuildPath();
1949 if (!is_app_accessible_path(fuse, path, req->ctx.uid)) {
1950 fuse_reply_err(req, ENOENT);
1951 return;
1952 }
1953
1954 TRACE_NODE(node, req);
1955
1956 // We don't return EACCES for compatibility with the previous implementation.
1957 // It just ignored entries causing EACCES.
1958 if (!is_user_accessible_path(req, fuse, path)) {
1959 fuse_reply_buf(req, buf, used);
1960 return;
1961 }
1962
1963 // Get all directory entries from MediaProvider on the first readdir() call of a
1964 // directory handle. h->next_off == 0 indicates that the current readdir() call
1965 // is the first readdir() call for this directory handle; this avoids multiple JNI calls
1966 // for a single directory handle.
1967 if (h->next_off == 0) {
1968 h->de = fuse->mp->GetDirectoryEntries(req->ctx.uid, path, h->d);
1969 }
1970 // If the last entry in the previous readdir() call was rejected due to
1971 // buffer capacity constraints, update directory offset to start from
1972 // previously rejected entry. Directory offset can also change if there was
1973 // a seekdir() on the given directory handle.
1974 if (off != h->next_off) {
1975 h->next_off = off;
1976 }
1977 const int num_directory_entries = h->de.size();
1978 // Check for errors. Any error/exception occurred while obtaining directory
1979 // entries will be indicated by marking first directory entry name as empty
1980 // string. In the erroneous case corresponding d_type will hold error number.
1981 if (num_directory_entries && h->de[0]->d_name.empty()) {
1982 fuse_reply_err(req, h->de[0]->d_type);
1983 return;
1984 }
1985
1986 while (h->next_off < num_directory_entries) {
1987 de = h->de[h->next_off];
1988 entry_size = 0;
1989 h->next_off++;
1990 if (plus) {
1991 int error_code = 0;
1992 // Skip validating user and app access as they are already performed on parent node
1993 if (do_lookup(req, ino, de->d_name.c_str(), &e, &error_code, FuseOp::readdir, false)) {
1994 entry_size = fuse_add_direntry_plus(req, buf + used, len - used, de->d_name.c_str(),
1995 &e, h->next_off);
1996 } else {
1997 // Ignore lookup errors on
1998 // 1. non-existing files returned from MediaProvider database.
1999 // 2. path that doesn't match FuseDaemon UID and calling uid.
2000 // 3. EIO / EINVAL may be returned on filesystem errors; try to
2001 // keep going to show other files in the directory.
2002
2003 if (error_code == ENOENT || error_code == EPERM || error_code == EACCES
2004 || error_code == EIO || error_code == EINVAL) continue;
2005 fuse_reply_err(req, error_code);
2006 return;
2007 }
2008 } else {
2009 // This should never happen because we have readdir_plus enabled without adaptive
2010 // readdir_plus, FUSE_CAP_READDIRPLUS_AUTO
2011 LOG(WARNING) << "Handling plain readdir for " << de->d_name << ". Invalid d_ino";
2012 e.attr.st_ino = FUSE_UNKNOWN_INO;
2013 e.attr.st_mode = de->d_type << 12;
2014 entry_size = fuse_add_direntry(req, buf + used, len - used, de->d_name.c_str(), &e.attr,
2015 h->next_off);
2016 }
2017 // If the buffer passed to fuse_add_direntry[_plus] is not large enough then
2018 // the entry is not added to the buffer but the size of the entry is still
2019 // returned. Check that the used size plus the returned entry size does not
2020 // exceed the actual buffer size to confirm the entry was added to the buffer.
2021 if (used + entry_size > len) {
2022 // When an entry is rejected, lookup called by readdir_plus will not be tracked by
2023 // kernel. Call forget on the rejected node to decrement the reference count.
2024 if (plus && e.ino > 0) {
2025 do_forget(req, fuse, e.ino, 1);
2026 }
2027 break;
2028 }
2029 used += entry_size;
2030 }
2031 fuse_reply_buf(req, buf, used);
2032 }
2033
2034 static void pf_readdir(fuse_req_t req, fuse_ino_t ino, size_t size, off_t off,
2035 struct fuse_file_info* fi) {
2036 ATRACE_CALL();
2037 do_readdir_common(req, ino, size, off, fi, false);
2038 }
2039
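// Rounds o up to the next multiple of s, e.g. round_up(13, 8) == 16. Used below to honor the
// 8-byte alignment of fuse_dirent records.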
2040 static off_t round_up(off_t o, size_t s) {
2041 return (o + s - 1) / s * s;
2042 }
2043
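// BPF readdir post-filter: re-checks each entry produced by the kernel against the caller and
// copies through only entries the caller may see (world/group/owner-executable paths, paths
// MediaProvider allows for this uid, and ".nomedia"). If every entry was filtered out,
// fro->again is set so the kernel knows to continue with further entries.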
2044 static void pf_readdir_postfilter(fuse_req_t req, fuse_ino_t ino, uint32_t error_in, off_t off_in,
2045 off_t off_out, size_t size_out, const void* dirents_in,
2046 struct fuse_file_info* fi) {
2047 struct fuse* fuse = get_fuse(req);
2048 char buf[READDIR_BUF];
2049 struct fuse_read_out* fro = (struct fuse_read_out*)(buf);
2050 size_t used = 0;
2051 bool redacted = false;
2052 char* dirents_out = (char*)(fro + 1);
2053
2054 ATRACE_CALL();
2055 node* node = fuse->FromInode(ino);
2056 if (!node) {
2057 fuse_reply_err(req, ENOENT);
2058 return;
2059 }
2060
2061 TRACE_NODE(node, req);
2062 const string path = node->BuildPath();
2063
2064 *fro = (struct fuse_read_out){
2065 .offset = (uint64_t)off_out,
2066 };
2067
2068 for (off_t in = 0; in < size_out;) {
2069 struct fuse_dirent* dirent_in = (struct fuse_dirent*)((char*)dirents_in + in);
2070 struct fuse_dirent* dirent_out = (struct fuse_dirent*)((char*)dirents_out + used);
2071 struct stat stats;
2072 int err;
2073
2074 std::string child_name(dirent_in->name, dirent_in->namelen);
2075 std::string child_path = path + "/" + child_name;
2076
2077 in += sizeof(*dirent_in) + round_up(dirent_in->namelen, sizeof(uint64_t));
2078 err = stat(child_path.c_str(), &stats);
2079 if (err == 0 &&
2080 ((stats.st_mode & 0001) || ((stats.st_mode & 0010) && req->ctx.gid == stats.st_gid) ||
2081 ((stats.st_mode & 0100) && req->ctx.uid == stats.st_uid) ||
2082 fuse->mp->isUidAllowedAccessToDataOrObbPath(req->ctx.uid, child_path) ||
2083 child_name == ".nomedia")) {
2084 *dirent_out = *dirent_in;
2085 strcpy(dirent_out->name, child_name.c_str());
2086 used += sizeof(*dirent_out) + round_up(dirent_out->namelen, sizeof(uint64_t));
2087 } else {
2088 redacted = true;
2089 }
2090 }
2091 if (redacted && used == 0) fro->again = 1;
2092 fuse_reply_buf(req, buf, sizeof(*fro) + used);
2093 }
2094
2095 static void pf_readdirplus(fuse_req_t req,
2096 fuse_ino_t ino,
2097 size_t size,
2098 off_t off,
2099 struct fuse_file_info* fi) {
2100 ATRACE_CALL();
2101 do_readdir_common(req, ino, size, off, fi, true);
2102 }
2103
2104 static void pf_releasedir(fuse_req_t req,
2105 fuse_ino_t ino,
2106 struct fuse_file_info* fi) {
2107 ATRACE_CALL();
2108 struct fuse* fuse = get_fuse(req);
2109
2110 node* node = fuse->FromInode(ino);
2111
2112 dirhandle* h = reinterpret_cast<dirhandle*>(fi->fh);
2113 TRACE_NODE(node, req);
2114 if (node) {
2115 node->DestroyDirHandle(h);
2116 }
2117
2118 fuse_reply_err(req, 0);
2119 }
2120
2121 static void pf_statfs(fuse_req_t req, fuse_ino_t ino) {
2122 ATRACE_CALL();
2123 struct statvfs st;
2124 struct fuse* fuse = get_fuse(req);
2125
2126 if (statvfs(fuse->root->GetName().c_str(), &st))
2127 fuse_reply_err(req, errno);
2128 else
2129 fuse_reply_statfs(req, &st);
2130 }
2131 /*
2132 static void pf_setxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
2133 const char* value, size_t size, int flags)
2134 {
2135 cout << "TODO:" << __func__;
2136 }
2137
2138 static void pf_getxattr(fuse_req_t req, fuse_ino_t ino, const char* name,
2139 size_t size)
2140 {
2141 cout << "TODO:" << __func__;
2142 }
2143
2144 static void pf_listxattr(fuse_req_t req, fuse_ino_t ino, size_t size)
2145 {
2146 cout << "TODO:" << __func__;
2147 }
2148
2149 static void pf_removexattr(fuse_req_t req, fuse_ino_t ino, const char* name)
2150 {
2151 cout << "TODO:" << __func__;
2152 }*/
2153
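// FUSE access handler. F_OK checks go straight to the lower filesystem; X_OK on regular files
// is always denied because the mount is MS_NOEXEC; read/write checks on directories and files
// are delegated to MediaProvider.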
2154 static void pf_access(fuse_req_t req, fuse_ino_t ino, int mask) {
2155 ATRACE_CALL();
2156 struct fuse* fuse = get_fuse(req);
2157
2158 node* node = fuse->FromInode(ino);
2159 if (!node) {
2160 fuse_reply_err(req, ENOENT);
2161 return;
2162 }
2163 const string path = node->BuildPath();
2164 if (path != PRIMARY_VOLUME_PREFIX && !is_app_accessible_path(fuse, path, req->ctx.uid)) {
2165 fuse_reply_err(req, ENOENT);
2166 return;
2167 }
2168 TRACE_NODE(node, req);
2169
2170 // exists() checks are always allowed.
2171 if (mask == F_OK) {
2172 int res = access(path.c_str(), F_OK);
2173 fuse_reply_err(req, res ? errno : 0);
2174 return;
2175 }
2176 struct stat stat;
2177 if (lstat(path.c_str(), &stat)) {
2178 // File doesn't exist
2179 fuse_reply_err(req, ENOENT);
2180 return;
2181 }
2182
2183 // For read and write permission checks we go to MediaProvider.
2184 int status = 0;
2185 bool for_write = mask & W_OK;
2186 bool is_directory = S_ISDIR(stat.st_mode);
2187 if (is_directory) {
2188 if (path == PRIMARY_VOLUME_PREFIX && mask == X_OK) {
2189 // Special case for this path: apps should be allowed to enter it,
2190 // but not list directory contents (which would be user numbers).
2191 int res = access(path.c_str(), X_OK);
2192 fuse_reply_err(req, res ? errno : 0);
2193 return;
2194 }
2195 status = fuse->mp->IsOpendirAllowed(path, req->ctx.uid, for_write);
2196 } else {
2197 if (mask & X_OK) {
2198 // Fuse is mounted with MS_NOEXEC.
2199 fuse_reply_err(req, EACCES);
2200 return;
2201 }
2202
2203 std::unique_ptr<FileOpenResult> result = fuse->mp->OnFileOpen(
2204 path, path, req->ctx.uid, req->ctx.pid, node->GetTransformsReason(), for_write,
2205 false /* redact */, false /* log_transforms_metrics */);
2206 if (!result) {
2207 status = EFAULT;
2208 } else if (result->status) {
2209 status = EACCES;
2210 }
2211 }
2212
2213 fuse_reply_err(req, status);
2214 }
2215
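// FUSE create handler: inserts the new file into the MediaProvider database first, then creates
// it on the lower filesystem (rolling back the database row if the open fails), builds the node
// and handle, and notifies MediaProvider via OnFileCreated.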
2216 static void pf_create(fuse_req_t req,
2217 fuse_ino_t parent,
2218 const char* name,
2219 mode_t mode,
2220 struct fuse_file_info* fi) {
2221 ATRACE_CALL();
2222 struct fuse* fuse = get_fuse(req);
2223 node* parent_node = fuse->FromInode(parent);
2224 if (!parent_node) {
2225 fuse_reply_err(req, ENOENT);
2226 return;
2227 }
2228 const string parent_path = parent_node->BuildPath();
2229 if (!is_app_accessible_path(fuse, parent_path, req->ctx.uid)) {
2230 fuse_reply_err(req, ENOENT);
2231 return;
2232 }
2233
2234 TRACE_NODE(parent_node, req);
2235
2236 const string child_path = parent_path + "/" + name;
2237
2238 const OpenInfo open_info = parse_open_flags(child_path, fi->flags);
2239
2240 int mp_return_code = fuse->mp->InsertFile(child_path.c_str(), req->ctx.uid);
2241 if (mp_return_code) {
2242 fuse_reply_err(req, mp_return_code);
2243 return;
2244 }
2245
2246 mode = (mode & (~0777)) | 0664;
2247 int fd = open(child_path.c_str(), open_info.flags, mode);
2248 if (fd < 0) {
2249 int error_code = errno;
2250 // We've already inserted the file into the MP database before the
2251 // failed open(), so that needs to be rolled back here.
2252 fuse->mp->DeleteFile(child_path.c_str(), req->ctx.uid);
2253 fuse_reply_err(req, error_code);
2254 return;
2255 }
2256
2257 int error_code = 0;
2258 struct fuse_entry_param e;
2259 node* node = make_node_entry(req, parent_node, name, parent_path, child_path, &e, &error_code,
2260 FuseOp::create);
2261 TRACE_NODE(node, req);
2262 if (!node) {
2263 CHECK(error_code != 0);
2264 fuse_reply_err(req, error_code);
2265 return;
2266 }
2267
2268 // Let MediaProvider know we've created a new file
2269 fuse->mp->OnFileCreated(child_path);
2270
2271 // TODO(b/147274248): Assume there will be no EXIF to redact.
2272 // This prevents crashing during reads but can be a security hole if a malicious app opens an fd
2273 // to the file before all the EXIF content is written. We could special case reads before the
2274 // first close after a file has just been created.
2275 int keep_cache = 1;
2276 const handle* h = create_handle_for_node(
2277 fuse, child_path, fd, req->ctx.uid, 0 /* transforms_uid */, node, new RedactionInfo(),
2278 /* allow_passthrough */ true, open_info.direct_io, &keep_cache);
2279 fill_fuse_file_info(h, &open_info, keep_cache, fi);
2280
2281 // TODO(b/173190192) requiring that h->cached be enabled in order to
2282 // use FUSE passthrough is a conservative rule and might be dropped as
2283 // soon as its correctness is demonstrated.
2284 if (h->passthrough && !do_passthrough_enable(req, fi, fd, node)) {
2285 PLOG(ERROR) << "Passthrough CREATE failed for " << child_path;
2286 fuse_reply_err(req, EFAULT);
2287 return;
2288 }
2289
2290 fuse_reply_create(req, &e, fi);
2291 }
2292 /*
2293 static void pf_getlk(fuse_req_t req, fuse_ino_t ino,
2294 struct fuse_file_info* fi, struct flock* lock)
2295 {
2296 cout << "TODO:" << __func__;
2297 }
2298
2299 static void pf_setlk(fuse_req_t req, fuse_ino_t ino,
2300 struct fuse_file_info* fi,
2301 struct flock* lock, int sleep)
2302 {
2303 cout << "TODO:" << __func__;
2304 }
2305
2306 static void pf_bmap(fuse_req_t req, fuse_ino_t ino, size_t blocksize,
2307 uint64_t idx)
2308 {
2309 cout << "TODO:" << __func__;
2310 }
2311
2312 static void pf_ioctl(fuse_req_t req, fuse_ino_t ino, unsigned int cmd,
2313 void* arg, struct fuse_file_info* fi, unsigned flags,
2314 const void* in_buf, size_t in_bufsz, size_t out_bufsz)
2315 {
2316 cout << "TODO:" << __func__;
2317 }
2318
2319 static void pf_poll(fuse_req_t req, fuse_ino_t ino, struct fuse_file_info* fi,
2320 struct fuse_pollhandle* ph)
2321 {
2322 cout << "TODO:" << __func__;
2323 }
2324
2325 static void pf_retrieve_reply(fuse_req_t req, void* cookie, fuse_ino_t ino,
2326 off_t offset, struct fuse_bufvec* bufv)
2327 {
2328 cout << "TODO:" << __func__;
2329 }
2330
2331 static void pf_flock(fuse_req_t req, fuse_ino_t ino,
2332 struct fuse_file_info* fi, int op)
2333 {
2334 cout << "TODO:" << __func__;
2335 }
2336
2337 static void pf_fallocate(fuse_req_t req, fuse_ino_t ino, int mode,
2338 off_t offset, off_t length, struct fuse_file_info* fi)
2339 {
2340 cout << "TODO:" << __func__;
2341 }
2342 */
2343
2344 static struct fuse_lowlevel_ops ops{
2345 .init = pf_init, .destroy = pf_destroy, .lookup = pf_lookup,
2346 .lookup_postfilter = pf_lookup_postfilter, .forget = pf_forget, .getattr = pf_getattr,
2347 .setattr = pf_setattr, .canonical_path = pf_canonical_path, .mknod = pf_mknod,
2348 .mkdir = pf_mkdir, .unlink = pf_unlink, .rmdir = pf_rmdir,
2349 /*.symlink = pf_symlink,*/
2350 .rename = pf_rename,
2351 /*.link = pf_link,*/
2352 .open = pf_open, .read = pf_read,
2353 /*.write = pf_write,*/
2354 .flush = pf_flush, .release = pf_release, .fsync = pf_fsync, .opendir = pf_opendir,
2355 .readdir = pf_readdir, .readdirpostfilter = pf_readdir_postfilter, .releasedir = pf_releasedir,
2356 .fsyncdir = pf_fsyncdir, .statfs = pf_statfs,
2357 /*.setxattr = pf_setxattr,
2358 .getxattr = pf_getxattr,
2359 .listxattr = pf_listxattr,
2360 .removexattr = pf_removexattr,*/
2361 .access = pf_access, .create = pf_create,
2362 /*.getlk = pf_getlk,
2363 .setlk = pf_setlk,
2364 .bmap = pf_bmap,
2365 .ioctl = pf_ioctl,
2366 .poll = pf_poll,*/
2367 .write_buf = pf_write_buf,
2368 /*.retrieve_reply = pf_retrieve_reply,*/
2369 .forget_multi = pf_forget_multi,
2370 /*.flock = pf_flock,*/
2371 .fallocate = pf_fallocate, .readdirplus = pf_readdirplus,
2372 /*.copy_file_range = pf_copy_file_range,*/
2373 };
2374
2375 static struct fuse_loop_config config = {
2376 .clone_fd = 1,
2377 .max_idle_threads = 10,
2378 };
2379
2380 static std::unordered_map<enum fuse_log_level, enum android_LogPriority> fuse_to_android_loglevel({
2381 {FUSE_LOG_EMERG, ANDROID_LOG_FATAL},
2382 {FUSE_LOG_ALERT, ANDROID_LOG_ERROR},
2383 {FUSE_LOG_CRIT, ANDROID_LOG_ERROR},
2384 {FUSE_LOG_ERR, ANDROID_LOG_ERROR},
2385 {FUSE_LOG_WARNING, ANDROID_LOG_WARN},
2386 {FUSE_LOG_NOTICE, ANDROID_LOG_INFO},
2387 {FUSE_LOG_INFO, ANDROID_LOG_DEBUG},
2388 {FUSE_LOG_DEBUG, ANDROID_LOG_VERBOSE},
2389 });
2390
2391 static void fuse_logger(enum fuse_log_level level, const char* fmt, va_list ap) {
2392 __android_log_vprint(fuse_to_android_loglevel.at(level), LIBFUSE_LOG_TAG, fmt, ap);
2393 }
2394
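// Decides whether MediaProvider should hand out a FUSE-backed fd or a lower-fs fd for the given
// path. With passthrough enabled this is always FUSE; otherwise the decision depends on whether
// the node already has a cached FUSE handle and on the file-lock bookkeeping below.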
2395 bool FuseDaemon::ShouldOpenWithFuse(int fd, bool for_read, const std::string& path) {
2396 if (fuse->passthrough) {
2397 // Always open with FUSE if passthrough is enabled. This avoids the delicate file lock
2398 // acquisition below to ensure VFS cache consistency and doesn't impact filesystem
2399 // performance since read(2)/write(2) happen in the kernel
2400 return true;
2401 }
2402
2403 bool use_fuse = false;
2404
2405 if (active.load(std::memory_order_acquire)) {
2406 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2407 const node* node = node::LookupAbsolutePath(fuse->root, path);
2408 if (node && node->HasCachedHandle()) {
2409 use_fuse = true;
2410 } else {
2411 // If we are unable to set a lock, we should use fuse since we can't track
2412 // when all fd references (including dups) are closed. This can happen when
2413 // we try to set a write lock twice on the same file
2414 use_fuse = set_file_lock(fd, for_read, path);
2415 }
2416 } else {
2417 LOG(WARNING) << "FUSE daemon is inactive. Cannot open file with FUSE";
2418 }
2419
2420 return use_fuse;
2421 }
2422
2423 bool FuseDaemon::UsesFusePassthrough() const {
2424 return fuse->passthrough;
2425 }
2426
2427 void FuseDaemon::InvalidateFuseDentryCache(const std::string& path) {
2428 LOG(VERBOSE) << "Invalidating FUSE dentry cache";
2429 if (active.load(std::memory_order_acquire)) {
2430 string name;
2431 fuse_ino_t parent;
2432 fuse_ino_t child;
2433 {
2434 std::lock_guard<std::recursive_mutex> guard(fuse->lock);
2435 const node* node = node::LookupAbsolutePath(fuse->root, path);
2436 if (node) {
2437 name = node->GetName();
2438 child = fuse->ToInode(const_cast<class node*>(node));
2439 parent = fuse->ToInode(node->GetParent());
2440 }
2441 }
2442
2443 if (!name.empty()) {
2444 std::thread t([=]() { fuse_inval(fuse->se, parent, child, name, path); });
2445 t.detach();
2446 }
2447 } else {
2448 LOG(WARNING) << "FUSE daemon is inactive. Cannot invalidate dentry";
2449 }
2450 }
2451
2452 FuseDaemon::FuseDaemon(JNIEnv* env, jobject mediaProvider) : mp(env, mediaProvider),
2453 active(false), fuse(nullptr) {}
2454
2455 bool FuseDaemon::IsStarted() const {
2456 return active.load(std::memory_order_acquire);
2457 }
2458
2459 static bool IsPropertySet(const char* name, bool& value) {
2460 if (android::base::GetProperty(name, "") == "") return false;
2461
2462 value = android::base::GetBoolProperty(name, false);
2463 LOG(INFO) << "fuse-bpf is " << (value ? "enabled" : "disabled") << " because of property "
2464 << name;
2465 return true;
2466 }
2467
2468 bool IsFuseBpfEnabled() {
2469 // ro.fuse.bpf.is_running may not be set when first reading this property, so we have to
2470 // reproduce the vold/Utils.cpp:isFuseBpfEnabled() logic here
2471
2472 bool is_enabled;
2473 if (IsPropertySet("ro.fuse.bpf.is_running", is_enabled)) return is_enabled;
2474 if (IsPropertySet("persist.sys.fuse.bpf.override", is_enabled)) return is_enabled;
2475 if (IsPropertySet("ro.fuse.bpf.enabled", is_enabled)) return is_enabled;
2476
2477 // If the kernel has fuse-bpf, /sys/fs/fuse/features/fuse_bpf will exist and have the contents
2478 // 'supported\n' - see fs/fuse/inode.c in the kernel source
2479 string contents;
2480 const char* filename = "/sys/fs/fuse/features/fuse_bpf";
2481 if (!android::base::ReadFileToString(filename, &contents)) {
2482 LOG(INFO) << "fuse-bpf is disabled because " << filename << " cannot be read";
2483 return false;
2484 }
2485
2486 if (contents == "supported\n") {
2487 LOG(INFO) << "fuse-bpf is enabled because " << filename << " reads 'supported'";
2488 return true;
2489 } else {
2490 LOG(INFO) << "fuse-bpf is disabled because " << filename << " does not read 'supported'";
2491 return false;
2492 }
2493 }
2494
2495 void FuseDaemon::Start(android::base::unique_fd fd, const std::string& path,
2496 const bool uncached_mode,
2497 const std::vector<std::string>& supported_transcoding_relative_paths,
2498 const std::vector<std::string>& supported_uncached_relative_paths) {
2499 android::base::SetDefaultTag(LOG_TAG);
2500
2501 struct fuse_args args;
2502 struct fuse_cmdline_opts opts;
2503
2504 struct stat stat;
2505
2506 if (lstat(path.c_str(), &stat)) {
2507 PLOG(ERROR) << "ERROR: failed to stat source " << path;
2508 return;
2509 }
2510
2511 if (!S_ISDIR(stat.st_mode)) {
2512 PLOG(ERROR) << "ERROR: source is not a directory";
2513 return;
2514 }
2515
2516 args = FUSE_ARGS_INIT(0, nullptr);
2517 if (fuse_opt_add_arg(&args, path.c_str()) || fuse_opt_add_arg(&args, "-odebug") ||
2518 fuse_opt_add_arg(&args, ("-omax_read=" + std::to_string(MAX_READ_SIZE)).c_str())) {
2519 LOG(ERROR) << "ERROR: failed to set options";
2520 return;
2521 }
2522
2523 bool bpf_enabled = IsFuseBpfEnabled();
2524 android::base::unique_fd bpf_fd(-1);
2525 if (bpf_enabled) {
2526 bpf_fd.reset(android::bpf::retrieveProgram(FUSE_BPF_PROG_PATH));
2527 if (!bpf_fd.ok()) {
2528 int error = errno;
2529 PLOG(ERROR) << "Failed to fetch BPF prog fd: " << error;
2530 bpf_enabled = false;
2531 } else {
2532 LOG(INFO) << "Using FUSE BPF, BPF prog fd fetched";
2533 }
2534 }
2535
2536 if (!bpf_enabled) {
2537 LOG(INFO) << "Not using FUSE BPF";
2538 }
2539
2540 struct fuse fuse_default(path, stat.st_ino, uncached_mode, bpf_enabled, std::move(bpf_fd),
2541 supported_transcoding_relative_paths,
2542 supported_uncached_relative_paths);
2543 fuse_default.mp = &mp;
2544 // fuse_default is stack allocated, but it's safe to save it as an instance variable because
2545 // this method blocks and FuseDaemon#active tells if we are currently blocking
2546 fuse = &fuse_default;
2547
2548 // Used by pf_read: redacted ranges are represented by zeroized ranges of bytes,
2549 // so we mmap the maximum length of redacted ranges in the beginning and save memory allocations
2550 // on each read.
2551 fuse_default.zero_addr = static_cast<char*>(mmap(
2552 NULL, MAX_READ_SIZE, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE, /*fd*/ -1, /*off*/ 0));
2553 if (fuse_default.zero_addr == MAP_FAILED) {
2554 LOG(FATAL) << "mmap failed - could not start fuse! errno = " << errno;
2555 }
2556
2557 // Custom logging for libfuse
2558 if (android::base::GetBoolProperty("persist.sys.fuse.log", false)) {
2559 fuse_set_log_func(fuse_logger);
2560 }
2561
2562 if (MY_USER_ID != 0 && mp.IsAppCloneUser(MY_USER_ID)) {
2563 // Disable dentry caching for the app clone user
2564 fuse->disable_dentry_cache = true;
2565 }
2566
2567 fuse->passthrough = android::base::GetBoolProperty("persist.sys.fuse.passthrough.enable", false);
2568 if (fuse->passthrough) {
2569 LOG(INFO) << "Using FUSE passthrough";
2570 }
2571
2572 struct fuse_session
2573 * se = fuse_session_new(&args, &ops, sizeof(ops), &fuse_default);
2574 if (!se) {
2575 PLOG(ERROR) << "Failed to create session ";
2576 return;
2577 }
2578 fuse_default.se = se;
2579 fuse_default.active = &active;
2580 se->fd = fd.release(); // libfuse owns the FD now
2581 se->mountpoint = strdup(path.c_str());
2582
2583 // Single thread. Useful for debugging
2584 // fuse_session_loop(se);
2585 // Multi-threaded
2586 LOG(INFO) << "Starting fuse...";
2587 fuse_session_loop_mt(se, &config);
2588 fuse->active->store(false, std::memory_order_release);
2589 LOG(INFO) << "Ending fuse...";
2590
2591 if (munmap(fuse_default.zero_addr, MAX_READ_SIZE)) {
2592 PLOG(ERROR) << "munmap failed!";
2593 }
2594
2595 fuse_opt_free_args(&args);
2596 fuse_session_destroy(se);
2597 LOG(INFO) << "Ended fuse";
2598 return;
2599 }
2600
2601 std::unique_ptr<FdAccessResult> FuseDaemon::CheckFdAccess(int fd, uid_t uid) const {
2602 struct stat s;
2603 memset(&s, 0, sizeof(s));
2604 if (fstat(fd, &s) < 0) {
2605 PLOG(DEBUG) << "CheckFdAccess fstat failed.";
2606 return std::make_unique<FdAccessResult>(string(), false);
2607 }
2608
2609 ino_t ino = s.st_ino;
2610 dev_t dev = s.st_dev;
2611
2612 dev_t fuse_dev = fuse->dev.load(std::memory_order_acquire);
2613 if (dev != fuse_dev) {
2614 PLOG(DEBUG) << "CheckFdAccess FUSE device id does not match.";
2615 return std::make_unique<FdAccessResult>(string(), false);
2616 }
2617
2618 const node* node = node::LookupInode(fuse->root, ino);
2619 if (!node) {
2620 PLOG(DEBUG) << "CheckFdAccess no node found with given ino";
2621 return std::make_unique<FdAccessResult>(string(), false);
2622 }
2623
2624 return node->CheckHandleForUid(uid);
2625 }
2626
2627 void FuseDaemon::InitializeDeviceId(const std::string& path) {
2628 struct stat stat;
2629
2630 if (lstat(path.c_str(), &stat)) {
2631 PLOG(ERROR) << "InitializeDeviceId failed to stat given path " << path;
2632 return;
2633 }
2634
2635 fuse->dev.store(stat.st_dev, std::memory_order_release);
2636 }
2637
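// Opens (creating it if missing) the leveldb instance for the given volume/instance name under
// /data/media/<user-id>/.transforms/recovery/leveldb-<instance_name> and caches the connection
// in fuse->level_db_connection_map.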
2638 void FuseDaemon::SetupLevelDbConnection(const std::string& instance_name) {
2639 if (CheckLevelDbConnection(instance_name)) {
2640 LOG(DEBUG) << "Leveldb connection already exists for :" << instance_name;
2641 return;
2642 }
2643
2644 std::string leveldbPath =
2645 "/data/media/" + MY_USER_ID_STRING + "/.transforms/recovery/leveldb-" + instance_name;
2646 leveldb::Options options;
2647 options.create_if_missing = true;
2648 leveldb::DB* leveldb;
2649 leveldb::Status status = leveldb::DB::Open(options, leveldbPath, &leveldb);
2650 if (status.ok()) {
2651 fuse->level_db_connection_map.insert(
2652 std::pair<std::string, leveldb::DB*>(instance_name, leveldb));
2653 LOG(INFO) << "Leveldb connection established for :" << instance_name;
2654 } else {
2655 LOG(ERROR) << "Leveldb connection failed for :" << instance_name
2656 << " with error:" << status.ToString();
2657 }
2658 }
2659
2660 void FuseDaemon::SetupLevelDbInstances() {
2661 if (android::base::StartsWith(fuse->root->GetIoPath(), PRIMARY_VOLUME_PREFIX)) {
2662 // Setup leveldb instance for both external primary and internal volume.
2663 fuse->level_db_mutex.lock();
2664 // Create level db instance for internal volume
2665 SetupLevelDbConnection(mediaprovider::fuse::VOLUME_INTERNAL);
2666 // Create level db instance for external primary volume
2667 SetupLevelDbConnection(VOLUME_EXTERNAL_PRIMARY);
2668 // Create level db instance to store owner id to owner package name and vice versa relation
2669 SetupLevelDbConnection(OWNERSHIP_RELATION);
2670 fuse->level_db_mutex.unlock();
2671 }
2672 }
2673
2674 void FuseDaemon::SetupPublicVolumeLevelDbInstance(const std::string& volume_name) {
2675 // Setup leveldb instance for both external primary and internal volume.
2676 fuse->level_db_mutex.lock();
2677 // Create level db instance for public volume
2678 SetupLevelDbConnection(volume_name);
2679 fuse->level_db_mutex.unlock();
2680 }
2681
2682 std::string deriveVolumeName(const std::string& path) {
2683 std::string volume_name = mediaprovider::fuse::getVolumeNameFromPath(path);
2684 if (volume_name.empty()) {
2685 LOG(ERROR) << "Invalid input URI for extracting volume name: " << path;
2686 } else {
2687 LOG(DEBUG) << "Volume name from input path: " << path << ", volName: " << volume_name;
2688 }
2689 return volume_name;
2690 }
2691
2692 void FuseDaemon::DeleteFromLevelDb(const std::string& key) {
2693 fuse->level_db_mutex.lock();
2694 std::string volume_name = deriveVolumeName(key);
2695 if (!CheckLevelDbConnection(volume_name)) {
2696 fuse->level_db_mutex.unlock();
2697 LOG(ERROR) << "DeleteFromLevelDb: Missing leveldb connection.";
2698 return;
2699 }
2700
2701 leveldb::Status status;
2702 status = fuse->level_db_connection_map[volume_name]->Delete(leveldb::WriteOptions(), key);
2703 if (!status.ok()) {
2704 LOG(ERROR) << "Failure in leveldb delete for key: " << key
2705 << " from volume:" << volume_name;
2706 }
2707 fuse->level_db_mutex.unlock();
2708 }
2709
2710 void FuseDaemon::InsertInLevelDb(const std::string& volume_name, const std::string& key,
2711 const std::string& value) {
2712 fuse->level_db_mutex.lock();
2713 if (!CheckLevelDbConnection(volume_name)) {
2714 fuse->level_db_mutex.unlock();
2715 LOG(ERROR) << "InsertInLevelDb: Missing leveldb connection.";
2716 return;
2717 }
2718
2719 leveldb::Status status;
2720 status = fuse->level_db_connection_map[volume_name]->Put(leveldb::WriteOptions(), key,
2721 value);
2722 fuse->level_db_mutex.unlock();
2723 if (!status.ok()) {
2724 LOG(ERROR) << "Failure in leveldb insert for key: " << key
2725 << " in volume:" << volume_name;
2726 LOG(ERROR) << status.ToString();
2727 }
2728 }
2729
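// Returns up to `limit` keys (file paths) from the volume's leveldb instance, resuming after
// last_read_value so callers can page through the database across multiple calls.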
2730 std::vector<std::string> FuseDaemon::ReadFilePathsFromLevelDb(const std::string& volume_name,
2731 const std::string& last_read_value,
2732 int limit) {
2733 fuse->level_db_mutex.lock();
2734 int counter = 0;
2735 std::vector<std::string> file_paths;
2736
2737 if (!CheckLevelDbConnection(volume_name)) {
2738 fuse->level_db_mutex.unlock();
2739 LOG(ERROR) << "ReadFilePathsFromLevelDb: Missing leveldb connection";
2740 return file_paths;
2741 }
2742
2743 leveldb::Iterator* it =
2744 fuse->level_db_connection_map[volume_name]->NewIterator(leveldb::ReadOptions());
2745 if (android::base::EqualsIgnoreCase(last_read_value, "")) {
2746 it->SeekToFirst();
2747 } else {
2748 // Start after last read value
2749 leveldb::Slice slice = last_read_value;
2750 it->Seek(slice);
2751 it->Next();
2752 }
2753 for (; it->Valid() && counter < limit; it->Next()) {
2754 file_paths.push_back(it->key().ToString());
2755 counter++;
2756 }
2757 fuse->level_db_mutex.unlock();
2758 return file_paths;
2759 }
2760
2761 std::string FuseDaemon::ReadBackedUpDataFromLevelDb(const std::string& filePath) {
2762 fuse->level_db_mutex.lock();
2763 std::string data = "";
2764 std::string volume_name = deriveVolumeName(filePath);
2765 if (!CheckLevelDbConnection(volume_name)) {
2766 fuse->level_db_mutex.unlock();
2767 LOG(ERROR) << "ReadBackedUpDataFromLevelDb: Missing leveldb connection.";
2768 return data;
2769 }
2770
2771 leveldb::Status status = fuse->level_db_connection_map[volume_name]->Get(
2772 leveldb::ReadOptions(), filePath, &data);
2773 fuse->level_db_mutex.unlock();
2774
2775 if (status.IsNotFound()) {
2776 LOG(VERBOSE) << "Key is not found in leveldb: " << filePath << " " << status.ToString();
2777 } else if (!status.ok()) {
2778 LOG(WARNING) << "Failure in leveldb read for key: " << filePath << " "
2779 << status.ToString();
2780 }
2781 return data;
2782 }
2783
2784 std::string FuseDaemon::ReadOwnership(const std::string& key) {
2785 fuse->level_db_mutex.lock();
2786 // Return empty string if key not found
2787 std::string data = "";
2788 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2789 fuse->level_db_mutex.unlock();
2790 LOG(ERROR) << "ReadOwnership: Missing leveldb connection.";
2791 return data;
2792 }
2793
2794 leveldb::Status status = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Get(
2795 leveldb::ReadOptions(), key, &data);
2796 fuse->level_db_mutex.unlock();
2797
2798 if (status.IsNotFound()) {
2799 LOG(VERBOSE) << "Key is not found in leveldb: " << key << " " << status.ToString();
2800 } else if (!status.ok()) {
2801 LOG(WARNING) << "Failure in leveldb read for key: " << key << " " << status.ToString();
2802 }
2803
2804 return data;
2805 }
2806
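// Stores the ownerId <-> ownerPackageIdentifier mapping in both directions; if either insert
// fails, both keys are deleted so the mapping stays symmetric.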
2807 void FuseDaemon::CreateOwnerIdRelation(const std::string& ownerId,
2808 const std::string& ownerPackageIdentifier) {
2809 fuse->level_db_mutex.lock();
2810 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2811 fuse->level_db_mutex.unlock();
2812 LOG(ERROR) << "CreateOwnerIdRelation: Missing leveldb connection.";
2813 return;
2814 }
2815
2816 leveldb::Status status1, status2;
2817 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2818 leveldb::WriteOptions(), ownerId, ownerPackageIdentifier);
2819 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2820 leveldb::WriteOptions(), ownerPackageIdentifier, ownerId);
2821 if (!status1.ok() || !status2.ok()) {
2822 // If both inserts did not go through, remove both.
2823 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2824 ownerId);
2825 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2826 ownerPackageIdentifier);
2827 LOG(ERROR) << "Failure in leveldb insert for owner_id: " << ownerId
2828 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2829 }
2830 fuse->level_db_mutex.unlock();
2831 }
2832
2833 void FuseDaemon::RemoveOwnerIdRelation(const std::string& ownerId,
2834 const std::string& ownerPackageIdentifier) {
2835 fuse->level_db_mutex.lock();
2836 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2837 fuse->level_db_mutex.unlock();
2838 LOG(ERROR) << "RemoveOwnerIdRelation: Missing leveldb connection.";
2839 return;
2840 }
2841
2842 leveldb::Status status1, status2;
2843 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2844 ownerId);
2845 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Delete(leveldb::WriteOptions(),
2846 ownerPackageIdentifier);
2847 if (status1.ok() && status2.ok()) {
2848 LOG(INFO) << "Successfully deleted rows in leveldb for owner_id: " << ownerId
2849 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2850 } else {
2851 // If both deletes did not go through, revert both.
2852 status1 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2853 leveldb::WriteOptions(), ownerId, ownerPackageIdentifier);
2854 status2 = fuse->level_db_connection_map[OWNERSHIP_RELATION]->Put(
2855 leveldb::WriteOptions(), ownerPackageIdentifier, ownerId);
2856 LOG(ERROR) << "Failure in leveldb delete for owner_id: " << ownerId
2857 << " and ownerPackageIdentifier: " << ownerPackageIdentifier;
2858 }
2859 fuse->level_db_mutex.unlock();
2860 }
2861
2862 std::map<std::string, std::string> FuseDaemon::GetOwnerRelationship() {
2863 fuse->level_db_mutex.lock();
2864 std::map<std::string, std::string> resultMap;
2865 if (!CheckLevelDbConnection(OWNERSHIP_RELATION)) {
2866 fuse->level_db_mutex.unlock();
2867 LOG(ERROR) << "GetOwnerRelationship: Missing leveldb connection.";
2868 return resultMap;
2869 }
2870
2871 leveldb::Status status;
2872 // Get the key-value pairs from the database.
2873 leveldb::Iterator* it =
2874 fuse->level_db_connection_map[OWNERSHIP_RELATION]->NewIterator(leveldb::ReadOptions());
2875 for (it->SeekToFirst(); it->Valid(); it->Next()) {
2876 std::string key = it->key().ToString();
2877 std::string value = it->value().ToString();
2878 resultMap.insert(std::pair<std::string, std::string>(key, value));
2879 }
2880
2881 fuse->level_db_mutex.unlock();
2882 return resultMap;
2883 }
2884
2885 bool FuseDaemon::CheckLevelDbConnection(const std::string& instance_name) {
2886 if (fuse->level_db_connection_map.find(instance_name) == fuse->level_db_connection_map.end()) {
2887 LOG(ERROR) << "Leveldb setup is missing for: " << instance_name;
2888 return false;
2889 }
2890 return true;
2891 }
2892
2893 } //namespace fuse
2894 } // namespace mediaprovider
2895