1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <errno.h>
18 #include <fcntl.h>
19 #include <stdint.h>
20 #include <string.h>
21 #include <sys/mman.h>
22 #include <sys/ptrace.h>
23 #include <sys/stat.h>
24 #include <sys/types.h>
25 #include <sys/uio.h>
26 #include <unistd.h>
27
28 #include <algorithm>
29 #include <memory>
30 #include <mutex>
31 #include <optional>
32 #include <string>
33
34 #include <android-base/unique_fd.h>
35
36 #include <unwindstack/Log.h>
37 #include <unwindstack/Memory.h>
38
39 #include "MemoryBuffer.h"
40 #include "MemoryCache.h"
41 #include "MemoryFileAtOffset.h"
42 #include "MemoryLocal.h"
43 #include "MemoryLocalUnsafe.h"
44 #include "MemoryOffline.h"
45 #include "MemoryOfflineBuffer.h"
46 #include "MemoryRange.h"
47 #include "MemoryRemote.h"
48
49 namespace unwindstack {
50
// Reads up to `len` bytes from process `pid` at `remote_src` into `dst`
// using process_vm_readv. Returns the number of bytes actually read; on
// failure, errno reflects the underlying error.
static size_t ProcessVmRead(pid_t pid, uint64_t remote_src, void* dst, size_t len) {
  // Split up the remote read across page boundaries.
  // From the manpage:
  //   A partial read/write may result if one of the remote_iov elements points to an invalid
  //   memory region in the remote process.
  //
  //   Partial transfers apply at the granularity of iovec elements. These system calls won't
  //   perform a partial transfer that splits a single iovec element.
  constexpr size_t kMaxIovecs = 64;
  struct iovec src_iovs[kMaxIovecs];

  uint64_t cur = remote_src;
  size_t total_read = 0;
  while (len > 0) {
    struct iovec dst_iov = {
        .iov_base = &reinterpret_cast<uint8_t*>(dst)[total_read],
        .iov_len = len,
    };

    // Build up to kMaxIovecs source iovecs, each confined to a single page,
    // so an unreadable page only fails its own iovec rather than the batch.
    size_t batch_bytes = 0;
    size_t iovecs_used = 0;
    while (len > 0) {
      if (iovecs_used == kMaxIovecs) {
        break;
      }

      // struct iovec uses void* for iov_base.
      if (cur >= UINTPTR_MAX) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_base = reinterpret_cast<void*>(cur);

      uintptr_t misalignment = cur & (getpagesize() - 1);
      size_t iov_len = getpagesize() - misalignment;
      iov_len = std::min(iov_len, len);

      len -= iov_len;
      if (__builtin_add_overflow(cur, iov_len, &cur)) {
        errno = EFAULT;
        return total_read;
      }

      src_iovs[iovecs_used].iov_len = iov_len;
      batch_bytes += iov_len;
      ++iovecs_used;
    }

    ssize_t rc = process_vm_readv(pid, &dst_iov, 1, src_iovs, iovecs_used, 0);
    if (rc == -1) {
      return total_read;
    }
    total_read += rc;

    // A short transfer means the kernel hit an unreadable region part way
    // through this batch. Stop here: `cur` and `len` were already advanced
    // by the full planned batch, so continuing would copy bytes from beyond
    // the hole into the wrong destination offset.
    if (static_cast<size_t>(rc) < batch_bytes) {
      return total_read;
    }
  }
  return total_read;
}
106
// Reads one word from the traced process `pid` at `addr` into *value.
// Returns false if the PTRACE_PEEKTEXT call failed.
static bool PtraceReadLong(pid_t pid, uint64_t addr, long* value) {
  // A successful peek may legitimately return -1, so clear errno first and
  // treat -1 as a failure only when errno was set by the call.
  errno = 0;
  long word = ptrace(PTRACE_PEEKTEXT, pid, reinterpret_cast<void*>(addr), nullptr);
  *value = word;
  return word != -1 || errno == 0;
}
117
// Reads `bytes` bytes from process `pid` at `addr` into `dst` using
// word-sized PTRACE_PEEKTEXT reads: an unaligned head, whole aligned words,
// then any leftover tail. Returns the number of bytes successfully copied
// (which is 0 if the head read fails or addr + bytes overflows).
static size_t PtraceRead(pid_t pid, uint64_t addr, void* dst, size_t bytes) {
  // Make sure that there is no overflow.
  uint64_t max_size;
  if (__builtin_add_overflow(addr, bytes, &max_size)) {
    return 0;
  }

  size_t bytes_read = 0;
  long data;
  // Unaligned start: read the containing word and copy only the tail of it
  // that overlaps the requested range.
  size_t align_bytes = addr & (sizeof(long) - 1);
  if (align_bytes != 0) {
    if (!PtraceReadLong(pid, addr & ~(sizeof(long) - 1), &data)) {
      return 0;
    }
    size_t copy_bytes = std::min(sizeof(long) - align_bytes, bytes);
    memcpy(dst, reinterpret_cast<uint8_t*>(&data) + align_bytes, copy_bytes);
    addr += copy_bytes;
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + copy_bytes);
    bytes -= copy_bytes;
    bytes_read += copy_bytes;
  }

  // Copy the aligned middle of the range one word at a time.
  for (size_t i = 0; i < bytes / sizeof(long); i++) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, sizeof(long));
    dst = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(dst) + sizeof(long));
    addr += sizeof(long);
    bytes_read += sizeof(long);
  }

  // Copy any trailing bytes that do not fill a whole word.
  size_t left_over = bytes & (sizeof(long) - 1);
  if (left_over) {
    if (!PtraceReadLong(pid, addr, &data)) {
      return bytes_read;
    }
    memcpy(dst, &data, left_over);
    bytes_read += left_over;
  }
  return bytes_read;
}
160
ReadFully(uint64_t addr,void * dst,size_t size)161 bool Memory::ReadFully(uint64_t addr, void* dst, size_t size) {
162 size_t rc = Read(addr, dst, size);
163 return rc == size;
164 }
165
// Reads a NUL-terminated string starting at `addr` into *dst, scanning at
// most `max_read` bytes. Returns false if no terminator is found within
// max_read bytes, or if a read fails before the terminator is seen.
bool Memory::ReadString(uint64_t addr, std::string* dst, size_t max_read) {
  char buffer[256];  // Large enough for 99% of symbol names.
  size_t size = 0;   // Number of bytes which were read into the buffer.
  for (size_t offset = 0; offset < max_read; offset += size) {
    // Look for null-terminator first, so we can allocate string of exact size.
    // If we know the end of valid memory range, do the reads in larger blocks.
    size_t read = std::min(sizeof(buffer), max_read - offset);
    size = Read(addr + offset, buffer, read);
    if (size == 0) {
      return false;  // We have not found end of string yet and we can not read more data.
    }
    size_t length = strnlen(buffer, size);  // Index of the null-terminator.
    if (length < size) {
      // We found the null-terminator. Allocate the string and set its content.
      if (offset == 0) {
        // We did just single read, so the buffer already contains the whole string.
        dst->assign(buffer, length);
        return true;
      } else {
        // The buffer contains only the last block. Read the whole string again.
        dst->assign(offset + length, '\0');
        return ReadFully(addr, dst->data(), dst->size());
      }
    }
  }
  return false;
}
193
CreateFileMemory(const std::string & path,uint64_t offset,uint64_t size)194 std::shared_ptr<Memory> Memory::CreateFileMemory(const std::string& path, uint64_t offset,
195 uint64_t size) {
196 auto memory = std::make_shared<MemoryFileAtOffset>();
197
198 if (memory->Init(path, offset, size)) {
199 return memory;
200 }
201
202 return nullptr;
203 }
204
CreateProcessMemoryLocalUnsafe()205 std::shared_ptr<Memory> Memory::CreateProcessMemoryLocalUnsafe() {
206 return std::shared_ptr<Memory>(new MemoryLocalUnsafe());
207 }
208
CreateProcessMemory(pid_t pid)209 std::shared_ptr<Memory> Memory::CreateProcessMemory(pid_t pid) {
210 if (pid == getpid()) {
211 return std::shared_ptr<Memory>(new MemoryLocal());
212 }
213 return std::shared_ptr<Memory>(new MemoryRemote(pid));
214 }
215
CreateProcessMemoryCached(pid_t pid)216 std::shared_ptr<Memory> Memory::CreateProcessMemoryCached(pid_t pid) {
217 if (pid == getpid()) {
218 return std::shared_ptr<Memory>(new MemoryCache(new MemoryLocal()));
219 }
220 return std::shared_ptr<Memory>(new MemoryCache(new MemoryRemote(pid)));
221 }
222
CreateProcessMemoryThreadCached(pid_t pid)223 std::shared_ptr<Memory> Memory::CreateProcessMemoryThreadCached(pid_t pid) {
224 if (pid == getpid()) {
225 return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryLocal()));
226 }
227 return std::shared_ptr<Memory>(new MemoryThreadCache(new MemoryRemote(pid)));
228 }
229
CreateOfflineMemory(const uint8_t * data,uint64_t start,uint64_t end)230 std::shared_ptr<Memory> Memory::CreateOfflineMemory(const uint8_t* data, uint64_t start,
231 uint64_t end) {
232 return std::shared_ptr<Memory>(new MemoryOfflineBuffer(data, start, end));
233 }
234
Read(uint64_t addr,void * dst,size_t size)235 size_t MemoryBuffer::Read(uint64_t addr, void* dst, size_t size) {
236 if (addr < offset_) {
237 return 0;
238 }
239 addr -= offset_;
240 size_t raw_size = raw_.size();
241 if (addr >= raw_size) {
242 return 0;
243 }
244
245 size_t bytes_left = raw_size - static_cast<size_t>(addr);
246 size_t actual_len = std::min(bytes_left, size);
247 memcpy(dst, &raw_[addr], actual_len);
248 return actual_len;
249 }
250
GetPtr(size_t addr)251 uint8_t* MemoryBuffer::GetPtr(size_t addr) {
252 if (addr < offset_) {
253 return nullptr;
254 }
255 addr -= offset_;
256 if (addr < raw_.size()) {
257 return &raw_[addr];
258 }
259 return nullptr;
260 }
261
// Releases the file mapping, if any.
MemoryFileAtOffset::~MemoryFileAtOffset() {
  Clear();
}
265
// Unmaps the current mapping, if any. Init() leaves data_ pointing offset_
// bytes past the page-aligned mapping base and size_ excluding those bytes,
// so back up by offset_ and unmap the full original length (size_ + offset_).
void MemoryFileAtOffset::Clear() {
  if (data_) {
    munmap(&data_[-offset_], size_ + offset_);
    data_ = nullptr;
    size_ = 0;
  }
}
273
// Maps `file` read-only so that Read(0, ...) corresponds to file offset
// `offset`. `size` (plus the within-page misalignment) caps the mapped
// length when it is smaller than the remainder of the file. Returns false
// on open/stat/mmap failure or if offset is at/past the end of the file.
bool MemoryFileAtOffset::Init(const std::string& file, uint64_t offset, uint64_t size) {
  // Clear out any previous data if it exists.
  Clear();

  android::base::unique_fd fd(TEMP_FAILURE_RETRY(open(file.c_str(), O_RDONLY | O_CLOEXEC)));
  if (fd == -1) {
    return false;
  }
  struct stat buf;
  if (fstat(fd, &buf) == -1) {
    return false;
  }
  if (offset >= static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  // mmap requires a page-aligned file offset; keep the within-page remainder
  // in offset_ so reads can be adjusted back to the requested position.
  offset_ = offset & (getpagesize() - 1);
  uint64_t aligned_offset = offset & ~(getpagesize() - 1);
  if (aligned_offset > static_cast<uint64_t>(buf.st_size) ||
      offset > static_cast<uint64_t>(buf.st_size)) {
    return false;
  }

  size_ = buf.st_size - aligned_offset;
  uint64_t max_size;
  // Only truncate when size + offset_ does not overflow and is smaller than
  // the bytes remaining in the file.
  if (!__builtin_add_overflow(size, offset_, &max_size) && max_size < size_) {
    // Truncate the mapped size.
    size_ = max_size;
  }
  void* map = mmap(nullptr, size_, PROT_READ, MAP_PRIVATE, fd, aligned_offset);
  if (map == MAP_FAILED) {
    return false;
  }

  // Point data_ at the requested offset inside the mapping; size_ becomes
  // the number of readable bytes from that point (Clear() undoes this).
  data_ = &reinterpret_cast<uint8_t*>(map)[offset_];
  size_ -= offset_;

  return true;
}
313
Read(uint64_t addr,void * dst,size_t size)314 size_t MemoryFileAtOffset::Read(uint64_t addr, void* dst, size_t size) {
315 if (addr >= size_) {
316 return 0;
317 }
318
319 size_t bytes_left = size_ - static_cast<size_t>(addr);
320 const unsigned char* actual_base = static_cast<const unsigned char*>(data_) + addr;
321 size_t actual_len = std::min(bytes_left, size);
322
323 memcpy(dst, actual_base, actual_len);
324 return actual_len;
325 }
326
// Reads remote process memory, preferring process_vm_readv and falling back
// to ptrace word reads. The first mechanism that returns any data is cached
// in read_redirect_func_ and used directly for all subsequent reads.
size_t MemoryRemote::Read(uint64_t addr, void* dst, size_t size) {
#if !defined(__LP64__)
  // Cannot read an address greater than 32 bits in a 32 bit context.
  if (addr > UINT32_MAX) {
    return 0;
  }
#endif

  // read_redirect_func_ stores the chosen reader as a uintptr_t (atomic);
  // nonzero means a previous Read already picked a working mechanism.
  size_t (*read_func)(pid_t, uint64_t, void*, size_t) =
      reinterpret_cast<size_t (*)(pid_t, uint64_t, void*, size_t)>(read_redirect_func_.load());
  if (read_func != nullptr) {
    return read_func(pid_, addr, dst, size);
  } else {
    // Prefer process_vm_read, try it first. If it doesn't work, use the
    // ptrace function. If at least one of them returns at least some data,
    // set that as the permanent function to use.
    // This assumes that if process_vm_read works once, it will continue
    // to work.
    size_t bytes = ProcessVmRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(ProcessVmRead);
      return bytes;
    }
    bytes = PtraceRead(pid_, addr, dst, size);
    if (bytes > 0) {
      read_redirect_func_ = reinterpret_cast<uintptr_t>(PtraceRead);
    }
    return bytes;
  }
}
357
// Reads the current process via process_vm_readv. Unlike a raw memcpy, an
// unreadable page yields a short read instead of faulting.
size_t MemoryLocal::Read(uint64_t addr, void* dst, size_t size) {
  return ProcessVmRead(getpid(), addr, dst, size);
}
361
// A view of `memory` exposing the span [begin, begin + length) at addresses
// starting at `offset` in this object's address space.
MemoryRange::MemoryRange(const std::shared_ptr<Memory>& memory, uint64_t begin, uint64_t length,
                         uint64_t offset)
    : memory_(memory), begin_(begin), length_(length), offset_(offset) {}
365
Read(uint64_t addr,void * dst,size_t size)366 size_t MemoryRange::Read(uint64_t addr, void* dst, size_t size) {
367 if (addr < offset_) {
368 return 0;
369 }
370
371 uint64_t read_offset = addr - offset_;
372 if (read_offset >= length_) {
373 return 0;
374 }
375
376 uint64_t read_length = std::min(static_cast<uint64_t>(size), length_ - read_offset);
377 uint64_t read_addr;
378 if (__builtin_add_overflow(read_offset, begin_, &read_addr)) {
379 return 0;
380 }
381
382 return memory_->Read(read_addr, dst, read_length);
383 }
384
Insert(MemoryRange * memory)385 bool MemoryRanges::Insert(MemoryRange* memory) {
386 uint64_t last_addr;
387 if (__builtin_add_overflow(memory->offset(), memory->length(), &last_addr)) {
388 // This should never happen in the real world. However, it is possible
389 // that an offset in a mapped in segment could be crafted such that
390 // this value overflows. In that case, clamp the value to the max uint64
391 // value.
392 last_addr = UINT64_MAX;
393 }
394 auto entry = maps_.try_emplace(last_addr, memory);
395 if (entry.second) {
396 return true;
397 }
398 delete memory;
399 return false;
400 }
401
Read(uint64_t addr,void * dst,size_t size)402 size_t MemoryRanges::Read(uint64_t addr, void* dst, size_t size) {
403 auto entry = maps_.upper_bound(addr);
404 if (entry != maps_.end()) {
405 return entry->second->Read(addr, dst, size);
406 }
407 return 0;
408 }
409
Init(const std::string & file,uint64_t offset)410 bool MemoryOffline::Init(const std::string& file, uint64_t offset) {
411 auto memory_file = std::make_shared<MemoryFileAtOffset>();
412 if (!memory_file->Init(file, offset)) {
413 return false;
414 }
415
416 // The first uint64_t value is the start of memory.
417 uint64_t start;
418 if (!memory_file->ReadFully(0, &start, sizeof(start))) {
419 return false;
420 }
421
422 uint64_t size = memory_file->Size();
423 if (__builtin_sub_overflow(size, sizeof(start), &size)) {
424 return false;
425 }
426
427 memory_ = std::make_unique<MemoryRange>(memory_file, sizeof(start), size, start);
428 return true;
429 }
430
Init(const std::string & file,uint64_t offset,uint64_t start,uint64_t size)431 bool MemoryOffline::Init(const std::string& file, uint64_t offset, uint64_t start, uint64_t size) {
432 auto memory_file = std::make_shared<MemoryFileAtOffset>();
433 if (!memory_file->Init(file, offset)) {
434 return false;
435 }
436
437 memory_ = std::make_unique<MemoryRange>(memory_file, 0, size, start);
438 return true;
439 }
440
Read(uint64_t addr,void * dst,size_t size)441 size_t MemoryOffline::Read(uint64_t addr, void* dst, size_t size) {
442 if (!memory_) {
443 return 0;
444 }
445
446 return memory_->Read(addr, dst, size);
447 }
448
// A read-only view over caller-owned bytes covering addresses [start, end).
// The data is not copied, so it must outlive this object.
MemoryOfflineBuffer::MemoryOfflineBuffer(const uint8_t* data, uint64_t start, uint64_t end)
    : data_(data), start_(start), end_(end) {}
451
// Repoints this view at a new buffer and address range.
void MemoryOfflineBuffer::Reset(const uint8_t* data, uint64_t start, uint64_t end) {
  data_ = data;
  start_ = start;
  end_ = end;
}
457
Read(uint64_t addr,void * dst,size_t size)458 size_t MemoryOfflineBuffer::Read(uint64_t addr, void* dst, size_t size) {
459 if (addr < start_ || addr >= end_) {
460 return 0;
461 }
462
463 size_t read_length = std::min(size, static_cast<size_t>(end_ - addr));
464 memcpy(dst, &data_[addr - start_], read_length);
465 return read_length;
466 }
467
// Owns the MemoryOffline parts added to it; free them all.
MemoryOfflineParts::~MemoryOfflineParts() {
  for (auto memory : memories_) {
    delete memory;
  }
}
473
Read(uint64_t addr,void * dst,size_t size)474 size_t MemoryOfflineParts::Read(uint64_t addr, void* dst, size_t size) {
475 if (memories_.empty()) {
476 return 0;
477 }
478
479 // Do a read on each memory object, no support for reading across the
480 // different memory objects.
481 for (MemoryOffline* memory : memories_) {
482 size_t bytes = memory->Read(addr, dst, size);
483 if (bytes != 0) {
484 return bytes;
485 }
486 }
487 return 0;
488 }
489
// Reads through the page cache: data is cached in kCacheSize chunks keyed
// by addr >> kCacheBits. A single read touches at most two chunks. If a
// chunk cannot be filled, its entry is erased and the remainder of the read
// falls through to the uncached impl_.
size_t MemoryCacheBase::InternalCachedRead(uint64_t addr, void* dst, size_t size,
                                           CacheDataType* cache) {
  uint64_t addr_page = addr >> kCacheBits;
  auto entry = cache->find(addr_page);
  uint8_t* cache_dst;
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    // Insert a new slot for this page, then fill it from the underlying
    // memory.
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr, dst, size);
    }
  }
  // Bytes available in this chunk from addr to the chunk boundary.
  size_t max_read = ((addr_page + 1) << kCacheBits) - addr;
  if (size <= max_read) {
    // The whole read is satisfied by this one cached chunk.
    memcpy(dst, &cache_dst[addr & kCacheMask], size);
    return size;
  }

  // The read crossed into another cached entry, since a read can only cross
  // into one extra cached page, duplicate the code rather than looping.
  memcpy(dst, &cache_dst[addr & kCacheMask], max_read);
  dst = &reinterpret_cast<uint8_t*>(dst)[max_read];
  addr_page++;

  entry = cache->find(addr_page);
  if (entry != cache->end()) {
    cache_dst = entry->second;
  } else {
    cache_dst = (*cache)[addr_page];
    if (!impl_->ReadFully(addr_page << kCacheBits, cache_dst, kCacheSize)) {
      // Erase the entry.
      cache->erase(addr_page);
      return impl_->Read(addr_page << kCacheBits, dst, size - max_read) + max_read;
    }
  }
  memcpy(dst, cache_dst, size - max_read);
  return size;
}
531
// Drops all cached chunks; takes the same lock CachedRead uses.
void MemoryCache::Clear() {
  std::lock_guard<std::mutex> lock(cache_lock_);
  cache_.clear();
}
536
// Reads through the shared cache under cache_lock_.
size_t MemoryCache::CachedRead(uint64_t addr, void* dst, size_t size) {
  // Use a single lock since this object is not designed to be performant
  // for multiple object reading from multiple threads.
  std::lock_guard<std::mutex> lock(cache_lock_);

  return InternalCachedRead(addr, dst, size, &cache_);
}
544
// Creates a per-thread cached wrapper around `memory`. A pthread TLS key
// holds each thread's cache; the key destructor frees that cache when the
// thread exits. If key creation fails, thread_cache_ is reset and reads
// bypass the cache entirely (see CachedRead).
// NOTE(review): std::make_optional<pthread_t>() vs the pthread_key_t* that
// pthread_key_create expects -- confirm thread_cache_'s declared type in
// the header matches.
MemoryThreadCache::MemoryThreadCache(Memory* memory) : MemoryCacheBase(memory) {
  thread_cache_ = std::make_optional<pthread_t>();
  if (pthread_key_create(&*thread_cache_, [](void* memory) {
        CacheDataType* cache = reinterpret_cast<CacheDataType*>(memory);
        delete cache;
      }) != 0) {
    Log::AsyncSafe("Failed to create pthread key.");
    thread_cache_.reset();
  }
}
555
// Frees the calling thread's cache and releases the TLS key.
// NOTE(review): caches allocated by other still-running threads are not
// visible here; after pthread_key_delete their key destructors will not run
// -- confirm this matches the intended lifetime of the object.
MemoryThreadCache::~MemoryThreadCache() {
  if (thread_cache_) {
    CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
    delete cache;
    pthread_key_delete(*thread_cache_);
  }
}
563
// Reads through this thread's private cache, creating it on first use.
// Falls back to an uncached read if the TLS key could not be created.
size_t MemoryThreadCache::CachedRead(uint64_t addr, void* dst, size_t size) {
  if (!thread_cache_) {
    return impl_->Read(addr, dst, size);
  }

  CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
  if (cache == nullptr) {
    // First read on this thread: allocate its cache and stash it in TLS so
    // the key destructor can free it at thread exit.
    cache = new CacheDataType;
    pthread_setspecific(*thread_cache_, cache);
  }

  return InternalCachedRead(addr, dst, size, cache);
}
577
// Frees the calling thread's cache only; other threads' caches are
// untouched. A later CachedRead on this thread re-creates the cache.
void MemoryThreadCache::Clear() {
  if (!thread_cache_) {
    return;
  }

  CacheDataType* cache = reinterpret_cast<CacheDataType*>(pthread_getspecific(*thread_cache_));
  if (cache != nullptr) {
    delete cache;
    // Null out the TLS slot so the key destructor does not double-free.
    pthread_setspecific(*thread_cache_, nullptr);
  }
}
589
// Reads local memory with a raw memcpy. "Unsafe": there is no check that
// [addr, addr + size) is readable, so a bad address faults instead of
// producing a short read.
size_t MemoryLocalUnsafe::Read(uint64_t addr, void* dst, size_t size) {
  void* raw_ptr = reinterpret_cast<void*>(addr);
  memcpy(dst, raw_ptr, size);
  return size;
}
595
596 } // namespace unwindstack
597