1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "berberis/tiny_loader/tiny_loader.h"

#include <elf.h>
#include <fcntl.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/param.h>
#include <sys/stat.h>
#include <sys/user.h>
#include <unistd.h>

#include <cerrno>
#include <cstdarg>
#include <cstddef>
#include <cstring>
#include <string>
#include <tuple>

#include "berberis/base/bit_util.h"
#include "berberis/base/checks.h"
#include "berberis/base/mapped_file_fragment.h"
#include "berberis/base/page_size.h"
#include "berberis/base/prctl_helpers.h"
#include "berberis/base/scoped_fd.h"
#include "berberis/base/stringprintf.h"
37
// Maps a single ELF segment permission bit (PF_R/PF_W/PF_X) onto the
// corresponding mmap PROT_* bit, or 0 when the bit is not set.
#define MAYBE_MAP_FLAG(x, from, to) (((x) & (from)) ? (to) : 0)
// Converts a full ELF p_flags value into the PROT_* mask used for mmap/mprotect.
#define PFLAGS_TO_PROT(x)                                                        \
  (MAYBE_MAP_FLAG((x), PF_X, PROT_EXEC) | MAYBE_MAP_FLAG((x), PF_R, PROT_READ) | \
   MAYBE_MAP_FLAG((x), PF_W, PROT_WRITE))
42
43 namespace {
44
set_error_msg(std::string * error_msg,const char * format,...)45 void set_error_msg(std::string* error_msg, const char* format, ...) {
46 if (error_msg == nullptr) {
47 return;
48 }
49
50 va_list ap;
51 va_start(ap, format);
52 berberis::StringAppendV(error_msg, format, ap);
53 va_end(ap);
54 }
55
// Rounds addr down to the start of its page.
template <typename T>
constexpr T page_align_down(T addr) {
  return berberis::AlignDown(addr, berberis::kPageSize);
}

// Rounds addr up to the next page boundary (identity if already aligned).
template <typename T>
constexpr T page_align_up(T addr) {
  return berberis::AlignUp(addr, berberis::kPageSize);
}

// Returns the offset of addr within its page, i.e. addr modulo the page size.
template <typename T>
constexpr T page_offset(T addr) {
  return addr - page_align_down(addr);
}
70
// Returns a human-readable name for an ELF EI_CLASS value, or "(unknown)"
// for anything outside the three standard classes.
const char* EiClassString(int elf_class) {
  if (elf_class == ELFCLASSNONE) {
    return "ELFCLASSNONE";
  }
  if (elf_class == ELFCLASS32) {
    return "ELFCLASS32";
  }
  if (elf_class == ELFCLASS64) {
    return "ELFCLASS64";
  }
  return "(unknown)";
}
83
84 // Returns the size of the extent of all the possibly non-contiguous
85 // loadable segments in an ELF program header table. This corresponds
86 // to the page-aligned size in bytes that needs to be reserved in the
87 // process' address space. If there are no loadable segments, 0 is
88 // returned.
89 //
90 // If out_min_vaddr or out_max_vaddr are not null, they will be
91 // set to the minimum and maximum addresses of pages to be reserved,
92 // or 0 if there is nothing to load.
phdr_table_get_load_size(const ElfPhdr * phdr_table,size_t phdr_count,ElfAddr * out_min_vaddr)93 size_t phdr_table_get_load_size(const ElfPhdr* phdr_table, size_t phdr_count,
94 ElfAddr* out_min_vaddr) {
95 ElfAddr min_vaddr = UINTPTR_MAX;
96 ElfAddr max_vaddr = 0;
97
98 bool found_pt_load = false;
99 for (size_t i = 0; i < phdr_count; ++i) {
100 const ElfPhdr* phdr = &phdr_table[i];
101
102 if (phdr->p_type != PT_LOAD) {
103 continue;
104 }
105 found_pt_load = true;
106
107 if (phdr->p_vaddr < min_vaddr) {
108 min_vaddr = phdr->p_vaddr;
109 }
110
111 if (phdr->p_vaddr + phdr->p_memsz > max_vaddr) {
112 max_vaddr = phdr->p_vaddr + phdr->p_memsz;
113 }
114 }
115 if (!found_pt_load) {
116 min_vaddr = 0;
117 }
118
119 min_vaddr = page_align_down(min_vaddr);
120 max_vaddr = page_align_up(max_vaddr);
121
122 if (out_min_vaddr != nullptr) {
123 *out_min_vaddr = min_vaddr;
124 }
125 return max_vaddr - min_vaddr;
126 }
127
// Single-use ELF loader. Either maps an ELF file into memory (LoadFromFile)
// or parses an image that is already mapped (LoadFromMemory), then extracts
// the pieces needed for symbol lookup into a LoadedElfFile. On failure the
// reason is available via error_msg().
class TinyElfLoader {
 public:
  explicit TinyElfLoader(const char* name);

  // Returns {success, page-aligned size of all PT_LOAD segments} for the
  // file at path, without actually loading it.
  std::tuple<bool, size_t> CalculateLoadSize(const char* path);

  // Maps the ELF file at path using the supplied mmap64/munmap callbacks,
  // reserving with at least the given alignment, and fills loaded_elf_file.
  bool LoadFromFile(const char* path,
                    size_t align,
                    TinyLoader::mmap64_fn_t mmap64_fn,
                    TinyLoader::munmap_fn_t munmap_fn,
                    LoadedElfFile* loaded_elf_file);

  // Parses an ELF image already present at [load_addr, load_addr+load_size).
  bool LoadFromMemory(void* load_addr, size_t load_size, LoadedElfFile* loaded_elf_file);

  // Description of the last failure; empty if nothing failed.
  const std::string& error_msg() const { return error_msg_; }

 private:
  // Returns success, fd and file_size.
  std::tuple<bool, int, size_t> OpenFile(const char* path);
  bool CheckElfHeader(const ElfEhdr* header);
  bool ReadElfHeader(int fd, ElfEhdr* header);
  bool ReadProgramHeadersFromFile(const ElfEhdr* header, int fd, off64_t file_size,
                                  const ElfPhdr** phdr_table, size_t* phdr_num);

  bool ReadProgramHeadersFromMemory(const ElfEhdr* header, uintptr_t load_addr, size_t load_size,
                                    const ElfPhdr** phdr_table, size_t* phdr_num);

  bool ReserveAddressSpace(ElfHalf e_type, const ElfPhdr* phdr_table, size_t phdr_num, size_t align,
                           TinyLoader::mmap64_fn_t mmap64_fn, TinyLoader::munmap_fn_t munmap_fn,
                           void** load_start, size_t* load_size, uintptr_t* load_bias);

  bool LoadSegments(int fd, size_t file_size, ElfHalf e_type, const ElfPhdr* phdr_table,
                    size_t phdr_num, size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
                    TinyLoader::munmap_fn_t munmap_fn, void** load_start, size_t* load_size);

  bool FindDynamicSegment(const ElfEhdr* header);
  bool InitializeFields(const ElfEhdr* header);

  // Common tail of both load paths: validates the in-memory image and
  // populates *loaded_elf_file.
  bool Parse(void* load_ptr, size_t load_size, LoadedElfFile* loaded_elf_file);

  static bool CheckFileRange(off64_t file_size, ElfAddr offset, size_t size, size_t alignment);
  static bool CheckMemoryRange(uintptr_t load_addr, size_t load_size, ElfAddr offset, size_t size,
                               size_t alignment);
  // Reserves size bytes of PROT_NONE address space (optionally at hint);
  // returns nullptr on failure.
  uint8_t* Reserve(void* hint, size_t size, TinyLoader::mmap64_fn_t mmap64_fn);

  // True once a Load* call has run; loaders are single-use.
  bool did_load_;

  // Name used in error messages (typically the file path); not owned.
  const char* name_;

  MappedFileFragment phdr_fragment_;

  // Loaded phdr
  const ElfPhdr* loaded_phdr_;
  size_t loaded_phdr_num_;

  // Difference between link-time vaddrs and actual load addresses.
  ElfAddr load_bias_;

  void* entry_point_;

  // Loaded dynamic section
  const ElfDyn* dynamic_;

  // Fields needed for symbol lookup
  bool has_gnu_hash_;
  size_t gnu_nbucket_;
  uint32_t* gnu_bucket_;
  uint32_t* gnu_chain_;
  uint32_t gnu_maskwords_;
  uint32_t gnu_shift2_;
  ElfAddr* gnu_bloom_filter_;

  uint32_t sysv_nbucket_;
  uint32_t sysv_nchain_;
  uint32_t* sysv_bucket_;
  uint32_t* sysv_chain_;

  ElfSym* symtab_;

  const char* strtab_;
  size_t strtab_size_;

  std::string error_msg_;
};
211
// Initializes every field to its "nothing loaded yet" state. name is kept
// by pointer and used only for error messages, so it must outlive the loader.
TinyElfLoader::TinyElfLoader(const char* name)
    : did_load_(false),
      name_(name),
      loaded_phdr_(nullptr),
      loaded_phdr_num_(0),
      load_bias_(0),
      entry_point_(nullptr),
      dynamic_(nullptr),
      has_gnu_hash_(false),
      gnu_nbucket_(0),
      gnu_bucket_(nullptr),
      gnu_chain_(nullptr),
      gnu_maskwords_(0),
      gnu_shift2_(0),
      gnu_bloom_filter_(nullptr),
      sysv_nbucket_(0),
      sysv_nchain_(0),
      sysv_bucket_(nullptr),
      sysv_chain_(nullptr),
      symtab_(nullptr),
      strtab_(nullptr),
      strtab_size_(0) {}
234
CheckElfHeader(const ElfEhdr * header)235 bool TinyElfLoader::CheckElfHeader(const ElfEhdr* header) {
236 if (memcmp(header->e_ident, ELFMAG, SELFMAG) != 0) {
237 set_error_msg(&error_msg_, "\"%s\" has bad ELF magic", name_);
238 return false;
239 }
240
241 int elf_class = header->e_ident[EI_CLASS];
242 if (elf_class != kSupportedElfClass) {
243 set_error_msg(&error_msg_, "\"%s\" %s is not supported, expected %s.", name_,
244 EiClassString(elf_class), EiClassString(kSupportedElfClass));
245 return false;
246 }
247
248 if (header->e_ident[EI_DATA] != ELFDATA2LSB) {
249 set_error_msg(&error_msg_, "\"%s\" not little-endian: %d", name_, header->e_ident[EI_DATA]);
250 return false;
251 }
252
253 if (header->e_version != EV_CURRENT) {
254 set_error_msg(&error_msg_, "\"%s\" has unexpected e_version: %d", name_, header->e_version);
255 return false;
256 }
257
258 if (header->e_shentsize != sizeof(ElfShdr)) {
259 set_error_msg(&error_msg_, "\"%s\" has unsupported e_shentsize: 0x%x (expected 0x%zx)", name_,
260 header->e_shentsize, sizeof(ElfShdr));
261 return false;
262 }
263
264 if (header->e_shstrndx == 0) {
265 set_error_msg(&error_msg_, "\"%s\" has invalid e_shstrndx", name_);
266 return false;
267 }
268
269 // Like the kernel, we only accept program header tables that
270 // are smaller than 64KiB.
271 if (header->e_phnum < 1 || header->e_phnum > 65536 / sizeof(ElfPhdr)) {
272 set_error_msg(&error_msg_, "\"%s\" has invalid e_phnum: %zd", name_, header->e_phnum);
273 return false;
274 }
275
276 return true;
277 }
278
ReadElfHeader(int fd,ElfEhdr * header)279 bool TinyElfLoader::ReadElfHeader(int fd, ElfEhdr* header) {
280 ssize_t rc = TEMP_FAILURE_RETRY(pread64(fd, header, sizeof(*header), 0));
281 if (rc < 0) {
282 set_error_msg(&error_msg_, "can't read file \"%s\": %s", name_, strerror(errno));
283 return false;
284 }
285
286 if (rc != sizeof(*header)) {
287 set_error_msg(&error_msg_, "\"%s\" is too small to be an ELF executable: only found %zd bytes",
288 name_, static_cast<size_t>(rc));
289 return false;
290 }
291
292 return CheckElfHeader(header);
293 }
294
CheckFileRange(off64_t file_size,ElfAddr offset,size_t size,size_t alignment)295 bool TinyElfLoader::CheckFileRange(off64_t file_size, ElfAddr offset, size_t size,
296 size_t alignment) {
297 off64_t range_start = offset;
298 off64_t range_end;
299
300 return offset > 0 && !__builtin_add_overflow(range_start, size, &range_end) &&
301 (range_start < file_size) && (range_end <= file_size) && ((offset % alignment) == 0);
302 }
303
CheckMemoryRange(uintptr_t load_addr,size_t load_size,ElfAddr offset,size_t size,size_t alignment)304 bool TinyElfLoader::CheckMemoryRange(uintptr_t load_addr, size_t load_size, ElfAddr offset,
305 size_t size, size_t alignment) {
306 uintptr_t dummy;
307 uintptr_t offset_end;
308
309 return offset < load_size && !__builtin_add_overflow(load_addr, load_size, &dummy) &&
310 !__builtin_add_overflow(offset, size, &offset_end) && offset_end <= load_size &&
311 ((offset % alignment) == 0);
312 }
313
ReadProgramHeadersFromFile(const ElfEhdr * header,int fd,off64_t file_size,const ElfPhdr ** phdr_table,size_t * phdr_num)314 bool TinyElfLoader::ReadProgramHeadersFromFile(const ElfEhdr* header, int fd, off64_t file_size,
315 const ElfPhdr** phdr_table, size_t* phdr_num) {
316 size_t phnum = header->e_phnum;
317 size_t size = phnum * sizeof(ElfPhdr);
318
319 if (!CheckFileRange(file_size, header->e_phoff, size, alignof(ElfPhdr))) {
320 set_error_msg(&error_msg_, "\"%s\" has invalid phdr offset/size: %zu/%zu", name_,
321 static_cast<size_t>(header->e_phoff), size);
322 return false;
323 }
324
325 if (!phdr_fragment_.Map(fd, 0, header->e_phoff, size)) {
326 set_error_msg(&error_msg_, "\"%s\" phdr mmap failed: %s", name_, strerror(errno));
327 return false;
328 }
329
330 *phdr_table = static_cast<ElfPhdr*>(phdr_fragment_.data());
331 *phdr_num = phnum;
332 return true;
333 }
334
ReadProgramHeadersFromMemory(const ElfEhdr * header,uintptr_t load_addr,size_t load_size,const ElfPhdr ** phdr_table,size_t * phdr_num)335 bool TinyElfLoader::ReadProgramHeadersFromMemory(const ElfEhdr* header, uintptr_t load_addr,
336 size_t load_size, const ElfPhdr** phdr_table,
337 size_t* phdr_num) {
338 size_t phnum = header->e_phnum;
339 size_t size = phnum * sizeof(ElfPhdr);
340
341 if (!CheckMemoryRange(load_addr, load_size, header->e_phoff, size, alignof(ElfPhdr))) {
342 set_error_msg(&error_msg_, "\"%s\" has invalid phdr offset/size: %zu/%zu", name_,
343 static_cast<size_t>(header->e_phoff), size);
344 return false;
345 }
346
347 *phdr_table = reinterpret_cast<const ElfPhdr*>(load_addr + header->e_phoff);
348 *phdr_num = phnum;
349 return true;
350 }
351
Reserve(void * hint,size_t size,TinyLoader::mmap64_fn_t mmap64_fn)352 uint8_t* TinyElfLoader::Reserve(void* hint, size_t size, TinyLoader::mmap64_fn_t mmap64_fn) {
353 int mmap_flags = MAP_PRIVATE | MAP_ANONYMOUS;
354
355 void* mmap_ptr = mmap64_fn(hint, size, PROT_NONE, mmap_flags, -1, 0);
356 if (mmap_ptr == MAP_FAILED) {
357 return nullptr;
358 }
359
360 return reinterpret_cast<uint8_t*>(mmap_ptr);
361 }
362
// Reserves the address-space extent required by all PT_LOAD segments.
// On success fills *load_start (reservation base), *load_size (reserved
// bytes) and *load_bias (delta between link-time vaddrs and actual
// addresses). ET_EXEC binaries are placed at their linked address; others
// are placed anywhere, with extra trimming when align exceeds a page.
bool TinyElfLoader::ReserveAddressSpace(ElfHalf e_type, const ElfPhdr* phdr_table, size_t phdr_num,
                                        size_t align, TinyLoader::mmap64_fn_t mmap64_fn,
                                        TinyLoader::munmap_fn_t munmap_fn, void** load_start,
                                        size_t* load_size, uintptr_t* load_bias) {
  ElfAddr min_vaddr;
  size_t size = phdr_table_get_load_size(phdr_table, phdr_num, &min_vaddr);
  if (size == 0) {
    set_error_msg(&error_msg_, "\"%s\" has no loadable segments", name_);
    return false;
  }

  // The linked base address of the extent; for ET_EXEC this is mandatory.
  uint8_t* addr = reinterpret_cast<uint8_t*>(min_vaddr);
  uint8_t* start;

  if (e_type == ET_EXEC) {
    // Reserve with hint. The fixed-position executable must land exactly at
    // its linked address, so anything else is an error.
    start = Reserve(addr, size, mmap64_fn);
    if (start != addr) {
      if (start != nullptr) {
        // Got a reservation, but at the wrong place: release it.
        munmap_fn(start, size);
      }
      set_error_msg(&error_msg_, "couldn't reserve %zd bytes of address space at %p for \"%s\"",
                    size, addr, name_);

      return false;
    }
  } else if (align <= berberis::kPageSize) {
    // Reserve. Ordinary page alignment: any address the kernel picks works.
    start = Reserve(nullptr, size, mmap64_fn);
    if (start == nullptr) {
      set_error_msg(&error_msg_, "couldn't reserve %zd bytes of address space for \"%s\"", size,
                    name_);
      return false;
    }
  } else {
    // Reserve overaligned: over-reserve by align bytes, then trim the
    // misaligned head and the unused tail so exactly [start, start+size)
    // remains.
    CHECK(berberis::IsPowerOf2(align));
    uint8_t* unaligned_start = Reserve(nullptr, align + size, mmap64_fn);
    if (unaligned_start == nullptr) {
      set_error_msg(&error_msg_,
                    "couldn't reserve %zd bytes of address space aligned on %zd for \"%s\"", size,
                    align, name_);
      return false;
    }
    start = berberis::AlignUp(unaligned_start, align);
    munmap_fn(unaligned_start, start - unaligned_start);
    munmap_fn(start + size, unaligned_start + align - start);
  }

  *load_start = start;
  *load_size = size;
  // Bias = actual base minus linked base; zero for ET_EXEC by construction.
  *load_bias = start - addr;
  return true;
}
417
// Reserves address space and maps every PT_LOAD segment of the file into it:
// file-backed pages for the p_filesz portion, zero-filled anonymous pages for
// the remainder up to p_memsz (.bss). Rejects W+X segments. On success
// *load_start/*load_size describe the reservation.
bool TinyElfLoader::LoadSegments(int fd, size_t file_size, ElfHalf e_type,
                                 const ElfPhdr* phdr_table, size_t phdr_num, size_t align,
                                 TinyLoader::mmap64_fn_t mmap64_fn,
                                 TinyLoader::munmap_fn_t munmap_fn, void** load_start,
                                 size_t* load_size) {
  uintptr_t load_bias = 0;
  if (!ReserveAddressSpace(e_type, phdr_table, phdr_num, align, mmap64_fn, munmap_fn, load_start,
                           load_size, &load_bias)) {
    return false;
  }

  for (size_t i = 0; i < phdr_num; ++i) {
    const ElfPhdr* phdr = &phdr_table[i];

    if (phdr->p_type != PT_LOAD) {
      continue;
    }

    // Segment addresses in memory.
    ElfAddr seg_start = phdr->p_vaddr + load_bias;
    ElfAddr seg_end = seg_start + phdr->p_memsz;

    ElfAddr seg_page_start = page_align_down(seg_start);
    ElfAddr seg_page_end = page_align_up(seg_end);

    // End of the file-backed portion of the segment in memory.
    ElfAddr seg_file_end = seg_start + phdr->p_filesz;

    // File offsets.
    ElfAddr file_start = phdr->p_offset;
    ElfAddr file_end = file_start + phdr->p_filesz;

    ElfAddr file_page_start = page_align_down(file_start);
    ElfAddr file_length = file_end - file_page_start;

    // NOTE(review): file_size is declared size_t, so `<= 0` only catches 0
    // and the %PRId64 specifier assumes a 64-bit value — confirm the intended
    // parameter type (off64_t?).
    if (file_size <= 0) {
      set_error_msg(&error_msg_, "\"%s\" invalid file size: %" PRId64, name_, file_size);
      return false;
    }

    if (file_end > static_cast<size_t>(file_size)) {
      set_error_msg(&error_msg_,
                    "invalid ELF file \"%s\" load segment[%zd]:"
                    " p_offset (%p) + p_filesz (%p) ( = %p) past end of file (0x%" PRIx64 ")",
                    name_, i, reinterpret_cast<void*>(phdr->p_offset),
                    reinterpret_cast<void*>(phdr->p_filesz), reinterpret_cast<void*>(file_end),
                    file_size);
      return false;
    }

    if (file_length != 0) {
      int prot = PFLAGS_TO_PROT(phdr->p_flags);
      if ((prot & (PROT_EXEC | PROT_WRITE)) == (PROT_EXEC | PROT_WRITE)) {
        set_error_msg(&error_msg_, "\"%s\": W + E load segments are not allowed", name_);
        return false;
      }

      // Map the file-backed part over the reservation (MAP_FIXED).
      void* seg_addr = mmap64_fn(reinterpret_cast<void*>(seg_page_start), file_length, prot,
                                 MAP_FIXED | MAP_PRIVATE, fd, file_page_start);
      if (seg_addr == MAP_FAILED) {
        set_error_msg(&error_msg_, "couldn't map \"%s\" segment %zd: %s", name_, i,
                      strerror(errno));
        return false;
      }
    }

    // if the segment is writable, and does not end on a page boundary,
    // zero-fill it until the page limit.
    if ((phdr->p_flags & PF_W) != 0 && page_offset(seg_file_end) > 0) {
      memset(reinterpret_cast<void*>(seg_file_end),
             0,
             berberis::kPageSize - page_offset(seg_file_end));
    }

    seg_file_end = page_align_up(seg_file_end);

    // seg_file_end is now the first page address after the file
    // content. If seg_end is larger, we need to zero anything
    // between them. This is done by using a private anonymous
    // map for all extra pages.
    if (seg_page_end > seg_file_end) {
      size_t zeromap_size = seg_page_end - seg_file_end;
      void* zeromap =
          mmap64_fn(reinterpret_cast<void*>(seg_file_end), zeromap_size,
                    PFLAGS_TO_PROT(phdr->p_flags), MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      if (zeromap == MAP_FAILED) {
        set_error_msg(&error_msg_, "couldn't zero fill \"%s\" gap: %s", name_, strerror(errno));
        return false;
      }

      // Label the anonymous pages so they show up as ".bss" in /proc maps.
      berberis::SetVmaAnonName(zeromap, zeromap_size, ".bss");
    }
  }

  return true;
}
513
FindDynamicSegment(const ElfEhdr * header)514 bool TinyElfLoader::FindDynamicSegment(const ElfEhdr* header) {
515 // Static executables do not have PT_DYNAMIC
516 if (header->e_type == ET_EXEC) {
517 return true;
518 }
519
520 for (size_t i = 0; i < loaded_phdr_num_; ++i) {
521 const ElfPhdr& phdr = loaded_phdr_[i];
522 if (phdr.p_type == PT_DYNAMIC) {
523 // TODO(dimitry): Check all addresses and sizes referencing loaded segments.
524 dynamic_ = reinterpret_cast<ElfDyn*>(load_bias_ + phdr.p_vaddr);
525 return true;
526 }
527 }
528
529 set_error_msg(&error_msg_, "dynamic segment was not found in \"%s\"", name_);
530 return false;
531 }
532
// Walks the dynamic section and caches the pointers needed for symbol lookup
// (hash tables, symtab, strtab). For ET_EXEC only the entry point is set.
// Fails if any of DT_SYMTAB / DT_STRTAB / DT_STRSZ is missing.
bool TinyElfLoader::InitializeFields(const ElfEhdr* header) {
  if (header->e_entry != 0) {
    entry_point_ = reinterpret_cast<void*>(load_bias_ + header->e_entry);
  }

  // There is nothing else to do for a static executable.
  if (header->e_type == ET_EXEC) {
    return true;
  }

  for (const ElfDyn* d = dynamic_; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_GNU_HASH) {
      // GNU hash header words: [0]=nbucket, [1]=symndx, [2]=maskwords,
      // [3]=shift2; the bloom filter starts right after the 16-byte header,
      // followed by the buckets and then the chain array (offset by symndx).
      has_gnu_hash_ = true;
      gnu_nbucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[0];
      gnu_maskwords_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[2];
      gnu_shift2_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[3];
      gnu_bloom_filter_ = reinterpret_cast<ElfAddr*>(load_bias_ + d->d_un.d_ptr + 16);
      gnu_bucket_ = reinterpret_cast<uint32_t*>(gnu_bloom_filter_ + gnu_maskwords_);
      gnu_chain_ =
          gnu_bucket_ + gnu_nbucket_ - reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[1];

      if (!powerof2(gnu_maskwords_)) {
        set_error_msg(&error_msg_,
                      "invalid maskwords for gnu_hash = 0x%x, in \"%s\" expecting power of two",
                      gnu_maskwords_, name_);

        return false;
      }

      // Turn the word count into the bitmask used during lookup.
      --gnu_maskwords_;
    } else if (d->d_tag == DT_HASH) {
      // SysV hash layout: nbucket, nchain, then buckets, then chains.
      sysv_nbucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[0];
      sysv_nchain_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr)[1];
      sysv_bucket_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr + 8);
      sysv_chain_ = reinterpret_cast<uint32_t*>(load_bias_ + d->d_un.d_ptr + 8 + sysv_nbucket_ * 4);
    } else if (d->d_tag == DT_SYMTAB) {
      symtab_ = reinterpret_cast<ElfSym*>(load_bias_ + d->d_un.d_ptr);
    } else if (d->d_tag == DT_STRTAB) {
      strtab_ = reinterpret_cast<const char*>(load_bias_ + d->d_un.d_ptr);
    } else if (d->d_tag == DT_STRSZ) {
      strtab_size_ = d->d_un.d_val;
    }
  }

  if (symtab_ == nullptr) {
    set_error_msg(&error_msg_, "missing DT_SYMTAB in \"%s\"", name_);
    return false;
  }

  if (strtab_ == nullptr) {
    set_error_msg(&error_msg_, "missing DT_STRTAB in \"%s\"", name_);
    return false;
  }

  if (strtab_size_ == 0) {
    set_error_msg(&error_msg_, "missing or invalid (0) DT_STRSZ in \"%s\"", name_);
    return false;
  }

  return true;
}
594
// Parses an ELF image already mapped at load_ptr: validates the header,
// locates the program headers, computes the load bias, extracts the dynamic
// section fields, and fills *loaded_elf_file (GNU-hash or SysV-hash variant
// depending on what the binary provides).
bool TinyElfLoader::Parse(void* load_ptr, size_t load_size, LoadedElfFile* loaded_elf_file) {
  uintptr_t load_addr = reinterpret_cast<uintptr_t>(load_ptr);
  const ElfEhdr* header = reinterpret_cast<const ElfEhdr*>(load_addr);
  if (!CheckElfHeader(header)) {
    return false;
  }

  if (!ReadProgramHeadersFromMemory(header, load_addr, load_size, &loaded_phdr_,
                                    &loaded_phdr_num_)) {
    return false;
  }

  // Bias = actual base address minus the linked minimum vaddr.
  ElfAddr min_vaddr;
  phdr_table_get_load_size(loaded_phdr_, loaded_phdr_num_, &min_vaddr);
  load_bias_ = load_addr - min_vaddr;

  if (!FindDynamicSegment(header) || !InitializeFields(header)) {
    return false;
  }

  if (has_gnu_hash_) {
    *loaded_elf_file = LoadedElfFile(header->e_type, load_ptr, load_bias_, entry_point_,
                                     loaded_phdr_, loaded_phdr_num_, dynamic_, gnu_nbucket_,
                                     gnu_bucket_, gnu_chain_, gnu_maskwords_, gnu_shift2_,
                                     gnu_bloom_filter_, symtab_, strtab_, strtab_size_);
  } else {
    *loaded_elf_file =
        LoadedElfFile(header->e_type, load_ptr, load_bias_, entry_point_, loaded_phdr_,
                      loaded_phdr_num_, dynamic_, sysv_nbucket_, sysv_nchain_, sysv_bucket_,
                      sysv_chain_, symtab_, strtab_, strtab_size_);
  }
  return true;
}
628
629 // Returns success, fd and file_size.
OpenFile(const char * path)630 std::tuple<bool, int, size_t> TinyElfLoader::OpenFile(const char* path) {
631 int fd = TEMP_FAILURE_RETRY(open(path, O_RDONLY | O_CLOEXEC));
632 if (fd == -1) {
633 set_error_msg(&error_msg_, "unable to open the file \"%s\": %s", path, strerror(errno));
634 return {false, -1, 0};
635 }
636
637 struct stat file_stat;
638 if (TEMP_FAILURE_RETRY(fstat(fd, &file_stat)) != 0) {
639 set_error_msg(
640 &error_msg_, "unable to stat file for the library \"%s\": %s", path, strerror(errno));
641 close(fd);
642 return {false, -1, 0};
643 }
644
645 return {true, fd, file_stat.st_size};
646 }
647
CalculateLoadSize(const char * path)648 std::tuple<bool, size_t> TinyElfLoader::CalculateLoadSize(const char* path) {
649 auto [is_opened, fd, file_size] = OpenFile(path);
650 if (!is_opened) {
651 return {false, 0};
652 }
653
654 berberis::ScopedFd scoped_fd(fd);
655
656 ElfEhdr header;
657 const ElfPhdr* phdr_table = nullptr;
658 size_t phdr_num = 0;
659
660 if (!ReadElfHeader(fd, &header) ||
661 !ReadProgramHeadersFromFile(&header, fd, file_size, &phdr_table, &phdr_num)) {
662 return {false, 0};
663 }
664
665 ElfAddr min_vaddr;
666 size_t size = phdr_table_get_load_size(phdr_table, phdr_num, &min_vaddr);
667 if (size == 0) {
668 set_error_msg(&error_msg_, "\"%s\" has no loadable segments", name_);
669 return {false, 0};
670 }
671
672 return {true, size};
673 }
674
LoadFromFile(const char * path,size_t align,TinyLoader::mmap64_fn_t mmap64_fn,TinyLoader::munmap_fn_t munmap_fn,LoadedElfFile * loaded_elf_file)675 bool TinyElfLoader::LoadFromFile(const char* path,
676 size_t align,
677 TinyLoader::mmap64_fn_t mmap64_fn,
678 TinyLoader::munmap_fn_t munmap_fn,
679 LoadedElfFile* loaded_elf_file) {
680 CHECK(!did_load_);
681 void* load_addr = nullptr;
682 size_t load_size = 0;
683 ElfEhdr header;
684 const ElfPhdr* phdr_table = nullptr;
685 size_t phdr_num = 0;
686
687 auto [is_opened, fd, file_size] = OpenFile(path);
688 if (!is_opened) {
689 return false;
690 }
691
692 berberis::ScopedFd scoped_fd(fd);
693
694 did_load_ = ReadElfHeader(fd, &header) &&
695 ReadProgramHeadersFromFile(&header, fd, file_size, &phdr_table, &phdr_num) &&
696 LoadSegments(fd, file_size, header.e_type, phdr_table, phdr_num, align, mmap64_fn,
697 munmap_fn, &load_addr, &load_size) &&
698 Parse(load_addr, load_size, loaded_elf_file);
699
700 return did_load_;
701 }
702
LoadFromMemory(void * load_addr,size_t load_size,LoadedElfFile * loaded_elf_file)703 bool TinyElfLoader::LoadFromMemory(void* load_addr, size_t load_size,
704 LoadedElfFile* loaded_elf_file) {
705 CHECK(!did_load_);
706 did_load_ = Parse(load_addr, load_size, loaded_elf_file);
707 return did_load_;
708 }
709
710 } // namespace
711
LoadFromFile(const char * path,size_t align,TinyLoader::mmap64_fn_t mmap64_fn,TinyLoader::munmap_fn_t munmap_fn,LoadedElfFile * loaded_elf_file,std::string * error_msg)712 bool TinyLoader::LoadFromFile(const char* path,
713 size_t align,
714 TinyLoader::mmap64_fn_t mmap64_fn,
715 TinyLoader::munmap_fn_t munmap_fn,
716 LoadedElfFile* loaded_elf_file,
717 std::string* error_msg) {
718 TinyElfLoader loader(path);
719
720 if (!loader.LoadFromFile(path, align, mmap64_fn, munmap_fn, loaded_elf_file)) {
721 if (error_msg != nullptr) {
722 *error_msg = loader.error_msg();
723 }
724
725 return false;
726 }
727
728 return true;
729 }
730
LoadFromMemory(const char * path,void * address,size_t size,LoadedElfFile * loaded_elf_file,std::string * error_msg)731 bool TinyLoader::LoadFromMemory(const char* path, void* address, size_t size,
732 LoadedElfFile* loaded_elf_file, std::string* error_msg) {
733 TinyElfLoader loader(path);
734 if (!loader.LoadFromMemory(address, size, loaded_elf_file)) {
735 if (error_msg != nullptr) {
736 *error_msg = loader.error_msg();
737 }
738
739 return false;
740 }
741
742 return true;
743 }
744
CalculateLoadSize(const char * path,std::string * error_msg)745 size_t TinyLoader::CalculateLoadSize(const char* path, std::string* error_msg) {
746 TinyElfLoader loader(path);
747 auto [success, size] = loader.CalculateLoadSize(path);
748 if (success) {
749 return size;
750 }
751
752 if (error_msg != nullptr) {
753 *error_msg = loader.error_msg();
754 }
755
756 return 0;
757 }
758