/*
 * This file is part of the flashrom project.
 *
 * Copyright (C) 2009 Peter Stuge <[email protected]>
 * Copyright (C) 2009 coresystems GmbH
 * Copyright (C) 2010 Carl-Daniel Hailfinger
 * Copyright (C) 2010 Rudolf Marek <[email protected]>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <unistd.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "flash.h"
#include "platform.h"
#include "hwaccess_physmap.h"

#if !defined(__DJGPP__) && !defined(__LIBPAYLOAD__)
/* No file access needed/possible to get mmap access permissions or access MSR. */
#include <unistd.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <fcntl.h>
#endif

#ifdef __DJGPP__
#include <dpmi.h>
#include <malloc.h>
#include <sys/nearptr.h>

#define ONE_MEGABYTE (1024 * 1024)
#define MEM_DEV "dpmi"

static void *realmem_map_aligned;

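/* Map the first megabyte of physical memory into a page-aligned buffer obtained from
 * malloc(). The mapping is created once, cached in realmem_map_aligned and reused. */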
static void *map_first_meg(uintptr_t phys_addr, size_t len)
{
	void *realmem_map;
	size_t pagesize;

	if (realmem_map_aligned)
		return realmem_map_aligned + phys_addr;

	/* valloc() from DJGPP 2.05 does not work properly */
	pagesize = getpagesize();

	realmem_map = malloc(ONE_MEGABYTE + pagesize);

	if (!realmem_map)
		return ERROR_PTR;

	realmem_map_aligned = (void *)(((size_t) realmem_map +
		(pagesize - 1)) & ~(pagesize - 1));

	if (__djgpp_map_physical_memory(realmem_map_aligned, ONE_MEGABYTE, 0)) {
		free(realmem_map);
		realmem_map_aligned = NULL;
		return ERROR_PTR;
	}

	return realmem_map_aligned + phys_addr;
}

static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	int ret;
	__dpmi_meminfo mi;

	/* Enable 4GB limit on DS descriptor. */
	if (!__djgpp_nearptr_enable())
		return ERROR_PTR;

	if ((phys_addr + len - 1) < ONE_MEGABYTE) {
		/* We need to use another method to map first 1MB. */
		return map_first_meg(phys_addr, len);
	}

	mi.address = phys_addr;
	mi.size = len;
	ret = __dpmi_physical_address_mapping(&mi);

	if (ret != 0)
		return ERROR_PTR;

	return (void *) mi.address + __djgpp_conventional_base;
}

#define sys_physmap_rw_uncached	sys_physmap
#define sys_physmap_ro_cached	sys_physmap

static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	__dpmi_meminfo mi;

	/* There is no known way to unmap the first 1 MB. The DPMI server will
	 * do this for us on exit.
	 */
	if ((virt_addr >= realmem_map_aligned) &&
	    ((virt_addr + len) <= (realmem_map_aligned + ONE_MEGABYTE))) {
		return;
	}

	mi.address = (unsigned long) virt_addr;
	__dpmi_free_physical_address_mapping(&mi);
}

#elif defined(__LIBPAYLOAD__)
#include <arch/virtual.h>

#define MEM_DEV ""

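/* Under libpayload mapping reduces to the phys_to_virt() address translation it already
 * provides, and unmapping is a no-op. */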
static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	return (void *)phys_to_virt(phys_addr);
}

#define sys_physmap_rw_uncached	sys_physmap
#define sys_physmap_ro_cached	sys_physmap

static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
}
#elif defined(__MACH__) && defined(__APPLE__)
#include <DirectHW/DirectHW.h>

#define MEM_DEV "DirectHW"

static void *sys_physmap(uintptr_t phys_addr, size_t len)
{
	/* The short form of ?: is a GNU extension.
	 * FIXME: map_physical returns NULL both for errors and for success
	 * if the region is mapped at virtual address zero. If in doubt, report
	 * an error until a better interface exists.
	 */
	return map_physical(phys_addr, len) ? : ERROR_PTR;
}

/* The OS X driver does not differentiate between mapping types. */
#define sys_physmap_rw_uncached	sys_physmap
#define sys_physmap_ro_cached	sys_physmap

static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	unmap_physical(virt_addr, len);
}

#else
#include <sys/mman.h>

#if defined (__sun) && (defined(__i386) || defined(__amd64))
#  define MEM_DEV "/dev/xsvc"
#else
#  define MEM_DEV "/dev/mem"
#endif

static int fd_mem = -1;
static int fd_mem_cached = -1;

/* For MMIO access. Must be uncached, doesn't make sense to restrict to ro. */
static void *sys_physmap_rw_uncached(uintptr_t phys_addr, size_t len)
{
	void *virt_addr;

	if (-1 == fd_mem) {
		/* Open the memory device UNCACHED. Important for MMIO. */
		if (-1 == (fd_mem = open(MEM_DEV, O_RDWR | O_SYNC))) {
			msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
			return ERROR_PTR;
		}
	}

	virt_addr = mmap(NULL, len, PROT_WRITE | PROT_READ, MAP_SHARED, fd_mem, (off_t)phys_addr);
	return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
}

/* For reading DMI/coreboot/whatever tables. We should never write, and we
 * do not care about caching.
 */
static void *sys_physmap_ro_cached(uintptr_t phys_addr, size_t len)
{
	void *virt_addr;

	if (-1 == fd_mem_cached) {
		/* Open the memory device CACHED. */
		if (-1 == (fd_mem_cached = open(MEM_DEV, O_RDWR))) {
			msg_perr("Critical error: open(" MEM_DEV "): %s\n", strerror(errno));
			return ERROR_PTR;
		}
	}

	virt_addr = mmap(NULL, len, PROT_READ, MAP_SHARED, fd_mem_cached, (off_t)phys_addr);
	return MAP_FAILED == virt_addr ? ERROR_PTR : virt_addr;
}

static void sys_physunmap_unaligned(void *virt_addr, size_t len)
{
	munmap(virt_addr, len);
}
#endif

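/* Self-documenting argument values for physmap_common() below. */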
#define PHYSM_RW	0
#define PHYSM_RO	1
#define PHYSM_NOCLEANUP	0
#define PHYSM_CLEANUP	1
#define PHYSM_EXACT	0
#define PHYSM_ROUND	1

/* Round start to nearest page boundary below and set len so that the resulting address range ends at the lowest
 * possible page boundary where the original address range is still entirely contained. It returns the
 * difference between the rounded start address and the original start address. */
static uintptr_t round_to_page_boundaries(uintptr_t *start, size_t *len)
{
	uintptr_t page_size = getpagesize();
	uintptr_t page_mask = ~(page_size-1);
	uintptr_t end = *start + *len;
	uintptr_t old_start = *start;
	msg_gspew("page_size=%" PRIxPTR "\n", page_size);
	msg_gspew("pre-rounding:  start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
		  PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, end);
	*start = *start & page_mask;
	end = (end + page_size - 1) & page_mask;
	*len = end - *start;
	msg_gspew("post-rounding: start=0x%0*" PRIxPTR ", len=0x%zx, end=0x%0*" PRIxPTR "\n",
		  PRIxPTR_WIDTH, *start, *len, PRIxPTR_WIDTH, *start + *len);
	return old_start - *start;
}

struct undo_physmap_data {
	void *virt_addr;
	size_t len;
};

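/* Shutdown callback registered by physmap_common() for PHYSM_CLEANUP mappings:
 * unmaps the region described by data and frees the bookkeeping struct. */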
static int undo_physmap(void *data)
{
	if (data == NULL) {
		msg_perr("%s: tried to physunmap without valid data!\n", __func__);
		return 1;
	}
	struct undo_physmap_data *d = data;
	physunmap_unaligned(d->virt_addr, d->len);
	free(data);
	return 0;
}

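/* Map len bytes of physical memory at phys_addr and return the corresponding virtual address,
 * or ERROR_PTR on failure. readonly selects the cached read-only mapping method, autocleanup
 * registers a shutdown function that undoes the mapping, and round widens the range to page
 * boundaries while keeping the returned pointer aimed at the originally requested address. */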
static void *physmap_common(const char *descr, uintptr_t phys_addr, size_t len, bool readonly, bool autocleanup,
			    bool round)
{
	void *virt_addr;
	uintptr_t offset = 0;

	if (len == 0) {
		msg_pspew("Not mapping %s, zero size at 0x%0*" PRIxPTR ".\n", descr, PRIxPTR_WIDTH, phys_addr);
		return ERROR_PTR;
	}

	if (round)
		offset = round_to_page_boundaries(&phys_addr, &len);

	if (readonly)
		virt_addr = sys_physmap_ro_cached(phys_addr, len);
	else
		virt_addr = sys_physmap_rw_uncached(phys_addr, len);

	if (ERROR_PTR == virt_addr) {
		if (NULL == descr)
			descr = "memory";
		msg_perr("Error accessing %s, 0x%zx bytes at 0x%0*" PRIxPTR "\n",
			 descr, len, PRIxPTR_WIDTH, phys_addr);
		msg_perr(MEM_DEV " mmap failed: %s\n", strerror(errno));
#ifdef __linux__
		if (EINVAL == errno) {
			msg_perr("In Linux this error can be caused by the CONFIG_NONPROMISC_DEVMEM (<2.6.27),\n");
			msg_perr("CONFIG_STRICT_DEVMEM (>=2.6.27) and CONFIG_X86_PAT kernel options.\n");
			msg_perr("Please check if either is enabled in your kernel before reporting a failure.\n");
			msg_perr("You can override CONFIG_X86_PAT at boot with the nopat kernel parameter but\n");
			msg_perr("disabling the other option unfortunately requires a kernel recompile. Sorry!\n");
		}
#elif defined (__OpenBSD__)
		msg_perr("Please set securelevel=-1 in /etc/rc.securelevel "
			 "and reboot, or reboot into\n"
			 "single user mode.\n");
#endif
		return ERROR_PTR;
	}

	if (autocleanup) {
		struct undo_physmap_data *d = malloc(sizeof(*d));
		if (d == NULL) {
			msg_perr("%s: Out of memory!\n", __func__);
			physunmap_unaligned(virt_addr, len);
			return ERROR_PTR;
		}

		d->virt_addr = virt_addr;
		d->len = len;
		if (register_shutdown(undo_physmap, d) != 0) {
			msg_perr("%s: Could not register shutdown function!\n", __func__);
			physunmap_unaligned(virt_addr, len);
			return ERROR_PTR;
		}
	}

	return virt_addr + offset;
}

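/* Unmap a region without rounding the address range to page boundaries; the counterpart
 * of physmap_ro_unaligned(). */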
void physunmap_unaligned(void *virt_addr, size_t len)
{
	/* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
	if (virt_addr == ERROR_PTR) {
		msg_perr("Trying to unmap a nonexisting mapping!\n"
			 "Please report a bug at [email protected]\n");
		return;
	}

	sys_physunmap_unaligned(virt_addr, len);
}

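/* Unmap a region returned by one of the page-rounding physmap variants: the range is
 * rounded to page boundaries exactly as during mapping before being unmapped. */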
void physunmap(void *virt_addr, size_t len)
{
	uintptr_t tmp;

	/* No need to check for zero size, such mappings would have yielded ERROR_PTR. */
	if (virt_addr == ERROR_PTR) {
		msg_perr("Trying to unmap a nonexisting mapping!\n"
			 "Please report a bug at [email protected]\n");
		return;
	}
	tmp = (uintptr_t)virt_addr;
	/* We assume that the virtual address of a page-aligned physical address is page-aligned as well. By
	 * extension, rounding a virtual unaligned address as returned by physmap should yield the same offset
	 * between rounded and original virtual address as between rounded and original physical address.
	 */
	round_to_page_boundaries(&tmp, &len);
	virt_addr = (void *)tmp;
	physunmap_unaligned(virt_addr, len);
}

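/* Convenience wrappers around physmap_common():
 *  - physmap:              read-write, caller unmaps, page-rounded
 *  - rphysmap:             read-write, unmapped automatically on shutdown, page-rounded
 *  - physmap_ro:           read-only, caller unmaps, page-rounded
 *  - physmap_ro_unaligned: read-only, caller unmaps, exact (not rounded) range
 */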
void *physmap(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

void *rphysmap(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RW, PHYSM_CLEANUP, PHYSM_ROUND);
}

void *physmap_ro(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_ROUND);
}

void *physmap_ro_unaligned(const char *descr, uintptr_t phys_addr, size_t len)
{
	return physmap_common(descr, phys_addr, len, PHYSM_RO, PHYSM_NOCLEANUP, PHYSM_EXACT);
}

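/* Illustrative usage sketch (not part of this file; the base address and register offset
 * are made up for the example): map a 4 KiB MMIO window, toggle a bit, and unmap again.
 *
 *	uint8_t *bar = physmap("example MMIO window", 0xfed1c000, 0x1000);
 *	if (bar == ERROR_PTR)
 *		return 1;
 *	mmio_writel(mmio_readl(bar + 0x10) | 1, bar + 0x10);
 *	physunmap(bar, 0x1000);
 */
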
/* Prevent reordering and/or merging of reads/writes to hardware.
 * Such reordering and/or merging would break device accesses which depend on the exact access order.
 */
static inline void sync_primitive(void)
{
/* This is not needed for...
 * - x86: uses uncached accesses which have a strongly ordered memory model.
 * - MIPS: uses uncached accesses in mode 2 on /dev/mem which has also a strongly ordered memory model.
 * - ARM: uses a strongly ordered memory model for device memories.
 *
 * See also https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/tree/Documentation/memory-barriers.txt
 */
// cf. http://lxr.free-electrons.com/source/arch/powerpc/include/asm/barrier.h
#if defined(__powerpc) || defined(__powerpc__) || defined(__powerpc64__) || defined(__POWERPC__) || \
      defined(__ppc__) || defined(__ppc64__) || defined(_M_PPC) || defined(_ARCH_PPC) || \
      defined(_ARCH_PPC64) || defined(__ppc)
	__asm__("eieio" : : : "memory");
#elif (__sparc__) || defined (__sparc)
#if defined(__sparc_v9__) || defined(__sparcv9)
	/* Sparc V9 CPUs support three different memory orderings that range from x86-like TSO to PowerPC-like
	 * RMO. The modes can be switched at runtime thus to make sure we maintain the right order of access we
	 * use the strongest hardware memory barriers that exist on Sparc V9. */
	__asm__ volatile ("membar #Sync" ::: "memory");
#elif defined(__sparc_v8__) || defined(__sparcv8)
	/* On SPARC V8 there is no RMO just PSO and that does not apply to I/O accesses... but if V8 code is run
	 * on V9 CPUs it might apply... or not... we issue a write barrier anyway. That's the most suitable
	 * operation in the V8 instruction set anyway. If you know better then please tell us. */
	__asm__ volatile ("stbar");
#else
	#error Unknown and/or unsupported SPARC instruction set version detected.
#endif
#endif
}

void mmio_writeb(uint8_t val, void *addr)
{
	*(volatile uint8_t *) addr = val;
	sync_primitive();
}

void mmio_writew(uint16_t val, void *addr)
{
	*(volatile uint16_t *) addr = val;
	sync_primitive();
}

void mmio_writel(uint32_t val, void *addr)
{
	*(volatile uint32_t *) addr = val;
	sync_primitive();
}

uint8_t mmio_readb(const void *addr)
{
	return *(volatile const uint8_t *) addr;
}

uint16_t mmio_readw(const void *addr)
{
	return *(volatile const uint16_t *) addr;
}

uint32_t mmio_readl(const void *addr)
{
	return *(volatile const uint32_t *) addr;
}

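/* Bulk read from MMIO into a RAM buffer; a plain memcpy without per-access barriers. */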
void mmio_readn(const void *addr, uint8_t *buf, size_t len)
{
	memcpy(buf, addr, len);
	return;
}

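/* Little-endian accessors: convert between host and little-endian byte order around the
 * plain MMIO accessors above. */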
void mmio_le_writeb(uint8_t val, void *addr)
{
	mmio_writeb(cpu_to_le8(val), addr);
}

void mmio_le_writew(uint16_t val, void *addr)
{
	mmio_writew(cpu_to_le16(val), addr);
}

void mmio_le_writel(uint32_t val, void *addr)
{
	mmio_writel(cpu_to_le32(val), addr);
}

uint8_t mmio_le_readb(const void *addr)
{
	return le_to_cpu8(mmio_readb(addr));
}

uint16_t mmio_le_readw(const void *addr)
{
	return le_to_cpu16(mmio_readw(addr));
}

uint32_t mmio_le_readl(const void *addr)
{
	return le_to_cpu32(mmio_readl(addr));
}

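/* Machinery for the rmmio_* accessors below: remember the value currently found at an MMIO
 * address and register a shutdown function that writes it back on exit. */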
enum mmio_write_type {
	mmio_write_type_b,
	mmio_write_type_w,
	mmio_write_type_l,
};

struct undo_mmio_write_data {
	void *addr;
	int reg;
	enum mmio_write_type type;
	union {
		uint8_t bdata;
		uint16_t wdata;
		uint32_t ldata;
	};
};

static int undo_mmio_write(void *p)
{
	struct undo_mmio_write_data *data = p;
	msg_pdbg("Restoring MMIO space at %p\n", data->addr);
	switch (data->type) {
	case mmio_write_type_b:
		mmio_writeb(data->bdata, data->addr);
		break;
	case mmio_write_type_w:
		mmio_writew(data->wdata, data->addr);
		break;
	case mmio_write_type_l:
		mmio_writel(data->ldata, data->addr);
		break;
	}
	/* p was allocated in register_undo_mmio_write. */
	free(p);
	return 0;
}

#define register_undo_mmio_write(a, c)					\
{									\
	struct undo_mmio_write_data *undo_mmio_write_data;		\
	undo_mmio_write_data = malloc(sizeof(*undo_mmio_write_data));	\
	if (!undo_mmio_write_data) {					\
		msg_gerr("Out of memory!\n");				\
		exit(1);						\
	}								\
	undo_mmio_write_data->addr = a;					\
	undo_mmio_write_data->type = mmio_write_type_##c;		\
	undo_mmio_write_data->c##data = mmio_read##c(a);		\
	register_shutdown(undo_mmio_write, undo_mmio_write_data);	\
}

#define register_undo_mmio_writeb(a) register_undo_mmio_write(a, b)
#define register_undo_mmio_writew(a) register_undo_mmio_write(a, w)
#define register_undo_mmio_writel(a) register_undo_mmio_write(a, l)

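/* Restore-on-shutdown writers: save the current value at addr, then perform the write. */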
void rmmio_writeb(uint8_t val, void *addr)
{
	register_undo_mmio_writeb(addr);
	mmio_writeb(val, addr);
}

void rmmio_writew(uint16_t val, void *addr)
{
	register_undo_mmio_writew(addr);
	mmio_writew(val, addr);
}

void rmmio_writel(uint32_t val, void *addr)
{
	register_undo_mmio_writel(addr);
	mmio_writel(val, addr);
}

void rmmio_le_writeb(uint8_t val, void *addr)
{
	register_undo_mmio_writeb(addr);
	mmio_le_writeb(val, addr);
}

void rmmio_le_writew(uint16_t val, void *addr)
{
	register_undo_mmio_writew(addr);
	mmio_le_writew(val, addr);
}

void rmmio_le_writel(uint32_t val, void *addr)
{
	register_undo_mmio_writel(addr);
	mmio_le_writel(val, addr);
}

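/* Record the current value at addr for restoration on shutdown without writing anything now. */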
void rmmio_valb(void *addr)
{
	register_undo_mmio_writeb(addr);
}

void rmmio_valw(void *addr)
{
	register_undo_mmio_writew(addr);
}

void rmmio_vall(void *addr)
{
	register_undo_mmio_writel(addr);
}