Excerpts from arch/arm64/include/asm/memory.h (lines matching "page-based").

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/memory.h
 *
 * Copyright (C) 2000-2002 Russell King
 *
 * Note: this file should not be included by non-asm/.h files
 */

#include <asm/page-def.h>
/*
 * VMEMMAP_SIZE - allows the whole linear region to be covered by
 *                a struct page array
 *
 * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE
 * needs to cover the memory region from the beginning of the 52-bit
 * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to
 * keep a constant PAGE_OFFSET and simply leave the head unused
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
#define VMEMMAP_RANGE   (_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET)
#define VMEMMAP_SIZE    ((VMEMMAP_RANGE >> PAGE_SHIFT) * sizeof(struct page))
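/*
 * Illustrative sketch (not part of the header): the same arithmetic as
 * the two macros above, evaluated in user space for an assumed
 * VA_BITS_MIN = 48, PAGE_SHIFT = 12 (4 KiB pages) and an assumed
 * sizeof(struct page) of 64 bytes.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t page_offset = -(1ULL << 48);           /* _PAGE_OFFSET(48) */
        uint64_t page_end    = -(1ULL << 47);           /* _PAGE_END(48)    */
        uint64_t range       = page_end - page_offset;  /* VMEMMAP_RANGE    */
        uint64_t size        = (range >> 12) * 64;      /* VMEMMAP_SIZE     */

        /* 128 TiB of linear map needs a 2048 GiB struct page array */
        printf("linear map: %llu TiB, vmemmap: %llu GiB\n",
               (unsigned long long)(range >> 40),
               (unsigned long long)(size >> 30));
        return 0;
}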
/*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
 *               start of the TTBR1 address space.
 * PAGE_END - the end of the linear map, where all other kernel mappings begin.
 * KIMAGE_VADDR - the virtual address of the start of the kernel image.
 * VA_BITS - the maximum number of bits for virtual addresses.
 */
#define _PAGE_OFFSET(va)        (-(UL(1) << (va)))

#define VMEMMAP_START           (VMEMMAP_END - VMEMMAP_SIZE)
#define VMEMMAP_END             (-UL(SZ_1G))

#define FIXADDR_TOP             (-UL(SZ_8M))

#define _PAGE_END(va)           (-(UL(1) << ((va) - 1)))
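/*
 * Illustrative sketch (not part of the header): the values these macros
 * produce for an assumed 48-bit VA, 4 KiB page, 64-byte struct page
 * configuration.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t page_offset   = -(1ULL << 48);   /* 0xffff000000000000 */
        uint64_t page_end      = -(1ULL << 47);   /* 0xffff800000000000 */
        uint64_t vmemmap_size  = ((page_end - page_offset) >> 12) * 64;
        uint64_t vmemmap_end   = -(1ULL << 30);   /* -SZ_1G */
        uint64_t vmemmap_start = vmemmap_end - vmemmap_size;
        uint64_t fixaddr_top   = -(8ULL << 20);   /* -SZ_8M */

        printf("PAGE_OFFSET   = 0x%016llx\n", (unsigned long long)page_offset);
        printf("PAGE_END      = 0x%016llx\n", (unsigned long long)page_end);
        printf("VMEMMAP_START = 0x%016llx\n", (unsigned long long)vmemmap_start);
        printf("VMEMMAP_END   = 0x%016llx\n", (unsigned long long)vmemmap_end);
        printf("FIXADDR_TOP   = 0x%016llx\n", (unsigned long long)fixaddr_top);
        return 0;
}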
/*
 * Generic and Software Tag-Based KASAN modes require 1/8th and 1/16th of the
 * kernel virtual address space for their shadow memory, respectively.
 *
 * The mapping between a kernel virtual address and its corresponding shadow
 * memory address is defined based on the formula:
 *
 *      shadow_addr = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * Based on this mapping, we define two constants: KASAN_SHADOW_START, the
 * start of the shadow memory region, and KASAN_SHADOW_END, its end.
 *
 * KASAN_SHADOW_END is defined first, as the shadow of the upper bound of
 * possible virtual addresses (UL(1) << 64) under the formula above.
 * KASAN_SHADOW_START is defined second based on KASAN_SHADOW_END. The shadow
 * memory start must map to the lowest possible kernel virtual memory address,
 * so it depends on the actual number of virtual address bits.
 */
#define KASAN_SHADOW_END        ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) + KASAN_SHADOW_OFFSET)
#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (UL(1) << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
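/*
 * Illustrative sketch (not part of the header): evaluating the two
 * macros above for generic KASAN (KASAN_SHADOW_SCALE_SHIFT = 3, one
 * shadow byte per 8 bytes of memory) with an assumed, config-dependent
 * KASAN_SHADOW_OFFSET of 0xdfff800000000000.
 */
#include <stdio.h>
#include <stdint.h>

#define SCALE_SHIFT     3
#define SHADOW_OFFSET   0xdfff800000000000ULL   /* assumed, config-dependent */

static uint64_t shadow_addr(uint64_t addr)
{
        /* shadow_addr = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET */
        return (addr >> SCALE_SHIFT) + SHADOW_OFFSET;
}

int main(void)
{
        uint64_t end   = (1ULL << (64 - SCALE_SHIFT)) + SHADOW_OFFSET;
        uint64_t start = end - (1ULL << (48 - SCALE_SHIFT)); /* _KASAN_SHADOW_START(48) */

        printf("KASAN_SHADOW_START = 0x%016llx\n", (unsigned long long)start);
        printf("KASAN_SHADOW_END   = 0x%016llx\n", (unsigned long long)end);
        /* the lowest 48-bit kernel address maps exactly to the shadow start */
        printf("shadow(0xffff000000000000) = 0x%016llx\n",
               (unsigned long long)shadow_addr(0xffff000000000000ULL));
        return 0;
}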
#define DIRECT_MAP_PHYSMEM_END  __pa(PAGE_END - 1)
/*
 * VMAP'd stacks are allocated at page granularity, so we must ensure that such
 * stacks are a multiple of page size.
 */
#define THREAD_SIZE_ORDER       (THREAD_SHIFT - PAGE_SHIFT)
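/*
 * Illustrative sketch (not part of the header): with an assumed
 * PAGE_SHIFT of 12 and THREAD_SHIFT of 14 (16 KiB kernel stacks),
 * THREAD_SIZE_ORDER works out to 2, i.e. each stack is an order-2,
 * four-page allocation.
 */
#include <stdio.h>

#define PAGE_SHIFT      12      /* assumed: 4 KiB pages  */
#define THREAD_SHIFT    14      /* assumed: 16 KiB stack */
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)

int main(void)
{
        printf("order %d -> %d pages per stack\n",
               THREAD_SIZE_ORDER, 1 << THREAD_SIZE_ORDER);
        return 0;
}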
/*
 * Memory types for Stage-2 translation
 */

/*
 * Memory types for Stage-2 translation when ID_AA64MMFR2_EL1.FWB is 0001
 * Stage-2 enforces Normal-WB and Device-nGnRE
 */
/*
 * Open-coded (swapper_pg_dir - reserved_pg_dir) as this cannot be calculated
 * until link time.
 */
#define RESERVED_SWAPPER_OFFSET (PAGE_SIZE)

/*
 * Open-coded (swapper_pg_dir - tramp_pg_dir) as this cannot be calculated
 * until link time.
 */
#define TRAMP_SWAPPER_OFFSET    (2 * PAGE_SIZE)
#define vabits_actual           (64 - ((read_tcr() >> 16) & 63))
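/*
 * Illustrative sketch (not part of the header): vabits_actual extracts
 * T1SZ (TCR_EL1 bits [21:16]) and converts it to an address width.
 * read_tcr() is mocked here with a fixed value instead of the real
 * system-register access.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t read_tcr(void)  /* mock of the TCR_EL1 read */
{
        return 16ULL << 16;     /* T1SZ = 16: a 64 - 16 = 48-bit VA space */
}

int main(void)
{
        unsigned int vabits_actual = 64 - ((read_tcr() >> 16) & 63);

        printf("vabits_actual = %u\n", vabits_actual);  /* prints 48 */
        return 0;
}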
/* PHYS_OFFSET - the physical address of the start of memory. */

static inline u64 kaslr_offset(void)
{
        return (u64)&_text - KIMAGE_VADDR;
}
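/*
 * Illustrative sketch (not part of the header): kaslr_offset() is simply
 * how far the kernel image was moved at boot from its link-time base
 * KIMAGE_VADDR. Both addresses below are made up.
 */
#include <stdio.h>
#include <stdint.h>

#define KIMAGE_VADDR    0xffff800080000000ULL   /* hypothetical link-time base */

int main(void)
{
        uint64_t text = 0xffff800091230000ULL;  /* hypothetical runtime &_text */
        uint64_t kaslr_offset = text - KIMAGE_VADDR;

        printf("kaslr_offset = 0x%llx\n", (unsigned long long)kaslr_offset);
        return 0;
}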
/*
 * PFNs are used to describe any physical page; this means
 * PFN 0 == physical address 0.
 *
 * This is the PFN of the first RAM page in the kernel
 * direct-mapped view.  We assume this is the first page
 * of RAM in the mem_map as well.
 */
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
#define __is_lm_address(addr)   (((u64)(addr) - PAGE_OFFSET) < (PAGE_END - PAGE_OFFSET))

#define __lm_to_phys(addr)      (((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __kimg_to_phys(addr)    ((addr) - kimage_voffset)

#define __phys_to_virt(x)       ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET)
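/*
 * Illustrative sketch (not part of the header): a user-space model of
 * the helpers above, using the assumed 48-bit constants from the
 * earlier sketches and a made-up PHYS_OFFSET (DRAM base).
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET     0xffff000000000000ULL
#define PAGE_END        0xffff800000000000ULL
#define PHYS_OFFSET     0x40000000ULL           /* hypothetical DRAM base */

#define __is_lm_address(addr)   (((uint64_t)(addr) - PAGE_OFFSET) < \
                                 (PAGE_END - PAGE_OFFSET))
#define __lm_to_phys(addr)      (((addr) - PAGE_OFFSET) + PHYS_OFFSET)
#define __phys_to_virt(x)       (((uint64_t)(x) - PHYS_OFFSET) | PAGE_OFFSET)

int main(void)
{
        uint64_t pa = 0x41234000ULL;
        uint64_t va = __phys_to_virt(pa);

        /* round trip: phys -> linear-map virt -> phys */
        printf("va 0x%016llx in linear map: %d, back to pa 0x%llx\n",
               (unsigned long long)va, (int)__is_lm_address(va),
               (unsigned long long)__lm_to_phys(va));
        return 0;
}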
/*
 * Note: Drivers should NOT use these.  They are the wrong
 * translation for translating DMA addresses.  Use the driver
 * DMA support - see dma-mapping.h.
 */

#include <asm-generic/memory_model.h>
/*
 * virt_to_page(x)      convert a _valid_ virtual address to struct page *
 */

/* page_to_virt() does the inverse, indexing the vmemmap from the page: */
/*      u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page); */

#define virt_to_page(x) ({                                              \
        u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE;    \
        u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page));     \
        (struct page *)__addr;                                          \
})
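/*
 * Illustrative sketch (not part of the header): the vmemmap index
 * arithmetic of virt_to_page()/page_to_virt() modelled in user space,
 * with pointer tagging (__tag_set/__tag_reset) ignored and the assumed
 * constants from the earlier sketches.
 */
#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET     0xffff000000000000ULL
#define PAGE_SIZE       4096ULL
#define VMEMMAP_START   0xfffffdffc0000000ULL   /* from the layout sketch */
#define PAGE_STRUCT_SZ  64ULL                   /* assumed sizeof(struct page) */

static uint64_t virt_to_page(uint64_t va)
{
        uint64_t idx = (va - PAGE_OFFSET) / PAGE_SIZE;  /* linear-map page index */
        return VMEMMAP_START + idx * PAGE_STRUCT_SZ;
}

static uint64_t page_to_virt(uint64_t page)
{
        uint64_t idx = (page - VMEMMAP_START) / PAGE_STRUCT_SZ;
        return PAGE_OFFSET + idx * PAGE_SIZE;
}

int main(void)
{
        uint64_t va = PAGE_OFFSET + 123 * PAGE_SIZE;
        uint64_t pg = virt_to_page(va);

        printf("struct page at 0x%llx, round trip %s\n",
               (unsigned long long)pg,
               page_to_virt(pg) == va ? "ok" : "broken");
        return 0;
}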