/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __ARCH_ARM64_MMU_H__
#define __ARCH_ARM64_MMU_H__

#include <types.h>

/* Memory attributes for mmap regions
 * These attributes act as tag values for memrange regions
 */

/* Normal memory / device */
#define MA_MEM                     (1 << 0)
#define MA_DEV                     (0 << 0)

/* Secure / non-secure */
#define MA_NS                      (1 << 1)
#define MA_S                       (0 << 1)

/* Read only / Read-write */
#define MA_RO                      (1 << 2)
#define MA_RW                      (0 << 2)

/* Non-cacheable memory. */
#define MA_MEM_NC                  (1 << 3)
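
/*
 * These flags are OR'd together into a single tag describing one region;
 * note that MA_DEV, MA_S and MA_RW are the zero-valued defaults. For
 * example (illustrative combinations only):
 *
 *   MA_MEM | MA_S | MA_RW                secure, cacheable, writable DRAM
 *   MA_DEV | MA_S | MA_RW                secure device (MMIO) space
 *   MA_MEM | MA_MEM_NC | MA_S | MA_RW    uncached normal memory
 */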

/* Descriptor attributes */

#define INVALID_DESC               0x0
#define BLOCK_DESC                 0x1
#define TABLE_DESC                 0x3
#define PAGE_DESC                  0x3
#define DESC_MASK                  0x3

/* Block descriptor */
#define BLOCK_NS                   (1 << 5)

#define BLOCK_AP_RW                (0 << 7)
#define BLOCK_AP_RO                (1 << 7)

#define BLOCK_ACCESS               (1 << 10)

#define BLOCK_XN                   (1UL << 54)

#define BLOCK_SH_SHIFT                 (8)
#define BLOCK_SH_NON_SHAREABLE         (0 << BLOCK_SH_SHIFT)
#define BLOCK_SH_UNPREDICTABLE         (1 << BLOCK_SH_SHIFT)
#define BLOCK_SH_OUTER_SHAREABLE       (2 << BLOCK_SH_SHIFT)
#define BLOCK_SH_INNER_SHAREABLE       (3 << BLOCK_SH_SHIFT)
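
/*
 * A block/page descriptor's attributes are built by OR'ing the BLOCK_*
 * flags (plus a MAIR attribute index, see BLOCK_INDEX_* below) into the
 * descriptor. A sketch of a typical cacheable, writable mapping
 * (illustrative only, not necessarily the exact value the MMU code forms):
 *
 *   attrs = BLOCK_ACCESS | BLOCK_AP_RW | BLOCK_SH_INNER_SHAREABLE
 *           | (BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT);
 */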

/* Sentinel descriptor to mark first PTE of an unused table. It must be a value
 * that cannot occur naturally as part of a page table. (Bits [1:0] = 0b00 makes
 * this an unmapped page, but some page attribute bits are still set.) */
#define UNUSED_DESC                0x6EbAAD0BBADbA6E0

/* XLAT Table Init Attributes */

#define VA_START                   0x0
#define BITS_PER_VA                48
/* A granule size of 4KB is used */
#define GRANULE_SIZE_SHIFT         12
#define GRANULE_SIZE               (1 << GRANULE_SIZE_SHIFT)
#define XLAT_ADDR_MASK             ((1UL << BITS_PER_VA) - GRANULE_SIZE)
#define GRANULE_SIZE_MASK          ((1 << GRANULE_SIZE_SHIFT) - 1)

#define BITS_RESOLVED_PER_LVL   (GRANULE_SIZE_SHIFT - 3)
#define L0_ADDR_SHIFT           (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 3)
#define L1_ADDR_SHIFT           (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 2)
#define L2_ADDR_SHIFT           (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 1)
#define L3_ADDR_SHIFT           (GRANULE_SIZE_SHIFT + BITS_RESOLVED_PER_LVL * 0)

#define L0_ADDR_MASK     (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L0_ADDR_SHIFT)
#define L1_ADDR_MASK     (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L1_ADDR_SHIFT)
#define L2_ADDR_MASK     (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L2_ADDR_SHIFT)
#define L3_ADDR_MASK     (((1UL << BITS_RESOLVED_PER_LVL) - 1) << L3_ADDR_SHIFT)

/* These macros give the size of the region addressed by each entry of an xlat
   table at any given level */
#define L3_XLAT_SIZE               (1UL << L3_ADDR_SHIFT)
#define L2_XLAT_SIZE               (1UL << L2_ADDR_SHIFT)
#define L1_XLAT_SIZE               (1UL << L1_ADDR_SHIFT)
#define L0_XLAT_SIZE               (1UL << L0_ADDR_SHIFT)
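
/*
 * Worked example: with the 4KB granule, BITS_RESOLVED_PER_LVL is 9, so each
 * table holds 512 entries and the per-entry mapping sizes come out as:
 *
 *   L3_XLAT_SIZE = 1 << 12 =   4KB   (one page)
 *   L2_XLAT_SIZE = 1 << 21 =   2MB
 *   L1_XLAT_SIZE = 1 << 30 =   1GB
 *   L0_XLAT_SIZE = 1 << 39 = 512GB
 *
 * The index into a level-N table for a VA is (va & LN_ADDR_MASK) >> LN_ADDR_SHIFT.
 */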

/* Block indices required for MAIR */
#define BLOCK_INDEX_MEM_DEV_NGNRNE 0
#define BLOCK_INDEX_MEM_DEV_NGNRE  1
#define BLOCK_INDEX_MEM_DEV_GRE    2
#define BLOCK_INDEX_MEM_NORMAL_NC  3
#define BLOCK_INDEX_MEM_NORMAL     4

#define BLOCK_INDEX_MASK           0x7
#define BLOCK_INDEX_SHIFT          2

/* MAIR attributes */
#define MAIR_ATTRIBUTES            ((0x00 << (BLOCK_INDEX_MEM_DEV_NGNRNE*8)) | \
				    (0x04 << (BLOCK_INDEX_MEM_DEV_NGNRE*8))  | \
				    (0x0c << (BLOCK_INDEX_MEM_DEV_GRE*8))    | \
				    (0x44 << (BLOCK_INDEX_MEM_NORMAL_NC*8))  | \
				    (0xffUL << (BLOCK_INDEX_MEM_NORMAL*8)))
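
/*
 * MAIR holds eight 8-bit memory attribute encodings; MAIR_ATTRIBUTES fills
 * index 0 = Device-nGnRnE (0x00), 1 = Device-nGnRE (0x04), 2 = Device-GRE
 * (0x0c), 3 = Normal Non-cacheable (0x44) and 4 = Normal Write-back (0xff).
 * A block descriptor selects one of these encodings through its AttrIndx
 * field, e.g. (illustrative):
 *
 *   desc |= BLOCK_INDEX_MEM_NORMAL << BLOCK_INDEX_SHIFT;
 */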

/* TCR attributes */
#define TCR_TOSZ                   (64 - BITS_PER_VA)

#define TCR_IRGN0_SHIFT            8
#define TCR_IRGN0_NM_NC            (0x00 << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NM_WBWAC         (0x01 << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NM_WTC           (0x02 << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_NM_WBNWAC        (0x03 << TCR_IRGN0_SHIFT)

#define TCR_ORGN0_SHIFT            10
#define TCR_ORGN0_NM_NC            (0x00 << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NM_WBWAC         (0x01 << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NM_WTC           (0x02 << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_NM_WBNWAC        (0x03 << TCR_ORGN0_SHIFT)

#define TCR_SH0_SHIFT              12
#define TCR_SH0_NC                 (0x0 << TCR_SH0_SHIFT)
#define TCR_SH0_OS                 (0x2 << TCR_SH0_SHIFT)
#define TCR_SH0_IS                 (0x3 << TCR_SH0_SHIFT)

#define TCR_TG0_SHIFT              14
#define TCR_TG0_4KB                (0x0 << TCR_TG0_SHIFT)
#define TCR_TG0_64KB               (0x1 << TCR_TG0_SHIFT)
#define TCR_TG0_16KB               (0x2 << TCR_TG0_SHIFT)

#define TCR_PS_SHIFT               16
#define TCR_PS_4GB                 (0x0 << TCR_PS_SHIFT)
#define TCR_PS_64GB                (0x1 << TCR_PS_SHIFT)
#define TCR_PS_1TB                 (0x2 << TCR_PS_SHIFT)
#define TCR_PS_4TB                 (0x3 << TCR_PS_SHIFT)
#define TCR_PS_16TB                (0x4 << TCR_PS_SHIFT)
#define TCR_PS_256TB               (0x5 << TCR_PS_SHIFT)

#define TCR_TBI_SHIFT              20
#define TCR_TBI_USED               (0x0 << TCR_TBI_SHIFT)
#define TCR_TBI_IGNORED            (0x1 << TCR_TBI_SHIFT)
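
/*
 * An illustrative TCR value for a 48-bit VA space with a 4KB granule,
 * inner/outer write-back write-allocate cacheable, inner-shareable table
 * walks (a sketch, not necessarily the exact value mmu_init() programs):
 *
 *   tcr = TCR_TOSZ | TCR_IRGN0_NM_WBWAC | TCR_ORGN0_NM_WBWAC |
 *         TCR_SH0_IS | TCR_TG0_4KB | TCR_PS_256TB | TCR_TBI_USED;
 */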

struct mmu_context {
	uint64_t mair;         /* MAIR attributes */
	uint64_t tcr;          /* TCR attributes */
};

/* Initialize MMU registers and page table memory region. */
void mmu_init(void);
/* Save the MMU context (MAIR and TCR registers). */
void mmu_save_context(struct mmu_context *mmu_context);
/* Restore the MMU context from a previously saved copy. */
void mmu_restore_context(const struct mmu_context *mmu_context);
/* Change the memory type of a range of bytes at runtime. */
void mmu_config_range(void *start, size_t size, uint64_t tag);
/* Enable the MMU (requires a prior mmu_init() and configured ranges!). */
void mmu_enable(void);
/* Disable the MMU (which also disables dcache but not icache). */
void mmu_disable(void);
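
/*
 * Typical call sequence during boot (a sketch; the region bases and sizes
 * are placeholders, not symbols provided by this header):
 *
 *   mmu_init();
 *   mmu_config_range(dram_base, dram_size, MA_MEM | MA_S | MA_RW);
 *   mmu_config_range(mmio_base, mmio_size, MA_DEV | MA_S | MA_RW);
 *   mmu_enable();
 */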

#endif /* __ARCH_ARM64_MMU_H__ */