1 /* SPDX-License-Identifier: GPL-2.0-only */
2
3 #include <arch/encoding.h>
4 #include <stdint.h>
5 #include <arch/pmp.h>
6 #include <console/console.h>
7 #include <commonlib/helpers.h>
8
9 #define GRANULE (1 << PMP_SHIFT)
10
11 /*
12 * This structure is used to temporarily record PMP
13 * configuration information.
14 */
/*
 * Scratch record describing one PMP entry before it is committed to the
 * pmpcfg/pmpaddr CSRs.
 */
struct pmpcfg {
	/* Value destined for this entry's 8-bit slice of pmpcfg[i]. */
	uintptr_t cfg;
	/*
	 * A TOR-type configuration spans two consecutive PMP slots:
	 * pmpaddr[i - 1] holds the start of the range and pmpaddr[i] the
	 * end.  This records the value for pmpaddr[i - 1]; it is unused
	 * for NA4/NAPOT entries.
	 */
	uintptr_t previous_address;
	/* Value destined for pmpaddr[i]. */
	uintptr_t address;
};
27
28 /* This variable is used to record which entries have been used. */
29 static uintptr_t pmp_entry_used_mask;
30
31 /* The architectural spec says that up to 16 PMP entries are
32 * available.
33 * "Up to 16 PMP entries are supported. If any PMP entries are
34 * implemented, then all PMP CSRs must be implemented,
35 * but all PMP CSR fields are WARL and may be hardwired to zero."
36 */
/*
 * Number of PMP entries this code assumes to exist.
 * The privileged spec allows at most 16; individual fields are WARL and
 * may be hardwired to zero on a given implementation.
 */
int pmp_entries_num(void)
{
	enum { PMP_MAX_ENTRIES = 16 };
	return PMP_MAX_ENTRIES;
}
41
42 /* helper function used to read pmpcfg[idx] */
/*
 * Read the 8-bit configuration of PMP entry 'idx' out of the packed
 * pmpcfg CSRs.
 *
 * RV32 packs four entry configs per 32-bit CSR (pmpcfg0..pmpcfg3);
 * RV64 packs eight per 64-bit CSR, and only the even-numbered
 * pmpcfg0/pmpcfg2 exist.
 *
 * Returns (uintptr_t)-1 for an out-of-range index.
 */
static uintptr_t read_pmpcfg(int idx)
{
#if __riscv_xlen == 32
	/* Byte position of entry idx within its pmpcfg CSR. */
	int shift = 8 * (idx & 3);
	switch (idx >> 2) {
	case 0:
		return (read_csr(pmpcfg0) >> shift) & 0xff;
	case 1:
		return (read_csr(pmpcfg1) >> shift) & 0xff;
	case 2:
		return (read_csr(pmpcfg2) >> shift) & 0xff;
	case 3:
		return (read_csr(pmpcfg3) >> shift) & 0xff;
	}
#elif __riscv_xlen == 64
	/* Byte position of entry idx within its pmpcfg CSR. */
	int shift = 8 * (idx & 7);
	switch (idx >> 3) {
	case 0:
		return (read_csr(pmpcfg0) >> shift) & 0xff;
	case 1:
		return (read_csr(pmpcfg2) >> shift) & 0xff;
	}
#endif
	return -1;	/* invalid index sentinel */
}
68
69 /* helper function used to write pmpcfg[idx] */
/*
 * Write the 8-bit configuration of PMP entry 'idx' into the packed
 * pmpcfg CSRs via read-modify-write, then verify the value stuck.
 *
 * pmpcfg fields are WARL, so a hart may legally drop bits; if the
 * readback differs from the requested value this die()s — presumably
 * because continuing with silently weakened protection would be worse
 * than halting (NOTE(review): confirm that policy is intended for all
 * target SoCs).
 */
static void write_pmpcfg(int idx, uintptr_t cfg)
{
	uintptr_t old;
	uintptr_t new;
#if __riscv_xlen == 32
	/* Four 8-bit entry configs per 32-bit pmpcfg CSR. */
	int shift = 8 * (idx & 3);
	switch (idx >> 2) {
	case 0:
		old = read_csr(pmpcfg0);
		new = (old & ~((uintptr_t)0xff << shift))
			| ((cfg & 0xff) << shift);
		write_csr(pmpcfg0, new);
		break;
	case 1:
		old = read_csr(pmpcfg1);
		new = (old & ~((uintptr_t)0xff << shift))
			| ((cfg & 0xff) << shift);
		write_csr(pmpcfg1, new);
		break;
	case 2:
		old = read_csr(pmpcfg2);
		new = (old & ~((uintptr_t)0xff << shift))
			| ((cfg & 0xff) << shift);
		write_csr(pmpcfg2, new);
		break;
	case 3:
		old = read_csr(pmpcfg3);
		new = (old & ~((uintptr_t)0xff << shift))
			| ((cfg & 0xff) << shift);
		write_csr(pmpcfg3, new);
		break;
	}
#elif __riscv_xlen == 64
	/* Eight 8-bit entry configs per 64-bit CSR; only pmpcfg0/pmpcfg2 exist. */
	int shift = 8 * (idx & 7);
	switch (idx >> 3) {
	case 0:
		old = read_csr(pmpcfg0);
		new = (old & ~((uintptr_t)0xff << shift))
			| ((cfg & 0xff) << shift);
		write_csr(pmpcfg0, new);
		/* Debug trace of each write; the RV32 path has no such log. */
		printk(BIOS_INFO, "%s(%d, %lx) = %lx\n", __func__, idx, cfg, read_csr(pmpcfg0));
		break;
	case 1:
		old = read_csr(pmpcfg2);
		new = (old & ~((uintptr_t)0xff << shift))
			| ((cfg & 0xff) << shift);
		write_csr(pmpcfg2, new);
		printk(BIOS_INFO, "%s(%d, %lx) = %lx\n", __func__, idx, cfg, read_csr(pmpcfg2));
		break;
	}
#endif
	/* WARL readback check: refuse to run with a weakened config. */
	if (read_pmpcfg(idx) != cfg) {
		printk(BIOS_WARNING, "%s: PMPcfg%d: Wrote %lx, read %lx\n", __func__, idx, cfg, read_pmpcfg(idx));
		die("PMPcfg write failed");
	}
}
126
127 /* helper function used to read pmpaddr[idx] */
read_pmpaddr(int idx)128 static uintptr_t read_pmpaddr(int idx)
129 {
130 switch (idx) {
131 case 0:
132 return read_csr(pmpaddr0);
133 case 1:
134 return read_csr(pmpaddr1);
135 case 2:
136 return read_csr(pmpaddr2);
137 case 3:
138 return read_csr(pmpaddr3);
139 case 4:
140 return read_csr(pmpaddr4);
141 case 5:
142 return read_csr(pmpaddr5);
143 case 6:
144 return read_csr(pmpaddr6);
145 case 7:
146 return read_csr(pmpaddr7);
147 case 8:
148 return read_csr(pmpaddr8);
149 case 9:
150 return read_csr(pmpaddr9);
151 case 10:
152 return read_csr(pmpaddr10);
153 case 11:
154 return read_csr(pmpaddr11);
155 case 12:
156 return read_csr(pmpaddr12);
157 case 13:
158 return read_csr(pmpaddr13);
159 case 14:
160 return read_csr(pmpaddr14);
161 case 15:
162 return read_csr(pmpaddr15);
163 }
164 return -1;
165 }
166
167 /* helper function used to write pmpaddr[idx] */
write_pmpaddr(int idx,uintptr_t val)168 static void write_pmpaddr(int idx, uintptr_t val)
169 {
170 switch (idx) {
171 case 0:
172 write_csr(pmpaddr0, val);
173 break;
174 case 1:
175 write_csr(pmpaddr1, val);
176 break;
177 case 2:
178 write_csr(pmpaddr2, val);
179 break;
180 case 3:
181 write_csr(pmpaddr3, val);
182 break;
183 case 4:
184 write_csr(pmpaddr4, val);
185 break;
186 case 5:
187 write_csr(pmpaddr5, val);
188 break;
189 case 6:
190 write_csr(pmpaddr6, val);
191 break;
192 case 7:
193 write_csr(pmpaddr7, val);
194 break;
195 case 8:
196 write_csr(pmpaddr8, val);
197 break;
198 case 9:
199 write_csr(pmpaddr9, val);
200 break;
201 case 10:
202 write_csr(pmpaddr10, val);
203 break;
204 case 11:
205 write_csr(pmpaddr11, val);
206 break;
207 case 12:
208 write_csr(pmpaddr12, val);
209 break;
210 case 13:
211 write_csr(pmpaddr13, val);
212 break;
213 case 14:
214 write_csr(pmpaddr14, val);
215 break;
216 case 15:
217 write_csr(pmpaddr15, val);
218 break;
219 }
220
221 printk(BIOS_INFO, "%s(%d, %lx) = %lx\n", __func__, idx, val, read_pmpaddr(idx));
222 /* The PMP is not required to return what we wrote. On some SoC, many bits are cleared. */
223 if (read_pmpaddr(idx) != val) {
224 printk(BIOS_WARNING, "%s: PMPaddr%d: Wrote %lx, read %lx\n", __func__,
225 idx, val, read_pmpaddr(idx));
226 }
227 }
228
229 /* Generate a PMP configuration for all memory */
generate_pmp_all(struct pmpcfg * p)230 static void generate_pmp_all(struct pmpcfg *p)
231 {
232 p->cfg = PMP_NAPOT | PMP_R | PMP_W | PMP_X;
233 p->previous_address = 0;
234 p->address = (uintptr_t) -1;
235 }
236
237 /* Generate a PMP configuration of type NA4/NAPOT */
/*
 * Generate a PMP configuration of type NA4/NAPOT.
 *
 * base and size arrive in granule units (bytes already shifted right by
 * PMP_SHIFT), so size == 1 describes a single 4-byte granule.
 *
 * Two fixes versus the earlier logic:
 *  - the NA4/NAPOT choice compared the granule-unit size against
 *    GRANULE (a byte count), so 8- and 16-byte regions were encoded as
 *    NA4 and only their first 4 bytes were actually covered;
 *  - for a true NA4 region (size == 1), "size / 2 - 1" underflowed to
 *    (uintptr_t)-1 and corrupted the encoded address.
 */
static void generate_pmp_napot(struct pmpcfg *p, uintptr_t base, uintptr_t size, u8 flags)
{
	flags = flags & (PMP_R | PMP_W | PMP_X | PMP_L);
	p->previous_address = 0;	/* unused for NA4/NAPOT */
	if (size > 1) {
		/*
		 * More than one granule: NAPOT.  The encoded pmpaddr is the
		 * base with the low bits forming the 0b01..1 pattern that
		 * selects the region size.
		 */
		p->cfg = flags | PMP_NAPOT;
		p->address = base + (size / 2 - 1);
	} else {
		/* Exactly one granule: NA4; pmpaddr is the address itself. */
		p->cfg = flags | PMP_NA4;
		p->address = base;
	}
}
245
246 /* Generate a PMP configuration of type TOR */
/*
 * Generate a PMP configuration of type TOR.
 * The range is [base, base + size); the start lands in the previous
 * slot's pmpaddr, the (exclusive) end in this slot's pmpaddr.
 */
static void generate_pmp_range(struct pmpcfg *p, uintptr_t base, uintptr_t size, u8 flags)
{
	p->cfg = (flags & (PMP_R | PMP_W | PMP_X | PMP_L)) | PMP_TOR;
	p->previous_address = base;
	p->address = base + size;
}
254
255 /*
256 * Generate a PMP configuration.
257 * reminder: base and size are 34 bit numbers on RV32.
258 */
/*
 * Generate a PMP configuration.
 * reminder: base and size are 34 bit numbers on RV32.
 *
 * Returns 0 on success, 1 if the request exceeds the RV32 34-bit
 * physical address limit (in which case *p is left untouched).
 */
static int generate_pmp(struct pmpcfg *p, u64 base, u64 size, u8 flags)
{
	/*
	 * Convert byte address and size to granule units.  Shift in 64
	 * bits BEFORE narrowing to uintptr_t: the old "(uintptr_t)base >>
	 * PMP_SHIFT" truncated first, discarding bits 32-33 of a 34-bit
	 * RV32 physical address.
	 */
	uintptr_t b = (uintptr_t)(base >> PMP_SHIFT), s = (uintptr_t)(size >> PMP_SHIFT);
#if __riscv_xlen == 32
	/* verify that base + size fits in 34 bits */
	if ((base + size - 1) >> 34) {
		printk(BIOS_EMERG, "%s: base (%llx) + size (%llx) - 1 is more than 34 bits\n",
		       __func__, base, size);
		return 1;
	}
#endif
	/* if base is -1, that means "match all" */
	if (base == (u64)-1) {
		generate_pmp_all(p);
	} else if (IS_POWER_OF_2(size) && (size >= 4) && ((base & (size - 1)) == 0)) {
		/* naturally aligned power-of-two region: NA4/NAPOT */
		generate_pmp_napot(p, b, s, flags);
	} else {
		/* anything else needs a two-slot TOR range */
		generate_pmp_range(p, b, s, flags);
	}
	return 0;
}
281
282 /*
283 * find empty PMP entry by type
284 * TOR type configuration requires two consecutive PMP entries,
285 * others requires one.
286 */
find_empty_pmp_entry(int is_range)287 static int find_empty_pmp_entry(int is_range)
288 {
289 int free_entries = 0;
290 for (int i = 0; i < pmp_entries_num(); i++) {
291 if (pmp_entry_used_mask & (1 << i))
292 free_entries = 0;
293 else
294 free_entries++;
295 if (is_range && (free_entries == 2))
296 return i;
297 if (!is_range && (free_entries == 1))
298 return i;
299 }
300 die("Too many PMP configurations, no free entries can be used!");
301 return -1;
302 }
303
/*
 * Mark a PMP entry as used.
 * This function is meant to be paired with find_empty_pmp_entry():
 *
 * n = find_empty_pmp_entry(is_range)
 * ... // program the PMP entry
 * mask_pmp_entry_used(n);
 */
mask_pmp_entry_used(int idx)312 static void mask_pmp_entry_used(int idx)
313 {
314 pmp_entry_used_mask |= 1 << idx;
315 }
316
317 /* reset PMP setting */
reset_pmp(void)318 void reset_pmp(void)
319 {
320 for (int i = 0; i < pmp_entries_num(); i++) {
321 if (read_pmpcfg(i) & PMP_L)
322 die("Some PMP configurations are locked and cannot be reset!");
323 write_pmpcfg(i, 0);
324 write_pmpaddr(i, 0);
325 }
326 }
327
328 /*
329 * set up PMP record
330 * Why are these u64 and not uintptr_t?
331 * because, per the spec:
332 * The Sv32 page-based virtual-memory scheme described in Section 4.3
333 * supports 34-bit physical addresses for RV32, so the PMP scheme must
334 * support addresses wider than XLEN for RV32.
335 * Yes, in RV32, these are 34-bit numbers.
336 * Rather than require every future user of these to remember that,
337 * this ABI is 64 bits.
338 * generate_pmp will check for out of range values.
339 */
void setup_pmp(u64 base, u64 size, u8 flags)
{
	struct pmpcfg p;
	int is_range, n;

	/* generate_pmp returns non-zero only for out-of-range RV32 requests. */
	if (generate_pmp(&p, base, size, flags))
		return;

	/* TOR entries consume two consecutive slots (range start + end). */
	is_range = ((p.cfg & PMP_A) == PMP_TOR);

	n = find_empty_pmp_entry(is_range);

	/*
	 * NOTE! you MUST write the cfg register first, or on (e.g.)
	 * the SiFive FU740, it will not take all the bits.
	 * This is different than QEMU. NASTY!
	 */
	write_pmpcfg(n, p.cfg);

	write_pmpaddr(n, p.address);
	if (is_range)
		write_pmpaddr(n - 1, p.previous_address);	/* range start */

	/* Reserve the slot(s) only after the hardware has been programmed. */
	mask_pmp_entry_used(n);
	if (is_range)
		mask_pmp_entry_used(n - 1);
}
367
368 /*
369 * close_pmp will "close" the pmp.
370 * This consists of adding the "match every address" entry.
371 * This should be the last pmp function that is called.
372 * Because we can not be certain that there is not some reason for it
373 * NOT to be last, we do not check -- perhaps, later, a check would
374 * make sense, but, for now, we do not check.
375 * If previous code has used up all pmp entries, print a warning
376 * and continue.
377 * The huge constant for the memory size may seem a bit odd here.
378 * Recall that PMP is to protect a *limited* number of M mode
379 * memory ranges from S and U modes. Therefore, the last range
380 * entry should cover all possible addresses, up to
381 * an architectural limit. It is entirely acceptable
382 * for it to cover memory that does not exist -- PMP
383 * protects M mode, nothing more.
384 * Think of this range as the final catch-all else
385 * in an if-then-else.
386 */
close_pmp(void)387 void close_pmp(void)
388 {
389 setup_pmp((u64)-1, 0, PMP_R|PMP_W|PMP_X);
390 }
391