// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Heiko Stuebner <[email protected]>
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/bugs.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/dma-noncoherent.h>
#include <asm/errata_list.h>
#include <asm/hwprobe.h>
#include <asm/io.h>
#include <asm/text-patching.h>
#include <asm/vendorid_list.h>
#include <asm/vendor_extensions.h>

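/*
 * T-Head c9xx vendor CSR; the MAEE bit indicates that the vendor memory
 * attribute extension (MAE) is enabled (see errata_probe_mae() below).
 */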
#define CSR_TH_SXSTATUS		0x5c0
#define SXSTATUS_MAEE		_AC(0x200000, UL)

static bool errata_probe_mae(unsigned int stage,
			     unsigned long arch_id, unsigned long impid)
{
	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_MAE))
		return false;

	if (arch_id != 0 || impid != 0)
		return false;

	if (stage != RISCV_ALTERNATIVES_EARLY_BOOT &&
	    stage != RISCV_ALTERNATIVES_MODULE)
		return false;

	if (!(csr_read(CSR_TH_SXSTATUS) & SXSTATUS_MAEE))
		return false;

	return true;
}

/*
 * th.dcache.ipa rs1 (invalidate, physical address)
 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
 *   0000001    01010      rs1       000      00000  0001011
 * th.dcache.iva rs1 (invalidate, virtual address)
 *   0000001    00110      rs1       000      00000  0001011
 *
 * th.dcache.cpa rs1 (clean, physical address)
 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
 *   0000001    01001      rs1       000      00000  0001011
 * th.dcache.cva rs1 (clean, virtual address)
 *   0000001    00101      rs1       000      00000  0001011
 *
 * th.dcache.cipa rs1 (clean then invalidate, physical address)
 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
 *   0000001    01011      rs1       000      00000  0001011
 * th.dcache.civa rs1 (clean then invalidate, virtual address)
 *   0000001    00111      rs1       000      00000  0001011
 *
 * th.sync.s (make sure all cache operations finished)
 * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
 *   0000000    11001     00000      000      00000  0001011
 */
#define THEAD_INVAL_A0	".long 0x02a5000b"
#define THEAD_CLEAN_A0	".long 0x0295000b"
#define THEAD_FLUSH_A0	".long 0x02b5000b"
#define THEAD_SYNC_S	".long 0x0190000b"

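/*
 * Illustrative only (not used below): each opcode word is assembled from
 * the fields in the table above, with a0 (x10 = 0b01010) fixed in rs1.
 * For th.dcache.ipa a0:
 *
 *	(1 << 25) | (0b01010 << 20) | (10 << 15) | (0b000 << 12) | 0b0001011
 *	  == 0x02a5000b
 */
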
#define THEAD_CMO_OP(_op, _start, _size, _cachesize)			\
asm volatile("mv a0, %1\n\t"						\
	     "j 2f\n\t"							\
	     "3:\n\t"							\
	     THEAD_##_op##_A0 "\n\t"					\
	     "add a0, a0, %0\n\t"					\
	     "2:\n\t"							\
	     "bltu a0, %2, 3b\n\t"					\
	     THEAD_SYNC_S						\
	     : : "r"(_cachesize),					\
		 "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)),	\
		 "r"((unsigned long)(_start) + (_size))			\
	     : "a0")

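/*
 * Rough C equivalent of THEAD_CMO_OP (sketch only; the real sequence has
 * to stay inside a single asm block so that a0 stays live across
 * iterations):
 *
 *	for (a0 = start & ~(cachesize - 1UL); a0 < start + size; a0 += cachesize)
 *		th_dcache_op(a0);	// hypothetical helper for the raw opcode
 *	th_sync_s();			// hypothetical wrapper for th.sync.s
 */
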
static void thead_errata_cache_inv(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(INVAL, paddr, size, riscv_cbom_block_size);
}

static void thead_errata_cache_wback(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(CLEAN, paddr, size, riscv_cbom_block_size);
}

static void thead_errata_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	THEAD_CMO_OP(FLUSH, paddr, size, riscv_cbom_block_size);
}

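/*
 * Callbacks for the non-coherent DMA maintenance path; c9xx cores predate
 * the standard Zicbom extension, so the vendor dcache instructions above
 * are used instead.
 */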
static const struct riscv_nonstd_cache_ops thead_errata_cmo_ops = {
	.wback = &thead_errata_cache_wback,
	.inv = &thead_errata_cache_inv,
	.wback_inv = &thead_errata_cache_wback_inv,
};

static bool errata_probe_cmo(unsigned int stage,
			     unsigned long arch_id, unsigned long impid)
{
	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_CMO))
		return false;

	if (arch_id != 0 || impid != 0)
		return false;

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return false;

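	/*
	 * At boot stage, advertise non-coherent DMA support and register
	 * the vendor cache ops; c9xx lacks Zicbom, so the L1 line size
	 * stands in for the CMO block size.
	 */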
	if (stage == RISCV_ALTERNATIVES_BOOT) {
		riscv_cbom_block_size = L1_CACHE_BYTES;
		riscv_noncoherent_supported();
		riscv_noncoherent_register_cache_ops(&thead_errata_cmo_ops);
	}

	return true;
}

static bool errata_probe_pmu(unsigned int stage,
			     unsigned long arch_id, unsigned long impid)
{
	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_PMU))
		return false;

	/* target-c9xx cores report arch_id and impid as 0 */
	if (arch_id != 0 || impid != 0)
		return false;

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		return false;

	return true;
}

static bool errata_probe_ghostwrite(unsigned int stage,
				    unsigned long arch_id, unsigned long impid)
{
	if (!IS_ENABLED(CONFIG_ERRATA_THEAD_GHOSTWRITE))
		return false;

	/*
	 * target-c9xx cores report arch_id and impid as 0
	 *
	 * While ghostwrite may not affect all c9xx cores that implement
	 * xtheadvector, there is no further granularity than c9xx. Assume
	 * vulnerable for this entire class of processors when xtheadvector is
	 * enabled.
	 */
	if (arch_id != 0 || impid != 0)
		return false;

	if (stage != RISCV_ALTERNATIVES_EARLY_BOOT)
		return false;

	ghostwrite_set_vulnerable();

	return true;
}

static u32 thead_errata_probe(unsigned int stage,
			      unsigned long archid, unsigned long impid)
{
	u32 cpu_req_errata = 0;

	if (errata_probe_mae(stage, archid, impid))
		cpu_req_errata |= BIT(ERRATA_THEAD_MAE);

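	/*
	 * CMO support takes effect through the cache ops registered in
	 * errata_probe_cmo(), not through an alternative, so the return
	 * value is intentionally unused.
	 */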
	errata_probe_cmo(stage, archid, impid);

	if (errata_probe_pmu(stage, archid, impid))
		cpu_req_errata |= BIT(ERRATA_THEAD_PMU);

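	/* Ghostwrite only marks the CPU as vulnerable; nothing is patched. */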
	errata_probe_ghostwrite(stage, archid, impid);

	return cpu_req_errata;
}

void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
			     unsigned long archid, unsigned long impid,
			     unsigned int stage)
{
	struct alt_entry *alt;
	u32 cpu_req_errata = thead_errata_probe(stage, archid, impid);
	u32 tmp;
	void *oldptr, *altptr;

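	/*
	 * Errata patch_ids must stay below the vendor-extension alternatives
	 * base so the two ID spaces cannot overlap.
	 */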
	BUILD_BUG_ON(ERRATA_THEAD_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE);

	for (alt = begin; alt < end; alt++) {
		if (alt->vendor_id != THEAD_VENDOR_ID)
			continue;
		if (alt->patch_id >= ERRATA_THEAD_NUMBER)
			continue;

		tmp = (1U << alt->patch_id);
		if (cpu_req_errata & tmp) {
			oldptr = ALT_OLD_PTR(alt);
			altptr = ALT_ALT_PTR(alt);

			/*
			 * In early boot the MMU isn't running yet, so the
			 * text can be written directly instead of going
			 * through patch_text_nosync().
			 */
			if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) {
				memcpy(oldptr, altptr, alt->alt_len);
			} else {
				mutex_lock(&text_mutex);
				patch_text_nosync(oldptr, altptr, alt->alt_len);
				mutex_unlock(&text_mutex);
			}
		}
	}

	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
		local_flush_icache_all();
}