// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware.  See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <asm/vendor_extensions/thead_hwprobe.h>
#include <vdso/vsyscall.h>
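
/*
 * Resolve one of the machine ID keys (mvendorid/marchid/mimpid) for the given
 * set of CPUs: the reported value is the ID shared by every CPU in the set,
 * or -1 if it differs between them.
 */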
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}

static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext))	\
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZACAS);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All of the following extensions depend on the kernel's
		 * support for V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}

		if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM))
			EXT_KEY(SUPM);
#undef EXT_KEY
	}

	/* Now turn off reporting features that any CPU is missing. */
	pair->value &= ~missing;
}
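
/*
 * Helper: report whether the given RISCV_HWPROBE_KEY_IMA_EXT_0 extension bit
 * is present on every CPU in @cpus.
 */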
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}
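
/*
 * Scalar misaligned access performance. With CONFIG_RISCV_PROBE_UNALIGNED_ACCESS
 * the boot-time per-CPU measurement is reported (UNKNOWN if the CPUs in the set
 * disagree); otherwise the answer is fixed by the kernel configuration.
 */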
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif
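
/*
 * Vector misaligned access performance, mirroring the scalar helpers above:
 * either the per-CPU probed value or a Kconfig-determined answer.
 */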
#ifdef CONFIG_RISCV_VECTOR_MISALIGNED
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	/* Report supported/unsupported even if the speed wasn't probed. */
	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(vector_misaligned_access, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_vec_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_SLOW_VECTOR_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_VECTOR_SLOW;

	return RISCV_HWPROBE_MISALIGNED_VECTOR_UNKNOWN;
}
#endif
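
/*
 * Fill in the value for a single key, consistent across the given set of CPUs.
 * Unknown keys are reported back with key == -1 and value == 0.
 */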
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_MISALIGNED_VECTOR_PERF:
		pair->value = hwprobe_vec_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;

	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	case RISCV_HWPROBE_KEY_VENDOR_EXT_THEAD_0:
		hwprobe_isa_vendor_ext_thead_0(pair, cpus);
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
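
/*
 * The flags == 0 flavour of the syscall: for each key supplied by userspace,
 * fill in the value that holds across the requested CPU set (or across all
 * online CPUs when cpusetsize is 0 and cpus_user is NULL).
 */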
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU; without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
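
/*
 * The RISCV_HWPROBE_WHICH_CPUS flavour: given fully-specified key/value pairs,
 * narrow the user-supplied CPU mask down to the online CPUs whose answers
 * match every pair. An unrecognized key clears the whole mask.
 */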
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };

		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}
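
/* Dispatch on the flags: WHICH_CPUS queries vs. the default value lookup. */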
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU

static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_time_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */
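
/*
 * Rough userspace usage sketch (illustrative only, not compiled here): using
 * the UAPI <asm/hwprobe.h> definitions and __NR_riscv_hwprobe, a caller could
 * query the base behavior and IMA extension bits for all online CPUs with:
 *
 *	struct riscv_hwprobe pairs[2] = {
 *		{ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
 *		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
 *	};
 *
 *	ret = syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0);
 *
 * On success, pairs[1].value would then carry, e.g., RISCV_HWPROBE_IMA_V when
 * vector is usable on every online CPU.
 */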
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}