1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Access to PCI I/O memory from user space programs.
4  *
5  * Copyright IBM Corp. 2014
6  * Author(s): Alexey Ishchuk <[email protected]>
7  */
8 #include <linux/kernel.h>
9 #include <linux/syscalls.h>
10 #include <linux/init.h>
11 #include <linux/mm.h>
12 #include <linux/errno.h>
13 #include <linux/pci.h>
14 #include <asm/asm-extable.h>
15 #include <asm/pci_io.h>
16 #include <asm/pci_debug.h>
17 #include <asm/asm.h>
18 
/* Record cc, PCI status and the faulting MMIO offset in the zpci debug trace. */
static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {
		.offset = offset,
		.cc = cc,
		.status = status,
	};

	zpci_err_hex(&data, sizeof(data));
}
29 
/*
 * Store a block of @len bytes from user memory at @src to the PCI MMIO
 * address @ioaddr with the PCISTB instruction. The store is executed with
 * secondary address space control in effect (sacf 256) so that @src is
 * translated through the user mappings; callers use this path for sizes
 * above 8 bytes (bounded by ZPCI_MAX_WRITE_SIZE — see __memcpy_toio_inuser).
 *
 * Returns the transformed PCISTB condition code, or -ENXIO if the
 * instruction (or the sacf window) raised an exception. The PCI status
 * byte is extracted into *@status.
 */
static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc, exception;

	/* Assume an exception; the lhi after the insn clears it on success. */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:	lhi	%[exc],0\n"
		"2:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: CC_CLOBBER_LIST("memory"));
	/* Status byte as left by pcistb in bits 24-31 of the length register. */
	*status = len >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}
50 
/*
 * Store 0 < @ulen <= 8 bytes from user memory at @src to the PCI MMIO
 * address @ioaddr with the PCISTG instruction.
 *
 * Returns the transformed PCISTG condition code, -ENXIO if an exception
 * occurred, or -EFAULT if the user buffer could not be read completely.
 * The PCI status byte is extracted into *@status.
 */
static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc, exception;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	llgc	%[tmp],0(%[src])\n"
		"4:	sllg	%[val],%[val],8\n"
		"	aghi	%[src],1\n"
		"	ogr	%[val],%[tmp]\n"
		"	brctg	%[cnt],0b\n"
		"1:	.insn	rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:	lhi	%[exc],0\n"
		"3:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		: [src] "+a" (src), [cnt] "+d" (cnt),
		  [val] "+d" (val), [tmp] "=d" (tmp), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:
		: CC_CLOBBER_LIST("memory"));
	/* Status byte as left by pcistg in bits 24-31 of the odd register. */
	*status = ioaddr_len.odd >> 24 & 0xff;

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}
93 
/*
 * Copy @n bytes from the user buffer @src to PCI MMIO at @dst.
 *
 * The transfer is split into chunks no larger than ZPCI_MAX_WRITE_SIZE;
 * chunks larger than 8 bytes go through PCISTB, smaller ones through
 * PCISTG. The first failing chunk aborts the copy, and its return code,
 * status byte and destination address are dumped to the zpci trace.
 */
static inline int __memcpy_toio_inuser(void __iomem *dst,
				   const void __user *src, size_t n)
{
	u8 status = 0;
	int size, rc = 0;

	if (!src)
		return -EINVAL;

	for (; n > 0; src += size, dst += size, n -= size) {
		size = zpci_get_max_io_size((u64 __force) dst,
					    (u64 __force) src, n,
					    ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}
121 
/*
 * Write @length bytes from @user_buffer to the PCI MMIO location mapped at
 * user address @mmio_addr. The access must not cross a page boundary.
 *
 * On MIO-enabled systems the data is copied directly from user space via
 * the PCISTB/PCISTG user-copy helpers. Otherwise the data is staged in a
 * kernel buffer and the target pfn is resolved through the caller's VMA.
 */
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	/* Reject zero length and accesses that would cross a page boundary. */
	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void  __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	/* Stage the data in a kernel buffer; small writes use the stack. */
	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	args.address = mmio_addr;
	args.vma = vma;
	ret = follow_pfnmap_start(&args);
	if (ret) {
		/* Fault the mapping in once and retry the pfn lookup. */
		fixup_user_fault(current->mm, mmio_addr, FAULT_FLAG_WRITE, NULL);
		ret = follow_pfnmap_start(&args);
		if (ret)
			goto out_unlock_mmap;
	}

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	/*
	 * ret is 0 after a successful follow_pfnmap_start(); without this
	 * assignment a non-ZPCI address would fall through and report
	 * success even though nothing was written (the read path below
	 * handles the same case with -EFAULT).
	 */
	ret = -EFAULT;
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out_unlock_pt;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}
201 
/*
 * Read 0 < @ulen <= 8 bytes from the PCI MMIO address @ioaddr with the
 * PCILG instruction and store them byte by byte to the user buffer @dst
 * while secondary address space control is in effect.
 *
 * Returns the transformed PCILG condition code, -ENXIO if an exception
 * occurred, or -EFAULT if not all bytes could be stored to user space.
 * The PCI status byte is extracted into *@status.
 */
static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	/* shift starts at ulen * 8 bits and steps down by 8 per stored byte */
	int shift = ulen * 8;
	int cc, exception;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	exception = 1;
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:	lhi	%[exc],0\n"
		"	jne	4f\n"
		"2:	ahi	%[shift],-8\n"
		"	srlg	%[tmp],%[val],0(%[shift])\n"
		"3:	stc	%[tmp],0(%[dst])\n"
		"5:	aghi	%[dst],1\n"
		"	brctg	%[cnt],2b\n"
		/*
		 * Use xr to clear exc and set condition code to zero
		 * to ensure flag output is correct for this branch.
		 */
		"	xr	%[exc],%[exc]\n"
		"4:	sacf	768\n"
		CC_IPM(cc)
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
		: [ioaddr_len] "+&d" (ioaddr_len.pair), [exc] "+d" (exception),
		  CC_OUT(cc, cc), [val] "=d" (val),
		  [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		  [shift] "+d" (shift)
		:
		: CC_CLOBBER_LIST("memory"));

	cc = exception ? -ENXIO : CC_TRANSFORM(cc);
	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	/* Status byte as left by pcilg in bits 24-31 of the odd register. */
	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}
251 
/*
 * Copy @n bytes from PCI MMIO at @src to the user buffer @dst.
 *
 * The transfer is split into PCILG chunks no larger than
 * ZPCI_MAX_READ_SIZE. The first failing chunk aborts the copy, and its
 * return code, status byte and destination address are dumped to the
 * zpci trace.
 */
static inline int __memcpy_fromio_inuser(void __user *dst,
				     const void __iomem *src,
				     unsigned long n)
{
	u8 status = 0;
	int size, rc = 0;

	for (; n > 0; src += size, dst += size, n -= size) {
		size = zpci_get_max_io_size((u64 __force) src,
					    (u64 __force) dst, n,
					    ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}
274 
/*
 * Read @length bytes from the PCI MMIO location mapped at user address
 * @mmio_addr into @user_buffer. The access must not cross a page boundary.
 *
 * On MIO-enabled systems the data is copied directly to user space via
 * the PCILG user-copy helper. Otherwise the data is read into a kernel
 * buffer (pfn resolved through the caller's VMA) and copied out after
 * the mmap lock is dropped.
 */
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	struct follow_pfnmap_args args = { };
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	/* Reject zero length and accesses that would cross a page boundary. */
	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * a MIO enabled system. Otherwise we would have to check for every
	 * address if it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup which we don't need for MIO capable devices.  Currently
	 * ISM devices are the only devices without MIO support and there is no
	 * known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	/* Stage the data in a kernel buffer; small reads use the stack. */
	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_READ))
		goto out_unlock_mmap;

	args.vma = vma;
	args.address = mmio_addr;
	ret = follow_pfnmap_start(&args);
	if (ret) {
		/* Fault the mapping in once and retry the pfn lookup. */
		fixup_user_fault(current->mm, mmio_addr, 0, NULL);
		ret = follow_pfnmap_start(&args);
		if (ret)
			goto out_unlock_mmap;
	}

	io_addr = (void __iomem *)((args.pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	/* Only addresses in the ZPCI iomap area are valid MMIO targets. */
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	follow_pfnmap_end(&args);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	/* Copy out only after dropping the mmap lock (may fault). */
	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}
356