// SPDX-License-Identifier: GPL-2.0
/*
 * s390 specific pci instructions
 *
 * Copyright IBM Corp. 2013
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/jump_label.h>
#include <asm/asm-extable.h>
#include <asm/facility.h>
#include <asm/pci_insn.h>
#include <asm/pci_debug.h>
#include <asm/pci_io.h>
#include <asm/processor.h>
#include <asm/asm.h>

#define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond */

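/*
 * Error record dumped via zpci_err_hex_level() into the zPCI debug trace.
 * The union mirrors the two helpers below: request/offset for
 * function-handle based instructions, address/length for the MIO forms.
 */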
struct zpci_err_insn_data {
	u8 insn;
	u8 cc;
	u8 status;
	union {
		struct {
			u64 req;
			u64 offset;
		};
		struct {
			u64 addr;
			u64 len;
		};
	};
} __packed;

static inline void zpci_err_insn_req(int lvl, u8 insn, u8 cc, u8 status,
				     u64 req, u64 offset)
{
	struct zpci_err_insn_data data = {
		.insn = insn, .cc = cc, .status = status,
		.req = req, .offset = offset};

	zpci_err_hex_level(lvl, &data, sizeof(data));
}

static inline void zpci_err_insn_addr(int lvl, u8 insn, u8 cc, u8 status,
				      u64 addr, u64 len)
{
	struct zpci_err_insn_data data = {
		.insn = insn, .cc = cc, .status = status,
		.addr = addr, .len = len};

	zpci_err_hex_level(lvl, &data, sizeof(data));
}

/* Modify PCI Function Controls */
static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
{
	int cc;

	asm volatile (
		"	.insn	rxy,0xe300000000d0,%[req],%[fib]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc), [req] "+d" (req), [fib] "+Q" (*fib)
		:
		: CC_CLOBBER);
	*status = req >> 24 & 0xff;
	return CC_TRANSFORM(cc);
}

u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
{
	bool retried = false;
	u8 cc;

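	/*
	 * Condition code 2 means the function is busy: back off briefly and
	 * retry. The first busy retry and any final failure are recorded in
	 * the debug trace (level 1 and level 0 respectively).
	 */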
	do {
		cc = __mpcifc(req, fib, status);
		if (cc == 2) {
			msleep(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 'M', cc, *status, req, 0);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'M', cc, *status, req, 0);
	else if (retried)
		zpci_err_insn_req(1, 'M', cc, *status, req, 0);

	return cc;
}
EXPORT_SYMBOL_GPL(zpci_mod_fc);

/* Refresh PCI Translations */
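/*
 * RPCIT notifies the machine that the DMA translation table entries for the
 * given address range have changed, so that cached translations are
 * refreshed before the range is used again.
 */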
static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
{
	union register_pair addr_range = {.even = addr, .odd = range};
	int cc;

	asm volatile (
		"	.insn	rre,0xb9d30000,%[fn],%[addr_range]\n"
		CC_IPM(cc)
		: CC_OUT(cc, cc), [fn] "+d" (fn)
		: [addr_range] "d" (addr_range.pair)
		: CC_CLOBBER);
	*status = fn >> 24 & 0xff;
	return CC_TRANSFORM(cc);
}

int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
{
	bool retried = false;
	u8 cc, status;

	do {
		cc = __rpcit(fn, addr, range, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_addr(1, 'R', cc, status, addr, range);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_addr(0, 'R', cc, status, addr, range);
	else if (retried)
		zpci_err_insn_addr(1, 'R', cc, status, addr, range);

	if (cc == 1 && (status == 4 || status == 16))
		return -ENOMEM;

	return (cc) ? -EIO : 0;
}

/* Set Interruption Controls */
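/*
 * Only available when facility 72 is installed; without it -EIO is
 * returned. The interruption subclass (isc) is shifted into the bit
 * position expected by the instruction.
 */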
int zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
{
	if (!test_facility(72))
		return -EIO;

	asm volatile(
		"	.insn	rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
		: : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));

	return 0;
}
EXPORT_SYMBOL_GPL(zpci_set_irq_ctrl);

/* PCI Load */
static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc, exception;
	u64 __data;

	exception = 1;
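	/*
	 * If the PCI instruction below triggers a program check, the fixup
	 * at label 1 skips the lhi that would clear 'exception', so the
	 * caller sees -ENXIO instead of a condition code.
	 */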
	asm volatile (
		"	.insn	rre,0xb9d20000,%[data],%[req_off]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [data] "=d" (__data),
		  [req_off] "+d" (req_off.pair), [exc] "+d" (exception)
		:
		: CC_CLOBBER);
	*status = req_off.even >> 24 & 0xff;
	*data = __data;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
{
	u64 __data;
	int cc;

	cc = ____pcilg(&__data, req, offset, status);
	if (!cc)
		*data = __data;

	return cc;
}

int __zpci_load(u64 *data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcilg(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 'l', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'l', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 'l', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_load);

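/*
 * Legacy (non-MIO) path: look up the iomap entry for the virtual I/O
 * address and build a function-handle based request. The function handle
 * is read with READ_ONCE() as it may be updated concurrently.
 */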
static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
			       unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_load(data, req, ZPCI_OFFSET(addr));
}

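/*
 * MIO variant: the instruction takes the mapped I/O address and length
 * directly instead of a function-handle/offset request.
 */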
static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc, exception;
	u64 __data;

	exception = 1;
	asm volatile (
		"	.insn	rre,0xb9d60000,%[data],%[ioaddr_len]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [data] "=d" (__data),
		  [ioaddr_len] "+d" (ioaddr_len.pair), [exc] "+d" (exception)
		:
		: CC_CLOBBER);
	*status = ioaddr_len.odd >> 24 & 0xff;
	*data = __data;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_load_fh(data, addr, len);

	cc = __pcilg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'L', cc, status, (__force u64) addr, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_load);

/* PCI Store */
static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
{
	union register_pair req_off = {.even = req, .odd = offset};
	int cc, exception;

	exception = 1;
	asm volatile (
		"	.insn	rre,0xb9d00000,%[data],%[req_off]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [req_off] "+d" (req_off.pair), [exc] "+d" (exception)
		: [data] "d" (data)
		: CC_CLOBBER);
	*status = req_off.even >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

int __zpci_store(u64 data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcistg(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 's', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 's', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 's', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store);

static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
				unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
	u64 req = ZPCI_CREATE_REQ(READ_ONCE(entry->fh), entry->bar, len);

	return __zpci_store(data, req, ZPCI_OFFSET(addr));
}

static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
{
	union register_pair ioaddr_len = {.even = ioaddr, .odd = len};
	int cc, exception;

	exception = 1;
	asm volatile (
		"	.insn	rre,0xb9d40000,%[data],%[ioaddr_len]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [ioaddr_len] "+d" (ioaddr_len.pair), [exc] "+d" (exception)
		: [data] "d" (data)
		: CC_CLOBBER_LIST("memory"));
	*status = ioaddr_len.odd >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_store_fh(addr, data, len);

	cc = __pcistg_mio(data, (__force u64) addr, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'S', cc, status, (__force u64) addr, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_store);

/* PCI Store Block */
static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
{
	int cc, exception;

	exception = 1;
	asm volatile (
		"	.insn	rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [req] "+d" (req), [exc] "+d" (exception)
		: [offset] "d" (offset), [data] "Q" (*data)
		: CC_CLOBBER);
	*status = req >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

int __zpci_store_block(const u64 *data, u64 req, u64 offset)
{
	bool retried = false;
	u8 status;
	int cc;

	do {
		cc = __pcistb(data, req, offset, &status);
		if (cc == 2) {
			udelay(ZPCI_INSN_BUSY_DELAY);
			if (!retried) {
				zpci_err_insn_req(1, 'b', cc, status, req, offset);
				retried = true;
			}
		}
	} while (cc == 2);

	if (cc)
		zpci_err_insn_req(0, 'b', cc, status, req, offset);
	else if (retried)
		zpci_err_insn_req(1, 'b', cc, status, req, offset);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(__zpci_store_block);

static inline int zpci_write_block_fh(volatile void __iomem *dst,
				      const void *src, unsigned long len)
{
	struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
	u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
	u64 offset = ZPCI_OFFSET(dst);

	return __zpci_store_block(src, req, offset);
}

static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
{
	int cc, exception;

	exception = 1;
	asm volatile (
		"	.insn	rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
		"0:	lhi	%[exc],0\n"
		"1:\n"
		CC_IPM(cc)
		EX_TABLE(0b, 1b)
		: CC_OUT(cc, cc), [len] "+d" (len), [exc] "+d" (exception)
		: [ioaddr] "d" (ioaddr), [data] "Q" (*data)
		: CC_CLOBBER);
	*status = len >> 24 & 0xff;
	return exception ? -ENXIO : CC_TRANSFORM(cc);
}

int zpci_write_block(volatile void __iomem *dst,
		     const void *src, unsigned long len)
{
	u8 status;
	int cc;

	if (!static_branch_unlikely(&have_mio))
		return zpci_write_block_fh(dst, src, len);

	cc = __pcistb_mio(src, (__force u64) dst, len, &status);
	if (cc)
		zpci_err_insn_addr(0, 'B', cc, status, (__force u64) dst, len);

	return (cc > 0) ? -EIO : cc;
}
EXPORT_SYMBOL_GPL(zpci_write_block);

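/* PCI Write Barrier (only issued when MIO instructions are in use) */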
static inline void __pciwb_mio(void)
{
	asm volatile (".insn rre,0xb9d50000,0,0\n");
}

void zpci_barrier(void)
{
	if (static_branch_likely(&have_mio))
		__pciwb_mio();
}
EXPORT_SYMBOL_GPL(zpci_barrier);