// SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/random.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci_regs.h>

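/* Interrupt types the host can request through the irq_type register */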
#define IRQ_TYPE_INTX			0
#define IRQ_TYPE_MSI			1
#define IRQ_TYPE_MSIX			2

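/* Commands the host writes to the command register */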
#define COMMAND_RAISE_INTX_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ		BIT(1)
#define COMMAND_RAISE_MSIX_IRQ		BIT(2)
#define COMMAND_READ			BIT(3)
#define COMMAND_WRITE			BIT(4)
#define COMMAND_COPY			BIT(5)

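/* Status bits the endpoint reports back in the status register */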
#define STATUS_READ_SUCCESS		BIT(0)
#define STATUS_READ_FAIL		BIT(1)
#define STATUS_WRITE_SUCCESS		BIT(2)
#define STATUS_WRITE_FAIL		BIT(3)
#define STATUS_COPY_SUCCESS		BIT(4)
#define STATUS_COPY_FAIL		BIT(5)
#define STATUS_IRQ_RAISED		BIT(6)
#define STATUS_SRC_ADDR_INVALID		BIT(7)
#define STATUS_DST_ADDR_INVALID		BIT(8)

#define FLAG_USE_DMA			BIT(0)

#define TIMER_RESOLUTION		1

#define CAP_UNALIGNED_ACCESS		BIT(0)

static struct workqueue_struct *kpcitest_workqueue;

struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];
	struct pci_epf		*epf;
	enum pci_barno		test_reg_bar;
	size_t			msix_table_offset;
	struct delayed_work	cmd_handler;
	struct dma_chan		*dma_chan_tx;
	struct dma_chan		*dma_chan_rx;
	struct dma_chan		*transfer_chan;
	dma_cookie_t		transfer_cookie;
	enum dma_status		transfer_status;
	struct completion	transfer_complete;
	bool			dma_supported;
	bool			dma_private;
	const struct pci_epc_features *epc_features;
};

struct pci_epf_test_reg {
	__le32 magic;
	__le32 command;
	__le32 status;
	__le64 src_addr;
	__le64 dst_addr;
	__le32 size;
	__le32 checksum;
	__le32 irq_type;
	__le32 irq_number;
	__le32 flags;
	__le32 caps;
} __packed;

static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

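/* Default BAR sizes, in bytes; the test register BAR is sized separately */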
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };

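/*
 * dmaengine completion callback: record the final transfer status and wake
 * up the waiter, but only once the transfer has reached a terminal state
 * (DMA_COMPLETE or DMA_ERROR).
 */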
static void pci_epf_test_dma_callback(void *param)
{
	struct pci_epf_test *epf_test = param;
	struct dma_tx_state state;

	epf_test->transfer_status =
		dmaengine_tx_status(epf_test->transfer_chan,
				    epf_test->transfer_cookie, &state);
	if (epf_test->transfer_status == DMA_COMPLETE ||
	    epf_test->transfer_status == DMA_ERROR)
		complete(&epf_test->transfer_complete);
}

/**
 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
 *				  data between PCIe EP and remote PCIe RC
 * @epf_test: the EPF test device that performs the data transfer operation
 * @dma_dst: The destination address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @dma_src: The source address of the data transfer. It can be a physical
 *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
 * @len: The size of the data transfer
 * @dma_remote: remote RC physical address
 * @dir: DMA transfer direction
 *
 * Function that uses dmaengine API to transfer data between PCIe EP and remote
 * PCIe RC. The source and destination address can be a physical address given
 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
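 *
 * For example, pci_epf_test_read() uses this helper to move a chunk of host
 * memory into a local buffer:
 *
 *	ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
 *					 map.phys_addr, map_size, src_addr,
 *					 DMA_DEV_TO_MEM);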
 *
 * The function returns '0' on success and a negative value on failure.
 */
static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
				      dma_addr_t dma_dst, dma_addr_t dma_src,
				      size_t len, dma_addr_t dma_remote,
				      enum dma_transfer_direction dir)
{
	struct dma_chan *chan = (dir == DMA_MEM_TO_DEV) ?
				 epf_test->dma_chan_tx : epf_test->dma_chan_rx;
	dma_addr_t dma_local = (dir == DMA_MEM_TO_DEV) ? dma_src : dma_dst;
	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	struct pci_epf *epf = epf_test->epf;
	struct dma_async_tx_descriptor *tx;
	struct dma_slave_config sconf = {};
	struct device *dev = &epf->dev;
	int ret;

	if (IS_ERR_OR_NULL(chan)) {
		dev_err(dev, "Invalid DMA memcpy channel\n");
		return -EINVAL;
	}

	if (epf_test->dma_private) {
		sconf.direction = dir;
		if (dir == DMA_MEM_TO_DEV)
			sconf.dst_addr = dma_remote;
		else
			sconf.src_addr = dma_remote;

		if (dmaengine_slave_config(chan, &sconf)) {
			dev_err(dev, "DMA slave config fail\n");
			return -EIO;
		}
		tx = dmaengine_prep_slave_single(chan, dma_local, len, dir,
						 flags);
	} else {
		tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len,
					       flags);
	}

	if (!tx) {
		dev_err(dev, "Failed to prepare DMA memcpy\n");
		return -EIO;
	}

	reinit_completion(&epf_test->transfer_complete);
	epf_test->transfer_chan = chan;
	tx->callback = pci_epf_test_dma_callback;
	tx->callback_param = epf_test;
	epf_test->transfer_cookie = dmaengine_submit(tx);

	ret = dma_submit_error(epf_test->transfer_cookie);
	if (ret) {
		dev_err(dev, "Failed to do DMA tx_submit %d\n", ret);
		goto terminate;
	}

	dma_async_issue_pending(chan);
	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
	if (ret < 0) {
		dev_err(dev, "DMA wait_for_completion interrupted\n");
		goto terminate;
	}

	if (epf_test->transfer_status == DMA_ERROR) {
		dev_err(dev, "DMA transfer failed\n");
		ret = -EIO;
	}

terminate:
	dmaengine_terminate_sync(chan);

	return ret;
}

struct epf_dma_filter {
	struct device *dev;
	u32 dma_mask;
};

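/*
 * dma_request_channel() filter: accept only channels exposed by the EPC's
 * parent device (typically the DMA controller integrated in the endpoint
 * controller) that support the requested transfer direction.
 */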
static bool epf_dma_filter_fn(struct dma_chan *chan, void *node)
{
	struct epf_dma_filter *filter = node;
	struct dma_slave_caps caps;

	memset(&caps, 0, sizeof(caps));
	dma_get_slave_caps(chan, &caps);

	return chan->device->dev == filter->dev
		&& (filter->dma_mask & caps.directions);
}

/**
 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Function to initialize EPF test DMA channel.
 */
static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct epf_dma_filter filter;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	int ret;

	filter.dev = epf->epc->dev.parent;
	filter.dma_mask = BIT(DMA_DEV_TO_MEM);

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);
	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA rx channel. Falling back to generic one\n");
		goto fail_back_tx;
	}

	epf_test->dma_chan_rx = dma_chan;

	filter.dma_mask = BIT(DMA_MEM_TO_DEV);
	dma_chan = dma_request_channel(mask, epf_dma_filter_fn, &filter);

	if (!dma_chan) {
		dev_info(dev, "Failed to get private DMA tx channel. Falling back to generic one\n");
		goto fail_back_rx;
	}

	epf_test->dma_chan_tx = dma_chan;
	epf_test->dma_private = true;

	init_completion(&epf_test->transfer_complete);

	return 0;

fail_back_rx:
	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;

fail_back_tx:
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	dma_chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(dma_chan)) {
		ret = PTR_ERR(dma_chan);
		if (ret != -EPROBE_DEFER)
			dev_err(dev, "Failed to get DMA channel\n");
		return ret;
	}
	init_completion(&epf_test->transfer_complete);

	epf_test->dma_chan_tx = epf_test->dma_chan_rx = dma_chan;

	return 0;
}

/**
 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
 * @epf_test: the EPF test device that performs data transfer operation
 *
 * Helper to cleanup EPF test DMA channel.
 */
static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
{
	if (!epf_test->dma_supported)
		return;

	dma_release_channel(epf_test->dma_chan_tx);
	if (epf_test->dma_chan_tx == epf_test->dma_chan_rx) {
		epf_test->dma_chan_tx = NULL;
		epf_test->dma_chan_rx = NULL;
		return;
	}

	dma_release_channel(epf_test->dma_chan_rx);
	epf_test->dma_chan_rx = NULL;
}

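/*
 * Log the size, elapsed time and throughput (in KB/s) of a completed READ,
 * WRITE or COPY operation.
 */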
static void pci_epf_test_print_rate(struct pci_epf_test *epf_test,
				    const char *op, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 ts = timespec64_sub(*end, *start);
	u64 rate = 0, ns;

	/* calculate the rate */
	ns = timespec64_to_ns(&ts);
	if (ns)
		rate = div64_u64(size * NSEC_PER_SEC, ns * 1000);

	dev_info(&epf_test->epf->dev,
		 "%s => Size: %llu B, DMA: %s, Time: %llu.%09u s, Rate: %llu KB/s\n",
		 op, size, dma ? "YES" : "NO",
		 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate);
}

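/*
 * Handle COMMAND_COPY: map both host buffers through the endpoint
 * controller's outbound address space and copy from source to destination,
 * chunk by chunk, either with the DMA engine (memcpy channel) or with the
 * CPU through an intermediate buffer.
 */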
static void pci_epf_test_copy(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epc_map src_map, dst_map;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, copy_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;
	void *copy_buf = NULL, *buf;

	orig_size = copy_size = le32_to_cpu(reg->size);

	if (flags & FLAG_USE_DMA) {
		if (!dma_has_cap(DMA_MEMCPY, epf_test->dma_chan_tx->device->cap_mask)) {
			dev_err(dev, "DMA controller doesn't support MEMCPY\n");
			ret = -EINVAL;
			goto set_status;
		}
	} else {
		copy_buf = kzalloc(copy_size, GFP_KERNEL);
		if (!copy_buf) {
			ret = -ENOMEM;
			goto set_status;
		}
		buf = copy_buf;
	}

	while (copy_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, copy_size, &src_map);
		if (ret) {
			dev_err(dev, "Failed to map source address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, copy_size, &dst_map);
		if (ret) {
			dev_err(dev, "Failed to map destination address\n");
			status = STATUS_DST_ADDR_INVALID;
			pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no,
					  &src_map);
			goto free_buf;
		}

		map_size = min_t(size_t, dst_map.pci_size, src_map.pci_size);

		ktime_get_ts64(&start);
		if (flags & FLAG_USE_DMA) {
			ret = pci_epf_test_data_transfer(epf_test,
					dst_map.phys_addr, src_map.phys_addr,
					map_size, 0, DMA_MEM_TO_MEM);
			if (ret) {
				dev_err(dev, "Data transfer failed\n");
				goto unmap;
			}
		} else {
			memcpy_fromio(buf, src_map.virt_addr, map_size);
			memcpy_toio(dst_map.virt_addr, buf, map_size);
			buf += map_size;
		}
		ktime_get_ts64(&end);

		copy_size -= map_size;
		src_addr += map_size;
		dst_addr += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "COPY", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

unmap:
	if (map_size) {
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &dst_map);
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &src_map);
	}

free_buf:
	kfree(copy_buf);

set_status:
	if (!ret)
		status |= STATUS_COPY_SUCCESS;
	else
		status |= STATUS_COPY_FAIL;
	reg->status = cpu_to_le32(status);
}

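/*
 * Handle COMMAND_READ: the endpoint reads the host buffer at src_addr into
 * a local buffer and verifies its CRC32 against the checksum the host
 * placed in the checksum register.
 */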
static void pci_epf_test_read(struct pci_epf_test *epf_test,
			      struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *src_buf, *buf;
	u32 crc32;
	struct pci_epc_map map;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 src_addr = le64_to_cpu(reg->src_addr);
	size_t orig_size, src_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 checksum = le32_to_cpu(reg->checksum);
	u32 status = 0;

	orig_size = src_size = le32_to_cpu(reg->size);

	src_buf = kzalloc(src_size, GFP_KERNEL);
	if (!src_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	buf = src_buf;

	while (src_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      src_addr, src_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_SRC_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			dst_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_FROM_DEVICE);
			if (dma_mapping_error(dma_dev, dst_phys_addr)) {
				dev_err(dev,
					"Failed to map destination buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);
			ret = pci_epf_test_data_transfer(epf_test,
					dst_phys_addr, map.phys_addr,
					map_size, src_addr, DMA_DEV_TO_MEM);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, dst_phys_addr, map_size,
					 DMA_FROM_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_fromio(buf, map.virt_addr, map_size);
			ktime_get_ts64(&end);
		}

		src_size -= map_size;
		src_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "READ", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	crc32 = crc32_le(~0, src_buf, orig_size);
	if (crc32 != checksum)
		ret = -EIO;

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(src_buf);

set_status:
	if (!ret)
		status |= STATUS_READ_SUCCESS;
	else
		status |= STATUS_READ_FAIL;
	reg->status = cpu_to_le32(status);
}

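/*
 * Handle COMMAND_WRITE: fill a local buffer with random data, publish its
 * CRC32 in the checksum register, and write the buffer to the host memory
 * at dst_addr.
 */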
static void pci_epf_test_write(struct pci_epf_test *epf_test,
			       struct pci_epf_test_reg *reg)
{
	int ret = 0;
	void *dst_buf, *buf;
	struct pci_epc_map map;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct device *dma_dev = epf->epc->dev.parent;
	u64 dst_addr = le64_to_cpu(reg->dst_addr);
	size_t orig_size, dst_size;
	ssize_t map_size = 0;
	u32 flags = le32_to_cpu(reg->flags);
	u32 status = 0;

	orig_size = dst_size = le32_to_cpu(reg->size);

	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (!dst_buf) {
		ret = -ENOMEM;
		goto set_status;
	}
	get_random_bytes(dst_buf, dst_size);
	reg->checksum = cpu_to_le32(crc32_le(~0, dst_buf, dst_size));
	buf = dst_buf;

	while (dst_size) {
		ret = pci_epc_mem_map(epc, epf->func_no, epf->vfunc_no,
				      dst_addr, dst_size, &map);
		if (ret) {
			dev_err(dev, "Failed to map address\n");
			status = STATUS_DST_ADDR_INVALID;
			goto free_buf;
		}

		map_size = map.pci_size;
		if (flags & FLAG_USE_DMA) {
			src_phys_addr = dma_map_single(dma_dev, buf, map_size,
						       DMA_TO_DEVICE);
			if (dma_mapping_error(dma_dev, src_phys_addr)) {
				dev_err(dev,
					"Failed to map source buffer addr\n");
				ret = -ENOMEM;
				goto unmap;
			}

			ktime_get_ts64(&start);

			ret = pci_epf_test_data_transfer(epf_test,
						map.phys_addr, src_phys_addr,
						map_size, dst_addr,
						DMA_MEM_TO_DEV);
			if (ret)
				dev_err(dev, "Data transfer failed\n");
			ktime_get_ts64(&end);

			dma_unmap_single(dma_dev, src_phys_addr, map_size,
					 DMA_TO_DEVICE);

			if (ret)
				goto unmap;
		} else {
			ktime_get_ts64(&start);
			memcpy_toio(map.virt_addr, buf, map_size);
			ktime_get_ts64(&end);
		}

		dst_size -= map_size;
		dst_addr += map_size;
		buf += map_size;

		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);
		map_size = 0;
	}

	pci_epf_test_print_rate(epf_test, "WRITE", orig_size, &start, &end,
				flags & FLAG_USE_DMA);

	/*
	 * Wait 1ms for the write to complete. Without this delay, an L3
	 * error is observed in the host system.
	 */
	usleep_range(1000, 2000);

unmap:
	if (map_size)
		pci_epc_mem_unmap(epc, epf->func_no, epf->vfunc_no, &map);

free_buf:
	kfree(dst_buf);

set_status:
	if (!ret)
		status |= STATUS_WRITE_SUCCESS;
	else
		status |= STATUS_WRITE_FAIL;
	reg->status = cpu_to_le32(status);
}

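/*
 * Signal completion to the host: set STATUS_IRQ_RAISED and raise the
 * requested INTx, MSI or MSI-X interrupt, validating the requested vector
 * against the number of vectors actually configured.
 */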
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test,
				   struct pci_epf_test_reg *reg)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	u32 status = le32_to_cpu(reg->status);
	u32 irq_number = le32_to_cpu(reg->irq_number);
	u32 irq_type = le32_to_cpu(reg->irq_type);
	int count;

	/*
	 * Set the status before raising the IRQ to ensure that the host sees
	 * the updated value when it gets the IRQ.
	 */
	status |= STATUS_IRQ_RAISED;
	WRITE_ONCE(reg->status, cpu_to_le32(status));

	switch (irq_type) {
	case IRQ_TYPE_INTX:
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_INTX, 0);
		break;
	case IRQ_TYPE_MSI:
		count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSI IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSI, irq_number);
		break;
	case IRQ_TYPE_MSIX:
		count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no);
		if (irq_number > count || count <= 0) {
			dev_err(dev, "Invalid MSIX IRQ number %d / %d\n",
				irq_number, count);
			return;
		}
		pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no,
				  PCI_IRQ_MSIX, irq_number);
		break;
	default:
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
		break;
	}
}

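/*
 * Command dispatch loop: delayed work that polls the command register
 * roughly every millisecond, executes the requested operation, and then
 * requeues itself.
 */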
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	u32 irq_type = le32_to_cpu(reg->irq_type);

	command = le32_to_cpu(READ_ONCE(reg->command));
	if (!command)
		goto reset_handler;

	WRITE_ONCE(reg->command, 0);
	WRITE_ONCE(reg->status, 0);

	if ((le32_to_cpu(READ_ONCE(reg->flags)) & FLAG_USE_DMA) &&
	    !epf_test->dma_supported) {
		dev_err(dev, "Cannot transfer data using DMA\n");
		goto reset_handler;
	}

	if (irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	switch (command) {
	case COMMAND_RAISE_INTX_IRQ:
	case COMMAND_RAISE_MSI_IRQ:
	case COMMAND_RAISE_MSIX_IRQ:
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_WRITE:
		pci_epf_test_write(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_READ:
		pci_epf_test_read(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	case COMMAND_COPY:
		pci_epf_test_copy(epf_test, reg);
		pci_epf_test_raise_irq(epf_test, reg);
		break;
	default:
		dev_err(dev, "Invalid command 0x%x\n", command);
		break;
	}

reset_handler:
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}

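/*
 * Program every allocated BAR into the endpoint controller. A failure on
 * the test register BAR is fatal; any other failing BAR is freed and
 * skipped.
 */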
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, ret;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no,
				      &epf->bar[bar]);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar,
					   PRIMARY_INTERFACE);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}

static void pci_epf_test_clear_bar(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no,
				  &epf->bar[bar]);
	}
}

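/*
 * Advertise optional endpoint capabilities to the host through the caps
 * register. Unaligned access is reported when the controller implements
 * the align_addr() callback.
 */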
static void pci_epf_test_set_capabilities(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];
	struct pci_epc *epc = epf->epc;
	u32 caps = 0;

	if (epc->ops->align_addr)
		caps |= CAP_UNALIGNED_ACCESS;

	reg->caps = cpu_to_le32(caps);
}

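/*
 * Core configuration once the endpoint controller is initialized: set up
 * the DMA channels, write the configuration space header (physical
 * function and first virtual function only), program the BARs, configure
 * MSI/MSI-X, and start polling for commands immediately if the controller
 * cannot signal link-up.
 */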
static int pci_epf_test_epc_init(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epf_header *header = epf->header;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	bool linkup_notifier = false;
	int ret;

	epf_test->dma_supported = true;

	ret = pci_epf_test_init_dma_chan(epf_test);
	if (ret)
		epf_test->dma_supported = false;

	if (epf->vfunc_no <= 1) {
		ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header);
		if (ret) {
			dev_err(dev, "Configuration header write failed\n");
			return ret;
		}
	}

	pci_epf_test_set_capabilities(epf);

	ret = pci_epf_test_set_bar(epf);
	if (ret)
		return ret;

	if (epc_features->msi_capable) {
		ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no,
				      epf->msi_interrupts);
		if (ret) {
			dev_err(dev, "MSI configuration failed\n");
			return ret;
		}
	}

	if (epc_features->msix_capable) {
		ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no,
				       epf->msix_interrupts,
				       epf_test->test_reg_bar,
				       epf_test->msix_table_offset);
		if (ret) {
			dev_err(dev, "MSI-X configuration failed\n");
			return ret;
		}
	}

	linkup_notifier = epc_features->linkup_notifier;
	if (!linkup_notifier)
		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);

	return 0;
}

static void pci_epf_test_epc_deinit(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	pci_epf_test_clean_dma_chan(epf_test);
	pci_epf_test_clear_bar(epf);
}

static int pci_epf_test_link_up(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));

	return 0;
}

static int pci_epf_test_link_down(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);

	cancel_delayed_work_sync(&epf_test->cmd_handler);

	return 0;
}

static const struct pci_epc_event_ops pci_epf_test_event_ops = {
	.epc_init = pci_epf_test_epc_init,
	.epc_deinit = pci_epf_test_epc_deinit,
	.link_up = pci_epf_test_link_up,
	.link_down = pci_epf_test_link_down,
};

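/*
 * Allocate backing memory for the BARs. The test register BAR holds the
 * register block (aligned to 128 bytes), followed by the MSI-X table and
 * pending bit array (PBA) when MSI-X is supported. Every other free BAR
 * gets its default size from bar_size[].
 */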
static int pci_epf_test_alloc_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct device *dev = &epf->dev;
	size_t msix_table_size = 0;
	size_t test_reg_bar_size;
	size_t pba_size = 0;
	void *base;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	enum pci_barno bar;
	const struct pci_epc_features *epc_features = epf_test->epc_features;
	size_t test_reg_size;

	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);

	if (epc_features->msix_capable) {
		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
		epf_test->msix_table_offset = test_reg_bar_size;
		/* Align to QWORD or 8 Bytes */
		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
	}
	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;

	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
				   epc_features, PRIMARY_INTERFACE);
	if (!base) {
		dev_err(dev, "Failed to allocate register space\n");
		return -ENOMEM;
	}
	epf_test->reg[test_reg_bar] = base;

	for (bar = BAR_0; bar < PCI_STD_NUM_BARS; bar++) {
		bar = pci_epc_get_next_free_bar(epc_features, bar);
		if (bar == NO_BAR)
			break;

		if (bar == test_reg_bar)
			continue;

		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
					   epc_features, PRIMARY_INTERFACE);
		if (!base)
			dev_err(dev, "Failed to allocate space for BAR%d\n",
				bar);
		epf_test->reg[bar] = base;
	}

	return 0;
}

static void pci_epf_test_free_space(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	int bar;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (!epf_test->reg[bar])
			continue;

		pci_epf_free_space(epf, epf_test->reg[bar], bar,
				   PRIMARY_INTERFACE);
	}
}

static int pci_epf_test_bind(struct pci_epf *epf)
{
	int ret;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	const struct pci_epc_features *epc_features;
	enum pci_barno test_reg_bar = BAR_0;
	struct pci_epc *epc = epf->epc;

	if (WARN_ON_ONCE(!epc))
		return -EINVAL;

	epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no);
	if (!epc_features) {
		dev_err(&epf->dev, "epc_features not implemented\n");
		return -EOPNOTSUPP;
	}

	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
	if (test_reg_bar < 0)
		return -EINVAL;

	epf_test->test_reg_bar = test_reg_bar;
	epf_test->epc_features = epc_features;

	ret = pci_epf_test_alloc_space(epf);
	if (ret)
		return ret;

	return 0;
}

static void pci_epf_test_unbind(struct pci_epf *epf)
{
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	struct pci_epc *epc = epf->epc;

	cancel_delayed_work_sync(&epf_test->cmd_handler);
	if (epc->init_complete) {
		pci_epf_test_clean_dma_chan(epf_test);
		pci_epf_test_clear_bar(epf);
	}
	pci_epf_test_free_space(epf);
}

static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};

static int pci_epf_test_probe(struct pci_epf *epf,
			      const struct pci_epf_device_id *id)
{
	struct pci_epf_test *epf_test;
	struct device *dev = &epf->dev;

	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
	if (!epf_test)
		return -ENOMEM;

	epf->header = &test_header;
	epf_test->epf = epf;

	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);

	epf->event_ops = &pci_epf_test_event_ops;

	epf_set_drvdata(epf, epf_test);
	return 0;
}

static const struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};

static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};

static int __init pci_epf_test_init(void)
{
	int ret;

	kpcitest_workqueue = alloc_workqueue("kpcitest",
					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
	if (!kpcitest_workqueue) {
		pr_err("Failed to allocate the kpcitest work queue\n");
		return -ENOMEM;
	}

	ret = pci_epf_register_driver(&test_driver);
	if (ret) {
		destroy_workqueue(kpcitest_workqueue);
		pr_err("Failed to register pci epf test driver --> %d\n", ret);
		return ret;
	}

	return 0;
}
module_init(pci_epf_test_init);

static void __exit pci_epf_test_exit(void)
{
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);

MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");