// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <[email protected]>
 */

#include <linux/crc32.h>
#include <linux/cleanup.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_INTX				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_INTX_IRQ			BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

#define PCI_ENDPOINT_TEST_CAPS			0x30
#define CAP_UNALIGNED_ACCESS			BIT(0)

#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_TI_J721S2			0xb013
#define PCI_DEVICE_ID_LS1088A			0x80c0
#define PCI_DEVICE_ID_IMX8			0x0808

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0		0x0031

#define PCI_DEVICE_ID_ROCKCHIP_RK3588		0x3588

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

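/* MMIO accessors for the test registers in the endpoint's test BAR. */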
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

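/*
 * Shared handler for every requested vector: if the endpoint reports
 * STATUS_IRQ_RAISED, remember which Linux IRQ fired and wake up the ioctl
 * that is waiting on irq_raised.
 */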
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
	}

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

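/*
 * Allocate interrupt vectors of the requested type: a single INTx vector,
 * up to 32 MSI vectors or up to 2048 MSI-X vectors, as permitted by the
 * device and the platform.
 */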
static int pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	switch (type) {
	case IRQ_TYPE_INTX:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_INTX);
		if (irq < 0) {
			dev_err(dev, "Failed to get Legacy interrupt\n");
			return irq;
		}

		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI interrupts\n");
			return irq;
		}

		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0) {
			dev_err(dev, "Failed to get MSI-X interrupts\n");
			return irq;
		}

		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
		return -EINVAL;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return 0;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

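/*
 * Request every allocated vector with the shared test handler.  On failure
 * the vectors requested so far are freed again before returning the error.
 */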
static int pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int ret;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		ret = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (ret)
			goto fail;
	}

	return 0;

fail:
	switch (test->irq_type) {
	case IRQ_TYPE_INTX:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	test->num_irqs = i;
	pci_endpoint_test_release_irq(test);

	return ret;
}

static const u32 bar_test_pattern[] = {
	0xA0A0A0A0,
	0xA1A1A1A1,
	0xA2A2A2A2,
	0xA3A3A3A3,
	0xA4A4A4A4,
	0xA5A5A5A5,
};

static int pci_endpoint_test_bar_memcmp(struct pci_endpoint_test *test,
					enum pci_barno barno,
					resource_size_t offset, void *write_buf,
					void *read_buf, int size)
{
	memset(write_buf, bar_test_pattern[barno], size);
	memcpy_toio(test->bar[barno] + offset, write_buf, size);

	memcpy_fromio(read_buf, test->bar[barno] + offset, size);

	return memcmp(write_buf, read_buf, size);
}

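/*
 * PCITEST_BAR: fill the BAR with a per-BAR byte pattern, read it back and
 * compare, iterating in chunks of at most 1MB so that arbitrarily large
 * BARs can be covered with a small bounce buffer.
 */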
static int pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	resource_size_t bar_size, offset = 0;
	void *write_buf __free(kfree) = NULL;
	void *read_buf __free(kfree) = NULL;
	struct pci_dev *pdev = test->pdev;
	int buf_size;

	if (!test->bar[barno])
		return -ENOMEM;

	bar_size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		bar_size = 0x4;

	/*
	 * Allocate a buffer of max size 1MB, and reuse that buffer while
	 * iterating over the whole BAR size (which might be much larger).
	 */
	buf_size = min(SZ_1M, bar_size);

	write_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!write_buf)
		return -ENOMEM;

	read_buf = kmalloc(buf_size, GFP_KERNEL);
	if (!read_buf)
		return -ENOMEM;

	while (offset < bar_size) {
		if (pci_endpoint_test_bar_memcmp(test, barno, offset, write_buf,
						 read_buf, buf_size))
			return -EIO;
		offset += buf_size;
	}

	return 0;
}

static u32 bar_test_pattern_with_offset(enum pci_barno barno, int offset)
{
	u32 val;

	/* Keep the BAR pattern in the top byte. */
	val = bar_test_pattern[barno] & 0xff000000;
	/* Store the (partial) offset in the remaining bytes. */
	val |= offset & 0x00ffffff;

	return val;
}

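/* Fill a whole BAR with the offset-tagged test pattern, one dword at a time. */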
static void pci_endpoint_test_bars_write_bar(struct pci_endpoint_test *test,
					     enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	int j, size;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		writel_relaxed(bar_test_pattern_with_offset(barno, j),
			       test->bar[barno] + j);
}

static int pci_endpoint_test_bars_read_bar(struct pci_endpoint_test *test,
					    enum pci_barno barno)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int j, size;
	u32 val;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4) {
		u32 expected = bar_test_pattern_with_offset(barno, j);

		val = readl_relaxed(test->bar[barno] + j);
		if (val != expected) {
			dev_err(dev,
				"BAR%d incorrect data at offset: %#x, got: %#x expected: %#x\n",
				barno, j, val, expected);
			return -EIO;
		}
	}

	return 0;
}

static int pci_endpoint_test_bars(struct pci_endpoint_test *test)
{
	enum pci_barno bar;
	int ret;

	/* Write all BARs in order (without reading). */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++)
		if (test->bar[bar])
			pci_endpoint_test_bars_write_bar(test, bar);

	/*
	 * Read all BARs in order (without writing).
	 * If there is an address translation issue on the EP, writing one BAR
	 * might have overwritten another BAR. Ensure that this is not the case.
	 * (Reading back the BAR directly after writing can not detect this.)
	 */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar]) {
			ret = pci_endpoint_test_bars_read_bar(test, bar);
			if (ret)
				return ret;
		}
	}

	return 0;
}

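/*
 * PCITEST_INTX_IRQ: ask the endpoint to raise a legacy INTx interrupt and
 * wait up to one second for the shared handler to observe it.
 */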
static int pci_endpoint_test_intx_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_INTX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_INTX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	return 0;
}

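/*
 * PCITEST_MSI/PCITEST_MSIX: ask the endpoint to raise the given MSI or MSI-X
 * vector (1-based) and verify that the interrupt arrived on the Linux IRQ
 * mapped to that vector.
 */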
static int pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				       u16 msi_num, bool msix)
{
	struct pci_dev *pdev = test->pdev;
	u32 val;
	int ret;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix ? COMMAND_RAISE_MSIX_IRQ :
				 COMMAND_RAISE_MSI_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return -ETIMEDOUT;

	ret = pci_irq_vector(pdev, msi_num - 1);
	if (ret < 0)
		return ret;

	if (ret != test->last_irq)
		return -EIO;

	return 0;
}

static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

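/*
 * PCITEST_COPY: program source and destination DMA addresses, let the
 * endpoint copy the buffer, and compare the CRC32 of both buffers to verify
 * the transfer.
 */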
static int pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_src_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = -ENOMEM;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_dst_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map destination buffer address\n");
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 != src_crc32)
		ret = -EIO;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);
	return ret;
}

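/*
 * PCITEST_WRITE: the host fills a buffer with random data, writes its CRC32
 * into the checksum register and issues COMMAND_READ, i.e. the endpoint
 * reads the buffer from host memory and verifies the checksum on its side.
 */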
static int pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		return -ENOMEM;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (!(reg & STATUS_READ_SUCCESS))
		ret = -EIO;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);
	return ret;
}

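/*
 * PCITEST_READ: the host provides an empty buffer and issues COMMAND_WRITE,
 * i.e. the endpoint writes data into host memory; the host then compares
 * its CRC32 of the received data with the checksum reported by the endpoint.
 */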
static int pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int ret;

	ret = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (ret) {
		dev_err(dev, "Failed to get transfer param\n");
		return -EFAULT;
	}

	ret = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (ret)
		return ret;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_INTX || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		return -ENOMEM;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	ret = dma_mapping_error(dev, orig_phys_addr);
	if (ret) {
		dev_err(dev, "failed to map source buffer address\n");
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 != pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = -EIO;

err_phys_addr:
	kfree(orig_addr);
	return ret;
}

static int pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	return 0;
}

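/*
 * PCITEST_SET_IRQTYPE: switch the device to the requested interrupt type by
 * releasing the current vectors and allocating/requesting new ones.
 */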
static int pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	int ret;

	if (req_irq_type < IRQ_TYPE_INTX || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return -EINVAL;
	}

	if (test->irq_type == req_irq_type)
		return 0;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	ret = pci_endpoint_test_alloc_irq_vectors(test, req_irq_type);
	if (ret)
		return ret;

	ret = pci_endpoint_test_request_irq(test);
	if (ret) {
		pci_endpoint_test_free_irq_vectors(test);
		return ret;
	}

	irq_type = test->irq_type;
	return 0;
}

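/*
 * ioctl dispatcher for the misc device.  All tests are serialized by
 * test->mutex.  As a minimal illustrative sketch only (not taken from this
 * file; the in-tree userspace client is tools/pci/pcitest.c), assuming the
 * device enumerated as instance 0, a caller could look like:
 *
 *	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
 *	struct pci_endpoint_test_xfer_param param = {
 *		.size = 0x100000,		// hypothetical transfer size
 *		.flags = PCITEST_FLAGS_USE_DMA,
 *	};
 *	if (ioctl(fd, PCITEST_WRITE, &param) < 0)
 *		perror("PCITEST_WRITE");
 *	close(fd);
 */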
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);

	reinit_completion(&test->irq_raised);
	test->last_irq = -ENODATA;

	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_BARS:
		ret = pci_endpoint_test_bars(test);
		break;
	case PCITEST_INTX_IRQ:
		ret = pci_endpoint_test_intx_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

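/*
 * Read the capability register exposed by the endpoint.  An endpoint that
 * advertises CAP_UNALIGNED_ACCESS can handle arbitrarily aligned buffers,
 * so the host-side alignment restriction can be dropped.
 */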
static void pci_endpoint_test_get_capabilities(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	u32 caps;

	caps = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CAPS);
	dev_dbg(dev, "PCI_ENDPOINT_TEST_CAPS: %#x\n", caps);

	/* CAP_UNALIGNED_ACCESS is set if the EP can do unaligned access */
	if (caps & CAP_UNALIGNED_ACCESS)
		test->alignment = 0;
}

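/*
 * Probe: enable the device, map its BARs, allocate and request interrupt
 * vectors and register a /dev/pci-endpoint-test.<id> misc device that
 * exposes the ioctl interface.
 */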
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int ret;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_INTX;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "Cannot enable PCI device\n");
		return ret;
	}

	ret = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (ret) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	ret = pci_endpoint_test_alloc_irq_vectors(test, irq_type);
	if (ret)
		goto err_disable_irq;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		ret = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_alloc(&pci_endpoint_test_ida, GFP_KERNEL);
	if (id < 0) {
		ret = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		ret = -ENOMEM;
		goto err_ida_remove;
	}

	ret = pci_endpoint_test_request_irq(test);
	if (ret)
		goto err_kfree_test_name;

	pci_endpoint_test_get_capabilities(test);

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		ret = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	ret = misc_register(misc_device);
	if (ret) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_free(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return ret;
}

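/* Undo everything done in probe, in reverse order. */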
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_free(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data rk3588_data = {
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

/*
 * If the controller's Vendor/Device ID are programmable, you may be able to
 * use one of the existing entries for testing instead of adding a new one.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_ROCKCHIP, PCI_DEVICE_ID_ROCKCHIP_RK3588),
	  .driver_data = (kernel_ulong_t)&rk3588_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <[email protected]>");
MODULE_LICENSE("GPL v2");