// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <[email protected]>
 *   Xiao Guangrong <[email protected]>
 *   Joseph Grecco <[email protected]>
 *   Enno Luebbers <[email protected]>
 *   Tim Whisonant <[email protected]>
 *   Ananda Ravuri <[email protected]>
 *   Henry Mitchel <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"

#define RST_POLL_INVL 10 /* us */
#define RST_POLL_TIMEOUT 1000 /* us */

/**
 * __afu_port_enable - enable a port by clearing reset
 * @fdata: port feature dev data.
 *
 * Enable the port by clearing its soft reset bit, which is set by default.
 * The AFU is unable to respond to any MMIO access while in reset.
 * __afu_port_enable() should only be used after __afu_port_disable().
 *
 * The caller needs to hold the lock for protection.
 */
int __afu_port_enable(struct dfl_feature_dev_data *fdata)
{
	void __iomem *base;
	u64 v;

	WARN_ON(!fdata->disable_count);

	if (--fdata->disable_count != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW clears the ack bit to indicate that the port is fully out
	 * of reset.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       !(v & PORT_CTRL_SFTRST_ACK),
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(fdata->dfl_cdev->parent,
			"timeout, failure to enable device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/**
 * __afu_port_disable - disable a port by holding reset
 * @fdata: port feature dev data.
 *
 * Disable the port by setting its soft reset bit, which puts the port into
 * reset.
 *
 * The caller needs to hold the lock for protection.
 */
int __afu_port_disable(struct dfl_feature_dev_data *fdata)
{
	void __iomem *base;
	u64 v;

	if (fdata->disable_count++ != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	/* Set port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v |= PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW sets ack bit to 1 when all outstanding requests have been drained
	 * on this port and minimum soft reset pulse width has elapsed.
	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
			       v & PORT_CTRL_SFTRST_ACK,
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(fdata->dfl_cdev->parent,
			"timeout, failure to disable device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * This function resets the FPGA Port and its accelerator (AFU) by calling
 * __afu_port_disable() and then __afu_port_enable() (i.e. setting the port
 * soft reset bit and then clearing it). Userspace can trigger a Port reset
 * at any time, e.g. during DMA or Partial Reconfiguration. This should
 * never cause any system level issue, only a functional failure (e.g. a
 * DMA or PR operation failure) which is recoverable.
 *
 * Note: the accelerator (AFU) is not accessible while its port is in reset
 * (disabled). Any attempt at MMIO access to the AFU while in reset will
 * result in errors reported via the port error reporting sub feature (if
 * present).
 */
static int __port_reset(struct dfl_feature_dev_data *fdata)
{
	int ret;

	ret = __afu_port_disable(fdata);
	if (ret)
		return ret;

	return __afu_port_enable(fdata);
}
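
/*
 * Illustrative userspace use of the reset path above (a hedged sketch, not
 * part of the driver): the reset is exposed through the DFL_FPGA_PORT_RESET
 * ioctl handled in port_hdr_ioctl() below. The device node name used here
 * is an assumption and may differ on a given system.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	int fd = open("/dev/dfl-port.0", O_RDWR);
 *
 *	if (fd >= 0) {
 *		ioctl(fd, DFL_FPGA_PORT_RESET, 0);	// arg must be 0
 *		close(fd);
 *	}
 */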

static int port_reset(struct platform_device *pdev)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	int ret;

	mutex_lock(&fdata->lock);
	ret = __port_reset(fdata);
	mutex_unlock(&fdata->lock);

	return ret;
}

static int port_get_id(struct dfl_feature_dev_data *fdata)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}

static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	int id = port_get_id(fdata);

	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}

static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	bool ltr;
	u64 v;

	if (kstrtobool(buf, &ltr))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_LATENCY;
	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
	writeq(v, base + PORT_HDR_CTRL);
	mutex_unlock(&fdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ltr);

static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}

static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
	mutex_unlock(&fdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap1_event);

static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}

static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
	mutex_unlock(&fdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap2_event);

static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
static DEVICE_ATTR_RO(power_state);

static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	u64 userclk_freq_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freq_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
	mutex_unlock(&fdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcmd);

static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	u64 userclk_freqcntr_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
	mutex_unlock(&fdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcntrcmd);

static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	u64 userclk_freqsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
static DEVICE_ATTR_RO(userclk_freqsts);

static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	u64 userclk_freqcntrsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);
	userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
	mutex_unlock(&fdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)userclk_freqcntrsts);
}
static DEVICE_ATTR_RO(userclk_freqcntrsts);

static struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_ltr.attr,
	&dev_attr_ap1_event.attr,
	&dev_attr_ap2_event.attr,
	&dev_attr_power_state.attr,
	&dev_attr_userclk_freqcmd.attr,
	&dev_attr_userclk_freqcntrcmd.attr,
	&dev_attr_userclk_freqsts.attr,
	&dev_attr_userclk_freqcntrsts.attr,
	NULL,
};

static umode_t port_hdr_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct dfl_feature_dev_data *fdata;
	umode_t mode = attr->mode;
	void __iomem *base;

	fdata = to_dfl_feature_dev_data(dev);
	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	if (dfl_feature_revision(base) > 0) {
		/*
		 * The userclk sysfs interfaces are only visible when the
		 * port revision is 0; hardware with revision > 0 does not
		 * support them.
		 */
		if (attr == &dev_attr_userclk_freqcmd.attr ||
		    attr == &dev_attr_userclk_freqcntrcmd.attr ||
		    attr == &dev_attr_userclk_freqsts.attr ||
		    attr == &dev_attr_userclk_freqcntrsts.attr)
			mode = 0;
	}

	return mode;
}

static const struct attribute_group port_hdr_group = {
	.attrs      = port_hdr_attrs,
	.is_visible = port_hdr_attrs_visible,
};
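
/*
 * The attributes above appear as sysfs files under the port platform
 * device, typically /sys/bus/platform/devices/dfl-port.0/ (the exact path
 * is an assumption and depends on the system). A minimal userspace sketch
 * that reads power_state and turns on the latency tolerance hint:
 *
 *	#include <stdio.h>
 *
 *	char buf[16];
 *	FILE *f = fopen("/sys/bus/platform/devices/dfl-port.0/power_state", "r");
 *
 *	if (f) {
 *		if (fgets(buf, sizeof(buf), f))
 *			printf("power_state: %s", buf);
 *		fclose(f);
 *	}
 *	f = fopen("/sys/bus/platform/devices/dfl-port.0/ltr", "w");
 *	if (f) {
 *		fputs("1\n", f);
 *		fclose(f);
 *	}
 */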

static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	port_reset(pdev);

	return 0;
}

static long
port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
	       unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_PORT_RESET:
		if (!arg)
			ret = port_reset(pdev);
		else
			ret = -EINVAL;
		break;
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		ret = -ENODEV;
	}

	return ret;
}

static const struct dfl_feature_id port_hdr_id_table[] = {
	{.id = PORT_FEATURE_ID_HEADER,},
	{0,}
};

static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.ioctl = port_hdr_ioctl,
};

static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_AFU);

	mutex_lock(&fdata->lock);
	if (fdata->disable_count) {
		mutex_unlock(&fdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&fdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};

static umode_t port_afu_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct dfl_feature_dev_data *fdata;

	fdata = to_dfl_feature_dev_data(dev);
	/*
	 * sysfs entries are visible only if related private feature is
	 * enumerated.
	 */
	if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_AFU))
		return 0;

	return attr->mode;
}

static const struct attribute_group port_afu_group = {
	.attrs      = port_afu_attrs,
	.is_visible = port_afu_attrs_visible,
};

static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(fdata,
				   DFL_PORT_REGION_INDEX_AFU,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_afu_id_table[] = {
	{.id = PORT_FEATURE_ID_AFU,},
	{0,}
};

static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
};

static int port_stp_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(fdata,
				   DFL_PORT_REGION_INDEX_STP,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_stp_id_table[] = {
	{.id = PORT_FEATURE_ID_STP,},
	{0,}
};

static const struct dfl_feature_ops port_stp_ops = {
	.init = port_stp_init,
};

static long
port_uint_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
		unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case DFL_FPGA_PORT_UINT_GET_IRQ_NUM:
		return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
	case DFL_FPGA_PORT_UINT_SET_IRQ:
		return dfl_feature_ioctl_set_irq(pdev, feature, arg);
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		return -ENODEV;
	}
}

static const struct dfl_feature_id port_uint_id_table[] = {
	{.id = PORT_FEATURE_ID_UINT,},
	{0,}
};

static const struct dfl_feature_ops port_uint_ops = {
	.ioctl = port_uint_ioctl,
};

static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id_table = port_hdr_id_table,
		.ops = &port_hdr_ops,
	},
	{
		.id_table = port_afu_id_table,
		.ops = &port_afu_ops,
	},
	{
		.id_table = port_err_id_table,
		.ops = &port_err_ops,
	},
	{
		.id_table = port_stp_id_table,
		.ops = &port_stp_ops,
	},
	{
		.id_table = port_uint_id_table,
		.ops = &port_uint_ops,
	},
	{
		.ops = NULL,
	}
};

static int afu_open(struct inode *inode, struct file *filp)
{
	struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
	struct platform_device *fdev = fdata->dev;
	int ret;

	mutex_lock(&fdata->lock);
	ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(fdata));
		filp->private_data = fdev;
	}
	mutex_unlock(&fdata->lock);

	return ret;
}

static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_dev_data *fdata;
	struct dfl_feature *feature;

	dev_dbg(&pdev->dev, "Device File Release\n");

	fdata = to_dfl_feature_dev_data(&pdev->dev);

	mutex_lock(&fdata->lock);
	dfl_feature_dev_use_end(fdata);

	if (!dfl_feature_dev_use_count(fdata)) {
		dfl_fpga_dev_for_each_feature(fdata, feature)
			dfl_fpga_set_irq_triggers(feature, 0,
						  feature->nr_irqs, NULL);
		__port_reset(fdata);
		afu_dma_region_destroy(fdata);
	}
	mutex_unlock(&fdata->lock);

	return 0;
}

static long afu_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}

static long
afu_ioctl_get_info(struct dfl_feature_dev_data *fdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&fdata->lock);
	afu = dfl_fpga_fdata_get_private(fdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&fdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
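
/*
 * Illustrative userspace call of DFL_FPGA_PORT_GET_INFO (a sketch only;
 * "fd" is assumed to be an open file descriptor for the port device node,
 * e.g. /dev/dfl-port.0):
 *
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	struct dfl_fpga_port_info info;
 *
 *	memset(&info, 0, sizeof(info));
 *	info.argsz = sizeof(info);
 *	if (!ioctl(fd, DFL_FPGA_PORT_GET_INFO, &info))
 *		printf("regions: %u, umsgs: %u\n",
 *		       info.num_regions, info.num_umsgs);
 */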

static long afu_ioctl_get_region_info(struct dfl_feature_dev_data *fdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(fdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}

static long
afu_ioctl_dma_map(struct dfl_feature_dev_data *fdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(fdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(fdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&fdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}

static long
afu_ioctl_dma_unmap(struct dfl_feature_dev_data *fdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(fdata, unmap.iova);
}
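
/*
 * Illustrative userspace DMA map/unmap sequence (a sketch only; "fd" is
 * assumed to be an open port device fd and "buf"/"len" to describe a
 * page-aligned buffer, e.g. one obtained from mmap(MAP_ANONYMOUS)):
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	struct dfl_fpga_port_dma_map map = { .argsz = sizeof(map) };
 *	struct dfl_fpga_port_dma_unmap unmap = { .argsz = sizeof(unmap) };
 *
 *	map.user_addr = (__u64)(uintptr_t)buf;
 *	map.length = len;
 *	if (!ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map)) {
 *		printf("mapped at iova 0x%llx\n",
 *		       (unsigned long long)map.iova);
 *		unmap.iova = map.iova;
 *		ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 *	}
 */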

static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_dev_data *fdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	fdata = to_dfl_feature_dev_data(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(fdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(fdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(fdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(fdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(fdata, (void __user *)arg);
	default:
		/*
		 * Let the sub-feature's ioctl function handle the cmd.
		 * A sub-feature's ioctl returns -ENODEV when the cmd is not
		 * handled by that sub-feature, and returns 0 or another
		 * error code when it is.
		 */
		dfl_fpga_dev_for_each_feature(fdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	return -EINVAL;
}

static const struct vm_operations_struct afu_vma_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct platform_device *pdev = filp->private_data;
	u64 size = vma->vm_end - vma->vm_start;
	struct dfl_feature_dev_data *fdata;
	struct dfl_afu_mmio_region region;
	u64 offset;
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	fdata = to_dfl_feature_dev_data(&pdev->dev);

	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = afu_mmio_region_get_by_offset(fdata, offset, size, &region);
	if (ret)
		return ret;

	if (!(region.flags & DFL_PORT_REGION_MMAP))
		return -EINVAL;

	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
		return -EPERM;

	if ((vma->vm_flags & VM_WRITE) &&
	    !(region.flags & DFL_PORT_REGION_WRITE))
		return -EPERM;

	/* Support debug access to the mapping */
	vma->vm_ops = &afu_vma_ops;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}
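
/*
 * Illustrative userspace mapping of the AFU MMIO region (a sketch only;
 * "fd" is assumed to be an open port device fd). rinfo.offset is the file
 * offset that afu_mmap() above translates back into the region, and
 * MAP_SHARED is required because private mappings are rejected.
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	struct dfl_fpga_port_region_info rinfo = { .argsz = sizeof(rinfo) };
 *	void *mmio = MAP_FAILED;
 *
 *	rinfo.index = DFL_PORT_REGION_INDEX_AFU;
 *	if (!ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo))
 *		mmio = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, rinfo.offset);
 */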

static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};

static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	mutex_lock(&fdata->lock);
	dfl_fpga_fdata_set_private(fdata, afu);
	afu_mmio_region_init(fdata);
	afu_dma_region_init(fdata);
	mutex_unlock(&fdata->lock);

	return 0;
}

static int afu_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);

	mutex_lock(&fdata->lock);
	afu_mmio_region_destroy(fdata);
	afu_dma_region_destroy(fdata);
	dfl_fpga_fdata_set_private(fdata, NULL);
	mutex_unlock(&fdata->lock);

	return 0;
}

static int port_enable_set(struct dfl_feature_dev_data *fdata, bool enable)
{
	int ret;

	mutex_lock(&fdata->lock);
	if (enable)
		ret = __afu_port_enable(fdata);
	else
		ret = __afu_port_disable(fdata);
	mutex_unlock(&fdata->lock);

	return ret;
}

static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};

static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}

static void afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);
}

static const struct attribute_group *afu_dev_groups[] = {
	&port_hdr_group,
	&port_afu_group,
	&port_err_group,
	NULL
};

static struct platform_driver afu_driver = {
	.driver = {
		.name = DFL_FPGA_FEATURE_DEV_PORT,
		.dev_groups = afu_dev_groups,
	},
	.probe = afu_probe,
	.remove = afu_remove,
};

static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}

static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);

	dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");