// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Management Engine (FME)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Kang Luwei <[email protected]>
 *   Xiao Guangrong <[email protected]>
 *   Joseph Grecco <[email protected]>
 *   Enno Luebbers <[email protected]>
 *   Tim Whisonant <[email protected]>
 *   Ananda Ravuri <[email protected]>
 *   Henry Mitchel <[email protected]>
 */

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/units.h>
#include <linux/fpga-dfl.h>

#include "dfl.h"
#include "dfl-fme.h"

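/*
 * The sysfs attributes below decode fields of the FME header register
 * block: ports_num, cache_size, fabric_version and socket_id all come
 * from the FME_HDR_CAP capability register.
 */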
static ssize_t ports_num_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return scnprintf(buf, PAGE_SIZE, "%u\n",
                         (unsigned int)FIELD_GET(FME_CAP_NUM_PORTS, v));
}
static DEVICE_ATTR_RO(ports_num);

/*
 * Bitstream (static FPGA region) identifier number. It contains the
 * detailed version and other information of this static FPGA region.
 */
static ssize_t bitstream_id_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_BITSTREAM_ID);

        return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
}
static DEVICE_ATTR_RO(bitstream_id);

/*
 * Bitstream (static FPGA region) metadata. It contains the synthesis
 * date, seed and other information of this static FPGA region.
 */
static ssize_t bitstream_metadata_show(struct device *dev,
                                       struct device_attribute *attr, char *buf)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_BITSTREAM_MD);

        return scnprintf(buf, PAGE_SIZE, "0x%llx\n", (unsigned long long)v);
}
static DEVICE_ATTR_RO(bitstream_metadata);

static ssize_t cache_size_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_CACHE_SIZE, v));
}
static DEVICE_ATTR_RO(cache_size);

static ssize_t fabric_version_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_FABRIC_VERID, v));
}
static DEVICE_ATTR_RO(fabric_version);

static ssize_t socket_id_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
        void __iomem *base;
        u64 v;

        base = dfl_get_feature_ioaddr_by_id(fdata, FME_FEATURE_ID_HEADER);

        v = readq(base + FME_HDR_CAP);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_CAP_SOCKET_ID, v));
}
static DEVICE_ATTR_RO(socket_id);

static struct attribute *fme_hdr_attrs[] = {
        &dev_attr_ports_num.attr,
        &dev_attr_bitstream_id.attr,
        &dev_attr_bitstream_metadata.attr,
        &dev_attr_cache_size.attr,
        &dev_attr_fabric_version.attr,
        &dev_attr_socket_id.attr,
        NULL,
};

static const struct attribute_group fme_hdr_group = {
        .attrs = fme_hdr_attrs,
};

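/*
 * DFL_FPGA_FME_PORT_RELEASE detaches a port from its default host
 * interface (e.g. ahead of assigning it to a virtualized guest), and
 * DFL_FPGA_FME_PORT_ASSIGN attaches it back. Both ioctls take a pointer
 * to the port id; a minimal userspace sketch (fme_fd is assumed to be an
 * open FME device node):
 *
 *      int port_id = 0;
 *      ioctl(fme_fd, DFL_FPGA_FME_PORT_RELEASE, &port_id);
 */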
static long fme_hdr_ioctl_release_port(struct dfl_feature_dev_data *fdata,
                                       unsigned long arg)
{
        struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
        int port_id;

        if (get_user(port_id, (int __user *)arg))
                return -EFAULT;

        return dfl_fpga_cdev_release_port(cdev, port_id);
}

static long fme_hdr_ioctl_assign_port(struct dfl_feature_dev_data *fdata,
                                      unsigned long arg)
{
        struct dfl_fpga_cdev *cdev = fdata->dfl_cdev;
        int port_id;

        if (get_user(port_id, (int __user *)arg))
                return -EFAULT;

        return dfl_fpga_cdev_assign_port(cdev, port_id);
}

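/*
 * Returning -ENODEV for an unrecognized cmd is deliberate: the dispatcher
 * in fme_ioctl() below keeps trying the other sub-features until one of
 * them handles the cmd.
 */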
static long fme_hdr_ioctl(struct platform_device *pdev,
                          struct dfl_feature *feature,
                          unsigned int cmd, unsigned long arg)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);

        switch (cmd) {
        case DFL_FPGA_FME_PORT_RELEASE:
                return fme_hdr_ioctl_release_port(fdata, arg);
        case DFL_FPGA_FME_PORT_ASSIGN:
                return fme_hdr_ioctl_assign_port(fdata, arg);
        }

        return -ENODEV;
}

static const struct dfl_feature_id fme_hdr_id_table[] = {
        {.id = FME_FEATURE_ID_HEADER,},
        {0,}
};

static const struct dfl_feature_ops fme_hdr_ops = {
        .ioctl = fme_hdr_ioctl,
};

#define FME_THERM_THRESHOLD     0x8
#define TEMP_THRESHOLD1         GENMASK_ULL(6, 0)
#define TEMP_THRESHOLD1_EN      BIT_ULL(7)
#define TEMP_THRESHOLD2         GENMASK_ULL(14, 8)
#define TEMP_THRESHOLD2_EN      BIT_ULL(15)
#define TRIP_THRESHOLD          GENMASK_ULL(30, 24)
#define TEMP_THRESHOLD1_STATUS  BIT_ULL(32)     /* threshold1 reached */
#define TEMP_THRESHOLD2_STATUS  BIT_ULL(33)     /* threshold2 reached */
/* threshold1 policy: 0 - AP2 (90% throttle) / 1 - AP1 (50% throttle) */
#define TEMP_THRESHOLD1_POLICY  BIT_ULL(44)

#define FME_THERM_RDSENSOR_FMT1 0x10
#define FPGA_TEMPERATURE        GENMASK_ULL(6, 0)

#define FME_THERM_CAP           0x20
#define THERM_NO_THROTTLE       BIT_ULL(0)

#define MD_PRE_DEG

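/*
 * If THERM_NO_THROTTLE is set in the capability register, the hardware
 * cannot throttle automatically, and only the raw temperature reading
 * is exposed to userspace.
 */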
static bool fme_thermal_throttle_support(void __iomem *base)
{
        u64 v = readq(base + FME_THERM_CAP);

        return !FIELD_GET(THERM_NO_THROTTLE, v);
}

static umode_t thermal_hwmon_attrs_visible(const void *drvdata,
                                           enum hwmon_sensor_types type,
                                           u32 attr, int channel)
{
        const struct dfl_feature *feature = drvdata;

        /* temperature is always supported; check the hardware capability for the rest */
        if (attr == hwmon_temp_input)
                return 0444;

        return fme_thermal_throttle_support(feature->ioaddr) ? 0444 : 0;
}

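/*
 * The hwmon ABI reports temperatures in millidegrees Celsius, while the
 * FME registers hold whole degrees, hence the MILLI scaling on reads.
 */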
static int thermal_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                              u32 attr, int channel, long *val)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        switch (attr) {
        case hwmon_temp_input:
                v = readq(feature->ioaddr + FME_THERM_RDSENSOR_FMT1);
                *val = (long)(FIELD_GET(FPGA_TEMPERATURE, v) * MILLI);
                break;
        case hwmon_temp_max:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TEMP_THRESHOLD1, v) * MILLI);
                break;
        case hwmon_temp_crit:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TEMP_THRESHOLD2, v) * MILLI);
                break;
        case hwmon_temp_emergency:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)(FIELD_GET(TRIP_THRESHOLD, v) * MILLI);
                break;
        case hwmon_temp_max_alarm:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)FIELD_GET(TEMP_THRESHOLD1_STATUS, v);
                break;
        case hwmon_temp_crit_alarm:
                v = readq(feature->ioaddr + FME_THERM_THRESHOLD);
                *val = (long)FIELD_GET(TEMP_THRESHOLD2_STATUS, v);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

static const struct hwmon_ops thermal_hwmon_ops = {
        .is_visible = thermal_hwmon_attrs_visible,
        .read = thermal_hwmon_read,
};

static const struct hwmon_channel_info * const thermal_hwmon_info[] = {
        HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_EMERGENCY |
                                 HWMON_T_MAX | HWMON_T_MAX_ALARM |
                                 HWMON_T_CRIT | HWMON_T_CRIT_ALARM),
        NULL
};

static const struct hwmon_chip_info thermal_hwmon_chip_info = {
        .ops = &thermal_hwmon_ops,
        .info = thermal_hwmon_info,
};

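/*
 * temp1_max_policy reports the hardware action taken once threshold1 is
 * reached: 0 selects AP2 (90% throttle), 1 selects AP1 (50% throttle).
 */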
static ssize_t temp1_max_policy_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        v = readq(feature->ioaddr + FME_THERM_THRESHOLD);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(TEMP_THRESHOLD1_POLICY, v));
}

static DEVICE_ATTR_RO(temp1_max_policy);

static struct attribute *thermal_extra_attrs[] = {
        &dev_attr_temp1_max_policy.attr,
        NULL,
};

static umode_t thermal_extra_attrs_visible(struct kobject *kobj,
                                           struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct dfl_feature *feature = dev_get_drvdata(dev);

        return fme_thermal_throttle_support(feature->ioaddr) ? attr->mode : 0;
}

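/*
 * __ATTRIBUTE_GROUPS (rather than ATTRIBUTE_GROUPS) is used because the
 * group is defined by hand below so that .is_visible can be hooked up;
 * the macro then only emits the NULL-terminated thermal_extra_groups
 * array.
 */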
static const struct attribute_group thermal_extra_group = {
        .attrs = thermal_extra_attrs,
        .is_visible = thermal_extra_attrs_visible,
};
__ATTRIBUTE_GROUPS(thermal_extra);

static int fme_thermal_mgmt_init(struct platform_device *pdev,
                                 struct dfl_feature *feature)
{
        struct device *hwmon;

        /*
         * Create a hwmon device so userspace can monitor the temperature
         * and the threshold information:
         *
         * temp1_input      -> FPGA device temperature
         * temp1_max        -> hardware threshold 1 -> 50% or 90% throttling
         * temp1_crit       -> hardware threshold 2 -> 100% throttling
         * temp1_emergency  -> hardware trip threshold to shut down the FPGA
         * temp1_max_alarm  -> hardware threshold 1 alarm
         * temp1_crit_alarm -> hardware threshold 2 alarm
         *
         * Also create device-specific sysfs interfaces, e.g. temp1_max_policy,
         * which reports the actual hardware throttling action (50% vs 90%).
         *
         * If the hardware doesn't support automatic threshold-based
         * throttling, none of the interfaces above are visible except
         * temp1_input.
         */
        hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
                                                     "dfl_fme_thermal", feature,
                                                     &thermal_hwmon_chip_info,
                                                     thermal_extra_groups);
        if (IS_ERR(hwmon)) {
                dev_err(&pdev->dev, "Failed to register thermal hwmon\n");
                return PTR_ERR(hwmon);
        }

        return 0;
}

static const struct dfl_feature_id fme_thermal_mgmt_id_table[] = {
        {.id = FME_FEATURE_ID_THERMAL_MGMT,},
        {0,}
};

static const struct dfl_feature_ops fme_thermal_mgmt_ops = {
        .init = fme_thermal_mgmt_init,
};

#define FME_PWR_STATUS          0x8
#define FME_LATENCY_TOLERANCE   BIT_ULL(18)
#define PWR_CONSUMED            GENMASK_ULL(17, 0)

#define FME_PWR_THRESHOLD       0x10
#define PWR_THRESHOLD1          GENMASK_ULL(6, 0)      /* in Watts */
#define PWR_THRESHOLD2          GENMASK_ULL(14, 8)     /* in Watts */
#define PWR_THRESHOLD_MAX       0x7f                   /* in Watts */
#define PWR_THRESHOLD1_STATUS   BIT_ULL(16)
#define PWR_THRESHOLD2_STATUS   BIT_ULL(17)

#define FME_PWR_XEON_LIMIT      0x18
#define XEON_PWR_LIMIT          GENMASK_ULL(14, 0)     /* in 0.1 Watts */
#define XEON_PWR_EN             BIT_ULL(15)
#define FME_PWR_FPGA_LIMIT      0x20
#define FPGA_PWR_LIMIT          GENMASK_ULL(14, 0)     /* in 0.1 Watts */
#define FPGA_PWR_EN             BIT_ULL(15)

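/*
 * The hwmon ABI expresses power in microwatts. PWR_CONSUMED and the two
 * thresholds are in whole watts (hence the MICRO scaling below), while
 * the Xeon and FPGA limits are in 0.1 W units (hence the * 100000 in
 * their show functions).
 */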
static int power_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
                            u32 attr, int channel, long *val)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        switch (attr) {
        case hwmon_power_input:
                v = readq(feature->ioaddr + FME_PWR_STATUS);
                *val = (long)(FIELD_GET(PWR_CONSUMED, v) * MICRO);
                break;
        case hwmon_power_max:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)(FIELD_GET(PWR_THRESHOLD1, v) * MICRO);
                break;
        case hwmon_power_crit:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)(FIELD_GET(PWR_THRESHOLD2, v) * MICRO);
                break;
        case hwmon_power_max_alarm:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)FIELD_GET(PWR_THRESHOLD1_STATUS, v);
                break;
        case hwmon_power_crit_alarm:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                *val = (long)FIELD_GET(PWR_THRESHOLD2_STATUS, v);
                break;
        default:
                return -EOPNOTSUPP;
        }

        return 0;
}

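/*
 * Threshold writes arrive in microwatts through the standard hwmon sysfs
 * files; a sketch, assuming this device registered as hwmon0:
 *
 *      echo 30000000 > /sys/class/hwmon/hwmon0/power1_max      # 30 W
 *
 * The value is clamped to PWR_THRESHOLD_MAX (127 W), and fdata->lock
 * serializes the read-modify-write of the threshold register.
 */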
static int power_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
                             u32 attr, int channel, long val)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev->parent);
        struct dfl_feature *feature = dev_get_drvdata(dev);
        int ret = 0;
        u64 v;

        val = clamp_val(val / MICRO, 0, PWR_THRESHOLD_MAX);

        mutex_lock(&fdata->lock);

        switch (attr) {
        case hwmon_power_max:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                v &= ~PWR_THRESHOLD1;
                v |= FIELD_PREP(PWR_THRESHOLD1, val);
                writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
                break;
        case hwmon_power_crit:
                v = readq(feature->ioaddr + FME_PWR_THRESHOLD);
                v &= ~PWR_THRESHOLD2;
                v |= FIELD_PREP(PWR_THRESHOLD2, val);
                writeq(v, feature->ioaddr + FME_PWR_THRESHOLD);
                break;
        default:
                ret = -EOPNOTSUPP;
                break;
        }

        mutex_unlock(&fdata->lock);

        return ret;
}

static umode_t power_hwmon_attrs_visible(const void *drvdata,
                                         enum hwmon_sensor_types type,
                                         u32 attr, int channel)
{
        switch (attr) {
        case hwmon_power_input:
        case hwmon_power_max_alarm:
        case hwmon_power_crit_alarm:
                return 0444;
        case hwmon_power_max:
        case hwmon_power_crit:
                return 0644;
        }

        return 0;
}

static const struct hwmon_ops power_hwmon_ops = {
        .is_visible = power_hwmon_attrs_visible,
        .read = power_hwmon_read,
        .write = power_hwmon_write,
};

static const struct hwmon_channel_info * const power_hwmon_info[] = {
        HWMON_CHANNEL_INFO(power, HWMON_P_INPUT |
                                  HWMON_P_MAX | HWMON_P_MAX_ALARM |
                                  HWMON_P_CRIT | HWMON_P_CRIT_ALARM),
        NULL
};

static const struct hwmon_chip_info power_hwmon_chip_info = {
        .ops = &power_hwmon_ops,
        .info = power_hwmon_info,
};

static ssize_t power1_xeon_limit_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u16 xeon_limit = 0;
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_XEON_LIMIT);

        if (FIELD_GET(XEON_PWR_EN, v))
                xeon_limit = FIELD_GET(XEON_PWR_LIMIT, v);

        return sprintf(buf, "%u\n", xeon_limit * 100000);
}

static ssize_t power1_fpga_limit_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u16 fpga_limit = 0;
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_FPGA_LIMIT);

        if (FIELD_GET(FPGA_PWR_EN, v))
                fpga_limit = FIELD_GET(FPGA_PWR_LIMIT, v);

        return sprintf(buf, "%u\n", fpga_limit * 100000);
}

static ssize_t power1_ltr_show(struct device *dev,
                               struct device_attribute *attr, char *buf)
{
        struct dfl_feature *feature = dev_get_drvdata(dev);
        u64 v;

        v = readq(feature->ioaddr + FME_PWR_STATUS);

        return sprintf(buf, "%u\n",
                       (unsigned int)FIELD_GET(FME_LATENCY_TOLERANCE, v));
}

static DEVICE_ATTR_RO(power1_xeon_limit);
static DEVICE_ATTR_RO(power1_fpga_limit);
static DEVICE_ATTR_RO(power1_ltr);

static struct attribute *power_extra_attrs[] = {
        &dev_attr_power1_xeon_limit.attr,
        &dev_attr_power1_fpga_limit.attr,
        &dev_attr_power1_ltr.attr,
        NULL
};

ATTRIBUTE_GROUPS(power_extra);

static int fme_power_mgmt_init(struct platform_device *pdev,
                               struct dfl_feature *feature)
{
        struct device *hwmon;

        hwmon = devm_hwmon_device_register_with_info(&pdev->dev,
                                                     "dfl_fme_power", feature,
                                                     &power_hwmon_chip_info,
                                                     power_extra_groups);
        if (IS_ERR(hwmon)) {
                dev_err(&pdev->dev, "Failed to register power hwmon\n");
                return PTR_ERR(hwmon);
        }

        return 0;
}

static const struct dfl_feature_id fme_power_mgmt_id_table[] = {
        {.id = FME_FEATURE_ID_POWER_MGMT,},
        {0,}
};

static const struct dfl_feature_ops fme_power_mgmt_ops = {
        .init = fme_power_mgmt_init,
};

static struct dfl_feature_driver fme_feature_drvs[] = {
        {
                .id_table = fme_hdr_id_table,
                .ops = &fme_hdr_ops,
        },
        {
                .id_table = fme_pr_mgmt_id_table,
                .ops = &fme_pr_mgmt_ops,
        },
        {
                .id_table = fme_global_err_id_table,
                .ops = &fme_global_err_ops,
        },
        {
                .id_table = fme_thermal_mgmt_id_table,
                .ops = &fme_thermal_mgmt_ops,
        },
        {
                .id_table = fme_power_mgmt_id_table,
                .ops = &fme_power_mgmt_ops,
        },
        {
                .id_table = fme_perf_id_table,
                .ops = &fme_perf_ops,
        },
        {
                .ops = NULL,
        },
};

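/*
 * At probe time, dfl_fpga_dev_feature_init() walks the features
 * enumerated from the Device Feature List and binds each one, by feature
 * id, to the matching entry in the table above.
 */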
static long fme_ioctl_check_extension(struct dfl_feature_dev_data *fdata,
                                      unsigned long arg)
{
        /* No extension support for now */
        return 0;
}

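/*
 * Opening the device with O_EXCL claims exclusive access; otherwise
 * concurrent opens are allowed and dfl_feature_dev_use_count() tracks
 * the number of users.
 */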
static int fme_open(struct inode *inode, struct file *filp)
{
        struct dfl_feature_dev_data *fdata = dfl_fpga_inode_to_feature_dev_data(inode);
        struct platform_device *fdev = fdata->dev;
        int ret;

        mutex_lock(&fdata->lock);
        ret = dfl_feature_dev_use_begin(fdata, filp->f_flags & O_EXCL);
        if (!ret) {
                dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
                        dfl_feature_dev_use_count(fdata));
                filp->private_data = fdata;
        }
        mutex_unlock(&fdata->lock);

        return ret;
}

static int fme_release(struct inode *inode, struct file *filp)
{
        struct dfl_feature_dev_data *fdata = filp->private_data;
        struct platform_device *pdev = fdata->dev;
        struct dfl_feature *feature;

        dev_dbg(&pdev->dev, "Device File Release\n");

        mutex_lock(&fdata->lock);
        dfl_feature_dev_use_end(fdata);

        if (!dfl_feature_dev_use_count(fdata))
                dfl_fpga_dev_for_each_feature(fdata, feature)
                        dfl_fpga_set_irq_triggers(feature, 0,
                                                  feature->nr_irqs, NULL);
        mutex_unlock(&fdata->lock);

        return 0;
}

static long fme_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
        struct dfl_feature_dev_data *fdata = filp->private_data;
        struct platform_device *pdev = fdata->dev;
        struct dfl_feature *f;
        long ret;

        dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

        switch (cmd) {
        case DFL_FPGA_GET_API_VERSION:
                return DFL_FPGA_API_VERSION;
        case DFL_FPGA_CHECK_EXTENSION:
                return fme_ioctl_check_extension(fdata, arg);
        default:
                /*
                 * Let the sub-feature's ioctl function handle the cmd.
                 * A sub-feature's ioctl returns -ENODEV when the cmd is
                 * not handled by that sub-feature, and 0 or another
                 * error code once it is.
                 */
                dfl_fpga_dev_for_each_feature(fdata, f) {
                        if (f->ops && f->ops->ioctl) {
                                ret = f->ops->ioctl(pdev, f, cmd, arg);
                                if (ret != -ENODEV)
                                        return ret;
                        }
                }
        }

        return -EINVAL;
}

static int fme_dev_init(struct platform_device *pdev)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);
        struct dfl_fme *fme;

        fme = devm_kzalloc(&pdev->dev, sizeof(*fme), GFP_KERNEL);
        if (!fme)
                return -ENOMEM;

        mutex_lock(&fdata->lock);
        dfl_fpga_fdata_set_private(fdata, fme);
        mutex_unlock(&fdata->lock);

        return 0;
}

static void fme_dev_destroy(struct platform_device *pdev)
{
        struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(&pdev->dev);

        mutex_lock(&fdata->lock);
        dfl_fpga_fdata_set_private(fdata, NULL);
        mutex_unlock(&fdata->lock);
}

static const struct file_operations fme_fops = {
        .owner = THIS_MODULE,
        .open = fme_open,
        .release = fme_release,
        .unlocked_ioctl = fme_ioctl,
};

static int fme_probe(struct platform_device *pdev)
{
        int ret;

        ret = fme_dev_init(pdev);
        if (ret)
                goto exit;

        ret = dfl_fpga_dev_feature_init(pdev, fme_feature_drvs);
        if (ret)
                goto dev_destroy;

        ret = dfl_fpga_dev_ops_register(pdev, &fme_fops, THIS_MODULE);
        if (ret)
                goto feature_uinit;

        return 0;

feature_uinit:
        dfl_fpga_dev_feature_uinit(pdev);
dev_destroy:
        fme_dev_destroy(pdev);
exit:
        return ret;
}

static void fme_remove(struct platform_device *pdev)
{
        dfl_fpga_dev_ops_unregister(pdev);
        dfl_fpga_dev_feature_uinit(pdev);
        fme_dev_destroy(pdev);
}

static const struct attribute_group *fme_dev_groups[] = {
        &fme_hdr_group,
        &fme_global_err_group,
        NULL
};

static struct platform_driver fme_driver = {
        .driver = {
                .name = DFL_FPGA_FEATURE_DEV_FME,
                .dev_groups = fme_dev_groups,
        },
        .probe = fme_probe,
        .remove = fme_remove,
};

module_platform_driver(fme_driver);

MODULE_DESCRIPTION("FPGA Management Engine driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-fme");