1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Driver for FPGA Accelerated Function Unit (AFU) Error Reporting
4 *
5 * Copyright 2019 Intel Corporation, Inc.
6 *
7 * Authors:
8 * Wu Hao <[email protected]>
9 * Xiao Guangrong <[email protected]>
10 * Joseph Grecco <[email protected]>
11 * Enno Luebbers <[email protected]>
12 * Tim Whisonant <[email protected]>
13 * Ananda Ravuri <[email protected]>
14 * Mitchel Henry <[email protected]>
15 */
16
17 #include <linux/fpga-dfl.h>
18 #include <linux/uaccess.h>
19
20 #include "dfl-afu.h"
21
/* Register offsets within the port error private feature MMIO region. */
#define PORT_ERROR_MASK		0x8	/* write 1s to mask (suppress) error bits */
#define PORT_ERROR		0x10	/* latched port error status, write-1-to-clear */
#define PORT_FIRST_ERROR	0x18	/* first error captured, write-1-to-clear */
#define PORT_MALFORMED_REQ0	0x20	/* first malformed request, low 64 bits */
#define PORT_MALFORMED_REQ1	0x28	/* first malformed request, high 64 bits */

/* All-ones value used to mask every bit in PORT_ERROR_MASK. */
#define ERROR_MASK		GENMASK_ULL(63, 0)
29
30 /* mask or unmask port errors by the error mask register. */
/*
 * Program the port error mask register: all ones masks (suppresses) every
 * error source, zero unmasks them all.
 *
 * Caller is expected to hold fdata->lock.
 */
static void __afu_port_err_mask(struct dfl_feature_dev_data *fdata, bool mask)
{
	void __iomem *base = dfl_get_feature_ioaddr_by_id(fdata,
							  PORT_FEATURE_ID_ERROR);
	u64 v = mask ? ERROR_MASK : 0;

	writeq(v, base + PORT_ERROR_MASK);
}
39
/* Locked wrapper around __afu_port_err_mask() for callers outside the lock. */
static void afu_port_err_mask(struct device *dev, bool mask)
{
	struct dfl_feature_dev_data *data = to_dfl_feature_dev_data(dev);

	mutex_lock(&data->lock);
	__afu_port_err_mask(data, mask);
	mutex_unlock(&data->lock);
}
48
49 /* clear port errors. */
/*
 * afu_port_err_clear - clear latched port errors
 * @dev: the port's device
 * @err: error bits the caller expects to clear; must exactly match the
 *       current PORT_ERROR register value or nothing is cleared
 *
 * Return: 0 on success; -EBUSY if the device is in AP6 power state;
 * -EINVAL if @err is stale (does not match the live error register);
 * otherwise the error code from disabling or re-enabling the port.
 */
static int afu_port_err_clear(struct device *dev, u64 err)
{
	struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
	void __iomem *base_err, *base_hdr;
	int enable_ret = 0, ret = -EBUSY;
	u64 v;

	base_err = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
	base_hdr = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_HEADER);

	mutex_lock(&fdata->lock);

	/*
	 * clear Port Errors
	 *
	 * - Check for AP6 State
	 * - Halt Port by keeping Port in reset
	 * - Set PORT Error mask to all 1 to mask errors
	 * - Clear all errors
	 * - Set Port mask to all 0 to enable errors
	 * - All errors start capturing new errors
	 * - Enable Port by pulling the port out of reset
	 */

	/* if device is still in AP6 power state, can not clear any error. */
	v = readq(base_hdr + PORT_HDR_STS);
	if (FIELD_GET(PORT_STS_PWR_STATE, v) == PORT_STS_PWR_STATE_AP6) {
		dev_err(dev, "Could not clear errors, device in AP6 state.\n");
		goto done;
	}

	/* Halt Port by keeping Port in reset */
	ret = __afu_port_disable(fdata);
	if (ret)
		goto done;

	/* Mask all errors */
	__afu_port_err_mask(fdata, true);

	/* Clear errors if err input matches with current port errors.*/
	v = readq(base_err + PORT_ERROR);

	if (v == err) {
		/* registers are write-1-to-clear */
		writeq(v, base_err + PORT_ERROR);

		v = readq(base_err + PORT_FIRST_ERROR);
		writeq(v, base_err + PORT_FIRST_ERROR);
	} else {
		/* caller raced with a new error; report what was seen */
		dev_warn(dev, "%s: received 0x%llx, expected 0x%llx\n",
			 __func__, v, err);
		ret = -EINVAL;
	}

	/* Clear mask */
	__afu_port_err_mask(fdata, false);

	/* Enable the Port by clearing the reset */
	enable_ret = __afu_port_enable(fdata);

done:
	mutex_unlock(&fdata->lock);
	/* a failed re-enable takes precedence over the clear result */
	return enable_ret ? enable_ret : ret;
}
113
errors_show(struct device * dev,struct device_attribute * attr,char * buf)114 static ssize_t errors_show(struct device *dev, struct device_attribute *attr,
115 char *buf)
116 {
117 struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
118 void __iomem *base;
119 u64 error;
120
121 base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
122
123 mutex_lock(&fdata->lock);
124 error = readq(base + PORT_ERROR);
125 mutex_unlock(&fdata->lock);
126
127 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
128 }
129
errors_store(struct device * dev,struct device_attribute * attr,const char * buff,size_t count)130 static ssize_t errors_store(struct device *dev, struct device_attribute *attr,
131 const char *buff, size_t count)
132 {
133 u64 value;
134 int ret;
135
136 if (kstrtou64(buff, 0, &value))
137 return -EINVAL;
138
139 ret = afu_port_err_clear(dev, value);
140
141 return ret ? ret : count;
142 }
143 static DEVICE_ATTR_RW(errors);
144
first_error_show(struct device * dev,struct device_attribute * attr,char * buf)145 static ssize_t first_error_show(struct device *dev,
146 struct device_attribute *attr, char *buf)
147 {
148 struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
149 void __iomem *base;
150 u64 error;
151
152 base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
153
154 mutex_lock(&fdata->lock);
155 error = readq(base + PORT_FIRST_ERROR);
156 mutex_unlock(&fdata->lock);
157
158 return sprintf(buf, "0x%llx\n", (unsigned long long)error);
159 }
160 static DEVICE_ATTR_RO(first_error);
161
first_malformed_req_show(struct device * dev,struct device_attribute * attr,char * buf)162 static ssize_t first_malformed_req_show(struct device *dev,
163 struct device_attribute *attr,
164 char *buf)
165 {
166 struct dfl_feature_dev_data *fdata = to_dfl_feature_dev_data(dev);
167 void __iomem *base;
168 u64 req0, req1;
169
170 base = dfl_get_feature_ioaddr_by_id(fdata, PORT_FEATURE_ID_ERROR);
171
172 mutex_lock(&fdata->lock);
173 req0 = readq(base + PORT_MALFORMED_REQ0);
174 req1 = readq(base + PORT_MALFORMED_REQ1);
175 mutex_unlock(&fdata->lock);
176
177 return sprintf(buf, "0x%016llx%016llx\n",
178 (unsigned long long)req1, (unsigned long long)req0);
179 }
180 static DEVICE_ATTR_RO(first_malformed_req);
181
/* Attributes exposed under the port's "errors" sysfs group. */
static struct attribute *port_err_attrs[] = {
	&dev_attr_errors.attr,
	&dev_attr_first_error.attr,
	&dev_attr_first_malformed_req.attr,
	NULL,
};
188
port_err_attrs_visible(struct kobject * kobj,struct attribute * attr,int n)189 static umode_t port_err_attrs_visible(struct kobject *kobj,
190 struct attribute *attr, int n)
191 {
192 struct device *dev = kobj_to_dev(kobj);
193 struct dfl_feature_dev_data *fdata;
194
195 fdata = to_dfl_feature_dev_data(dev);
196 /*
197 * sysfs entries are visible only if related private feature is
198 * enumerated.
199 */
200 if (!dfl_get_feature_by_id(fdata, PORT_FEATURE_ID_ERROR))
201 return 0;
202
203 return attr->mode;
204 }
205
/* "errors" sysfs group, visible only when the error feature exists. */
const struct attribute_group port_err_group = {
	.name       = "errors",
	.attrs      = port_err_attrs,
	.is_visible = port_err_attrs_visible,
};
211
/* Feature init: unmask all port errors so capture is enabled. */
static int port_err_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	afu_port_err_mask(&pdev->dev, false);

	return 0;
}
219
/* Feature teardown: mask all port errors again. */
static void port_err_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	afu_port_err_mask(&pdev->dev, true);
}
225
226 static long
port_err_ioctl(struct platform_device * pdev,struct dfl_feature * feature,unsigned int cmd,unsigned long arg)227 port_err_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
228 unsigned int cmd, unsigned long arg)
229 {
230 switch (cmd) {
231 case DFL_FPGA_PORT_ERR_GET_IRQ_NUM:
232 return dfl_feature_ioctl_get_num_irqs(pdev, feature, arg);
233 case DFL_FPGA_PORT_ERR_SET_IRQ:
234 return dfl_feature_ioctl_set_irq(pdev, feature, arg);
235 default:
236 dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
237 return -ENODEV;
238 }
239 }
240
/* Match table: this driver binds to the port error private feature. */
const struct dfl_feature_id port_err_id_table[] = {
	{.id = PORT_FEATURE_ID_ERROR,},
	{0,}
};
245
/* Sub-feature operations registered with the DFL AFU driver core. */
const struct dfl_feature_ops port_err_ops = {
	.init = port_err_init,
	.uinit = port_err_uinit,
	.ioctl = port_err_ioctl,
};
251