1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell RVU Admin Function Devlink
3 *
4 * Copyright (C) 2020 Marvell.
5 *
6 */
7
8 #include <linux/bitfield.h>
9
10 #include "rvu.h"
11 #include "rvu_reg.h"
12 #include "rvu_struct.h"
13 #include "rvu_npc_hash.h"
14
15 #define DRV_NAME "octeontx2-af"
16
/* Open a named key/value pair followed by an object nest in the devlink
 * fmsg stream; paired with rvu_report_pair_end().
 */
static void rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
	devlink_fmsg_pair_nest_start(fmsg, name);
	devlink_fmsg_obj_nest_start(fmsg);
}
22
/* Close the object nest and pair opened by rvu_report_pair_start(). */
static void rvu_report_pair_end(struct devlink_fmsg *fmsg)
{
	devlink_fmsg_obj_nest_end(fmsg);
	devlink_fmsg_pair_nest_end(fmsg);
}
28
rvu_common_request_irq(struct rvu * rvu,int offset,const char * name,irq_handler_t fn)29 static bool rvu_common_request_irq(struct rvu *rvu, int offset,
30 const char *name, irq_handler_t fn)
31 {
32 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
33 int rc;
34
35 sprintf(&rvu->irq_name[offset * NAME_SIZE], "%s", name);
36 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
37 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
38 if (rc)
39 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
40 else
41 rvu->irq_allocated[offset] = true;
42
43 return rvu->irq_allocated[offset];
44 }
45
rvu_nix_intr_work(struct work_struct * work)46 static void rvu_nix_intr_work(struct work_struct *work)
47 {
48 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
49
50 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, intr_work);
51 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_intr_reporter,
52 "NIX_AF_RVU Error",
53 rvu_nix_health_reporter->nix_event_ctx);
54 }
55
/* IRQ handler for NIX_AF_RVU_INT: latch the cause bits into the shared
 * event context, ack and mask the interrupt, then defer reporting to the
 * devlink workqueue.
 */
static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Save the raw cause bits for the dump/recover callbacks. */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
	nix_event_context->nix_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
	/* Mask via the W1C enable register; recover() re-arms it. */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->intr_work);

	return IRQ_HANDLED;
}
80
rvu_nix_gen_work(struct work_struct * work)81 static void rvu_nix_gen_work(struct work_struct *work)
82 {
83 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
84
85 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, gen_work);
86 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_gen_reporter,
87 "NIX_AF_GEN Error",
88 rvu_nix_health_reporter->nix_event_ctx);
89 }
90
/* IRQ handler for NIX_AF_GEN_INT: latch the cause bits, ack and mask the
 * interrupt, then defer reporting to the devlink workqueue.
 */
static irqreturn_t rvu_nix_af_rvu_gen_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Save the raw cause bits for the dump/recover callbacks. */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_GEN_INT);
	nix_event_context->nix_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT, intr);
	/* Mask via the W1C enable register; recover() re-arms it. */
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->gen_work);

	return IRQ_HANDLED;
}
115
rvu_nix_err_work(struct work_struct * work)116 static void rvu_nix_err_work(struct work_struct *work)
117 {
118 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
119
120 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, err_work);
121 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_err_reporter,
122 "NIX_AF_ERR Error",
123 rvu_nix_health_reporter->nix_event_ctx);
124 }
125
/* IRQ handler for NIX_AF_ERR_INT: latch the cause bits, ack and mask the
 * interrupt, then defer reporting to the devlink workqueue.
 */
static irqreturn_t rvu_nix_af_rvu_err_handler(int irq, void *rvu_irq)
{
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Save the raw cause bits for the dump/recover callbacks. */
	nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
	nix_event_context->nix_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
	/* Mask via the W1C enable register; recover() re-arms it. */
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->err_work);

	return IRQ_HANDLED;
}
150
rvu_nix_ras_work(struct work_struct * work)151 static void rvu_nix_ras_work(struct work_struct *work)
152 {
153 struct rvu_nix_health_reporters *rvu_nix_health_reporter;
154
155 rvu_nix_health_reporter = container_of(work, struct rvu_nix_health_reporters, ras_work);
156 devlink_health_report(rvu_nix_health_reporter->rvu_hw_nix_ras_reporter,
157 "NIX_AF_RAS Error",
158 rvu_nix_health_reporter->nix_event_ctx);
159 }
160
rvu_nix_af_rvu_ras_handler(int irq,void * rvu_irq)161 static irqreturn_t rvu_nix_af_rvu_ras_handler(int irq, void *rvu_irq)
162 {
163 struct rvu_nix_event_ctx *nix_event_context;
164 struct rvu_devlink *rvu_dl = rvu_irq;
165 struct rvu *rvu;
166 int blkaddr;
167 u64 intr;
168
169 rvu = rvu_dl->rvu;
170 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
171 if (blkaddr < 0)
172 return IRQ_NONE;
173
174 nix_event_context = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
175 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
176 nix_event_context->nix_af_rvu_ras = intr;
177
178 /* Clear interrupts */
179 rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
180 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
181 queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_nix_health_reporter->ras_work);
182
183 return IRQ_HANDLED;
184 }
185
/* Mask all NIX AF devlink interrupts and free every vector this module
 * registered (tracked via rvu->irq_allocated).  Safe to call on partially
 * registered state, which is why the error path of the register function
 * uses it.
 */
static void rvu_nix_unregister_interrupts(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;
	int offs, i, blkaddr;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
	if (blkaddr < 0)
		return;

	/* MSI-X vector base for the NIX AF interrupts; 0 means it was
	 * never configured, so nothing was registered.
	 */
	offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
	if (!offs)
		return;

	/* Mask everything before freeing the handlers. */
	rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
	rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);

	if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
		free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
			 rvu_dl);
		rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
	}

	/* Remaining vectors (GEN, ERR, POISON) share the rvu_dl cookie. */
	for (i = NIX_AF_INT_VEC_GEN; i < NIX_AF_INT_VEC_CNT; i++)
		if (rvu->irq_allocated[offs + i]) {
			free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
			rvu->irq_allocated[offs + i] = false;
		}
}
216
rvu_nix_register_interrupts(struct rvu * rvu)217 static int rvu_nix_register_interrupts(struct rvu *rvu)
218 {
219 int blkaddr, base;
220 bool rc;
221
222 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
223 if (blkaddr < 0)
224 return blkaddr;
225
226 /* Get NIX AF MSIX vectors offset. */
227 base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
228 if (!base) {
229 dev_warn(rvu->dev,
230 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
231 blkaddr - BLKADDR_NIX0);
232 return 0;
233 }
234 /* Register and enable NIX_AF_RVU_INT interrupt */
235 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
236 "NIX_AF_RVU_INT",
237 rvu_nix_af_rvu_intr_handler);
238 if (!rc)
239 goto err;
240 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
241
242 /* Register and enable NIX_AF_GEN_INT interrupt */
243 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_GEN,
244 "NIX_AF_GEN_INT",
245 rvu_nix_af_rvu_gen_handler);
246 if (!rc)
247 goto err;
248 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
249
250 /* Register and enable NIX_AF_ERR_INT interrupt */
251 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
252 "NIX_AF_ERR_INT",
253 rvu_nix_af_rvu_err_handler);
254 if (!rc)
255 goto err;
256 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
257
258 /* Register and enable NIX_AF_RAS interrupt */
259 rc = rvu_common_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
260 "NIX_AF_RAS",
261 rvu_nix_af_rvu_ras_handler);
262 if (!rc)
263 goto err;
264 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
265
266 return 0;
267 err:
268 rvu_nix_unregister_interrupts(rvu);
269 return rc;
270 }
271
rvu_nix_report_show(struct devlink_fmsg * fmsg,void * ctx,enum nix_af_rvu_health health_reporter)272 static int rvu_nix_report_show(struct devlink_fmsg *fmsg, void *ctx,
273 enum nix_af_rvu_health health_reporter)
274 {
275 struct rvu_nix_event_ctx *nix_event_context;
276 u64 intr_val;
277
278 nix_event_context = ctx;
279 switch (health_reporter) {
280 case NIX_AF_RVU_INTR:
281 intr_val = nix_event_context->nix_af_rvu_int;
282 rvu_report_pair_start(fmsg, "NIX_AF_RVU");
283 devlink_fmsg_u64_pair_put(fmsg, "\tNIX RVU Interrupt Reg ",
284 nix_event_context->nix_af_rvu_int);
285 if (intr_val & BIT_ULL(0))
286 devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
287 rvu_report_pair_end(fmsg);
288 break;
289 case NIX_AF_RVU_GEN:
290 intr_val = nix_event_context->nix_af_rvu_gen;
291 rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
292 devlink_fmsg_u64_pair_put(fmsg, "\tNIX General Interrupt Reg ",
293 nix_event_context->nix_af_rvu_gen);
294 if (intr_val & BIT_ULL(0))
295 devlink_fmsg_string_put(fmsg, "\n\tRx multicast pkt drop");
296 if (intr_val & BIT_ULL(1))
297 devlink_fmsg_string_put(fmsg, "\n\tRx mirror pkt drop");
298 if (intr_val & BIT_ULL(4))
299 devlink_fmsg_string_put(fmsg, "\n\tSMQ flush done");
300 rvu_report_pair_end(fmsg);
301 break;
302 case NIX_AF_RVU_ERR:
303 intr_val = nix_event_context->nix_af_rvu_err;
304 rvu_report_pair_start(fmsg, "NIX_AF_ERR");
305 devlink_fmsg_u64_pair_put(fmsg, "\tNIX Error Interrupt Reg ",
306 nix_event_context->nix_af_rvu_err);
307 if (intr_val & BIT_ULL(14))
308 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_INST_S read");
309 if (intr_val & BIT_ULL(13))
310 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_AQ_RES_S write");
311 if (intr_val & BIT_ULL(12))
312 devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
313 if (intr_val & BIT_ULL(6))
314 devlink_fmsg_string_put(fmsg, "\n\tRx on unmapped PF_FUNC");
315 if (intr_val & BIT_ULL(5))
316 devlink_fmsg_string_put(fmsg, "\n\tRx multicast replication error");
317 if (intr_val & BIT_ULL(4))
318 devlink_fmsg_string_put(fmsg, "\n\tFault on NIX_RX_MCE_S read");
319 if (intr_val & BIT_ULL(3))
320 devlink_fmsg_string_put(fmsg, "\n\tFault on multicast WQE read");
321 if (intr_val & BIT_ULL(2))
322 devlink_fmsg_string_put(fmsg, "\n\tFault on mirror WQE read");
323 if (intr_val & BIT_ULL(1))
324 devlink_fmsg_string_put(fmsg, "\n\tFault on mirror pkt write");
325 if (intr_val & BIT_ULL(0))
326 devlink_fmsg_string_put(fmsg, "\n\tFault on multicast pkt write");
327 rvu_report_pair_end(fmsg);
328 break;
329 case NIX_AF_RVU_RAS:
330 intr_val = nix_event_context->nix_af_rvu_err;
331 rvu_report_pair_start(fmsg, "NIX_AF_RAS");
332 devlink_fmsg_u64_pair_put(fmsg, "\tNIX RAS Interrupt Reg ",
333 nix_event_context->nix_af_rvu_err);
334 devlink_fmsg_string_put(fmsg, "\n\tPoison Data on:");
335 if (intr_val & BIT_ULL(34))
336 devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_INST_S");
337 if (intr_val & BIT_ULL(33))
338 devlink_fmsg_string_put(fmsg, "\n\tNIX_AQ_RES_S");
339 if (intr_val & BIT_ULL(32))
340 devlink_fmsg_string_put(fmsg, "\n\tHW ctx");
341 if (intr_val & BIT_ULL(4))
342 devlink_fmsg_string_put(fmsg, "\n\tPacket from mirror buffer");
343 if (intr_val & BIT_ULL(3))
344 devlink_fmsg_string_put(fmsg, "\n\tPacket from multicast buffer");
345 if (intr_val & BIT_ULL(2))
346 devlink_fmsg_string_put(fmsg, "\n\tWQE read from mirror buffer");
347 if (intr_val & BIT_ULL(1))
348 devlink_fmsg_string_put(fmsg, "\n\tWQE read from multicast buffer");
349 if (intr_val & BIT_ULL(0))
350 devlink_fmsg_string_put(fmsg, "\n\tNIX_RX_MCE_S read");
351 rvu_report_pair_end(fmsg);
352 break;
353 default:
354 return -EINVAL;
355 }
356
357 return 0;
358 }
359
rvu_hw_nix_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)360 static int rvu_hw_nix_intr_dump(struct devlink_health_reporter *reporter,
361 struct devlink_fmsg *fmsg, void *ctx,
362 struct netlink_ext_ack *netlink_extack)
363 {
364 struct rvu *rvu = devlink_health_reporter_priv(reporter);
365 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
366 struct rvu_nix_event_ctx *nix_ctx;
367
368 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
369
370 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_INTR) :
371 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_INTR);
372 }
373
rvu_hw_nix_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)374 static int rvu_hw_nix_intr_recover(struct devlink_health_reporter *reporter,
375 void *ctx, struct netlink_ext_ack *netlink_extack)
376 {
377 struct rvu *rvu = devlink_health_reporter_priv(reporter);
378 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
379 int blkaddr;
380
381 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
382 if (blkaddr < 0)
383 return blkaddr;
384
385 if (nix_event_ctx->nix_af_rvu_int)
386 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
387
388 return 0;
389 }
390
rvu_hw_nix_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)391 static int rvu_hw_nix_gen_dump(struct devlink_health_reporter *reporter,
392 struct devlink_fmsg *fmsg, void *ctx,
393 struct netlink_ext_ack *netlink_extack)
394 {
395 struct rvu *rvu = devlink_health_reporter_priv(reporter);
396 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
397 struct rvu_nix_event_ctx *nix_ctx;
398
399 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
400
401 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_GEN) :
402 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_GEN);
403 }
404
rvu_hw_nix_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)405 static int rvu_hw_nix_gen_recover(struct devlink_health_reporter *reporter,
406 void *ctx, struct netlink_ext_ack *netlink_extack)
407 {
408 struct rvu *rvu = devlink_health_reporter_priv(reporter);
409 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
410 int blkaddr;
411
412 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
413 if (blkaddr < 0)
414 return blkaddr;
415
416 if (nix_event_ctx->nix_af_rvu_gen)
417 rvu_write64(rvu, blkaddr, NIX_AF_GEN_INT_ENA_W1S, ~0ULL);
418
419 return 0;
420 }
421
rvu_hw_nix_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)422 static int rvu_hw_nix_err_dump(struct devlink_health_reporter *reporter,
423 struct devlink_fmsg *fmsg, void *ctx,
424 struct netlink_ext_ack *netlink_extack)
425 {
426 struct rvu *rvu = devlink_health_reporter_priv(reporter);
427 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
428 struct rvu_nix_event_ctx *nix_ctx;
429
430 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
431
432 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_ERR) :
433 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_ERR);
434 }
435
rvu_hw_nix_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)436 static int rvu_hw_nix_err_recover(struct devlink_health_reporter *reporter,
437 void *ctx, struct netlink_ext_ack *netlink_extack)
438 {
439 struct rvu *rvu = devlink_health_reporter_priv(reporter);
440 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
441 int blkaddr;
442
443 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
444 if (blkaddr < 0)
445 return blkaddr;
446
447 if (nix_event_ctx->nix_af_rvu_err)
448 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
449
450 return 0;
451 }
452
rvu_hw_nix_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)453 static int rvu_hw_nix_ras_dump(struct devlink_health_reporter *reporter,
454 struct devlink_fmsg *fmsg, void *ctx,
455 struct netlink_ext_ack *netlink_extack)
456 {
457 struct rvu *rvu = devlink_health_reporter_priv(reporter);
458 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
459 struct rvu_nix_event_ctx *nix_ctx;
460
461 nix_ctx = rvu_dl->rvu_nix_health_reporter->nix_event_ctx;
462
463 return ctx ? rvu_nix_report_show(fmsg, ctx, NIX_AF_RVU_RAS) :
464 rvu_nix_report_show(fmsg, nix_ctx, NIX_AF_RVU_RAS);
465 }
466
rvu_hw_nix_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)467 static int rvu_hw_nix_ras_recover(struct devlink_health_reporter *reporter,
468 void *ctx, struct netlink_ext_ack *netlink_extack)
469 {
470 struct rvu *rvu = devlink_health_reporter_priv(reporter);
471 struct rvu_nix_event_ctx *nix_event_ctx = ctx;
472 int blkaddr;
473
474 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
475 if (blkaddr < 0)
476 return blkaddr;
477
478 if (nix_event_ctx->nix_af_rvu_int)
479 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
480
481 return 0;
482 }
483
/* RVU_REPORTERS() (declared in rvu.h) presumably expands to the
 * devlink_health_reporter_ops instance wiring the *_dump and *_recover
 * callbacks above for each reporter name — TODO confirm against rvu.h.
 */
RVU_REPORTERS(hw_nix_intr);
RVU_REPORTERS(hw_nix_gen);
RVU_REPORTERS(hw_nix_err);
RVU_REPORTERS(hw_nix_ras);

/* Forward declaration: destroy is used by later create/teardown paths. */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);
490
/* Allocate the NIX health-reporter state, create the four devlink health
 * reporters, the devlink workqueue and the deferred-report work items.
 *
 * NOTE(review): on mid-function failure the allocations/reporters created
 * so far are left in place (not freed here); presumably the caller's
 * error path ends up in rvu_nix_health_reporters_destroy() — confirm,
 * otherwise these error returns leak.
 *
 * Returns 0 on success or a negative errno.
 */
static int rvu_nix_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *rvu_reporters;
	struct rvu_nix_event_ctx *nix_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	rvu_dl->rvu_nix_health_reporter = rvu_reporters;
	nix_event_context = kzalloc(sizeof(*nix_event_context), GFP_KERNEL);
	if (!nix_event_context)
		return -ENOMEM;

	rvu_reporters->nix_event_ctx = nix_event_context;
	/* graceful_period = 0: no auto-recover rate limiting. */
	rvu_reporters->rvu_hw_nix_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_intr_reporter);
	}

	rvu_reporters->rvu_hw_nix_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_gen_reporter);
	}

	rvu_reporters->rvu_hw_nix_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_err_reporter);
	}

	rvu_reporters->rvu_hw_nix_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_nix_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_nix_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_nix_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_nix_ras_reporter);
	}

	/* Workqueue the irq handlers queue their deferred reports on. */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_nix_intr_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_nix_gen_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_nix_err_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_nix_ras_work);

	return 0;
}
550
rvu_nix_health_reporters_create(struct rvu_devlink * rvu_dl)551 static int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
552 {
553 struct rvu *rvu = rvu_dl->rvu;
554 int err;
555
556 err = rvu_nix_register_reporters(rvu_dl);
557 if (err) {
558 dev_warn(rvu->dev, "Failed to create nix reporter, err =%d\n",
559 err);
560 return err;
561 }
562 rvu_nix_register_interrupts(rvu);
563
564 return 0;
565 }
566
/* Tear down the NIX health reporters, interrupts and event context.
 *
 * The early return on a NULL ras reporter (the last one created) treats
 * it as "create never completed"; NOTE(review): earlier reporters created
 * before a mid-create failure would then not be destroyed here — confirm
 * the intended partial-teardown path.
 */
static void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
{
	struct rvu_nix_health_reporters *nix_reporters;
	struct rvu *rvu = rvu_dl->rvu;

	nix_reporters = rvu_dl->rvu_nix_health_reporter;

	if (!nix_reporters->rvu_hw_nix_ras_reporter)
		return;
	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_intr_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_intr_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_gen_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_gen_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_err_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_err_reporter);

	if (!IS_ERR_OR_NULL(nix_reporters->rvu_hw_nix_ras_reporter))
		devlink_health_reporter_destroy(nix_reporters->rvu_hw_nix_ras_reporter);

	/* Free irqs before freeing the event context they write into. */
	rvu_nix_unregister_interrupts(rvu);
	kfree(rvu_dl->rvu_nix_health_reporter->nix_event_ctx);
	kfree(rvu_dl->rvu_nix_health_reporter);
}
592
rvu_npa_intr_work(struct work_struct * work)593 static void rvu_npa_intr_work(struct work_struct *work)
594 {
595 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
596
597 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, intr_work);
598 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_intr_reporter,
599 "NPA_AF_RVU Error",
600 rvu_npa_health_reporter->npa_event_ctx);
601 }
602
/* IRQ handler for NPA_AF_RVU_INT: latch the cause bits, ack and mask the
 * interrupt, then defer reporting to the devlink workqueue.
 */
static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Save the raw cause bits for the dump/recover callbacks. */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
	npa_event_context->npa_af_rvu_int = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
	/* Mask via the W1C enable register; recover re-arms it. */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->intr_work);

	return IRQ_HANDLED;
}
627
rvu_npa_gen_work(struct work_struct * work)628 static void rvu_npa_gen_work(struct work_struct *work)
629 {
630 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
631
632 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, gen_work);
633 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_gen_reporter,
634 "NPA_AF_GEN Error",
635 rvu_npa_health_reporter->npa_event_ctx);
636 }
637
/* IRQ handler for NPA_AF_GEN_INT: latch the cause bits, ack and mask the
 * interrupt, then defer reporting to the devlink workqueue.
 */
static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Save the raw cause bits for the dump/recover callbacks. */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
	npa_event_context->npa_af_rvu_gen = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
	/* Mask via the W1C enable register; recover re-arms it. */
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->gen_work);

	return IRQ_HANDLED;
}
662
rvu_npa_err_work(struct work_struct * work)663 static void rvu_npa_err_work(struct work_struct *work)
664 {
665 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
666
667 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, err_work);
668 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_err_reporter,
669 "NPA_AF_ERR Error",
670 rvu_npa_health_reporter->npa_event_ctx);
671 }
672
/* IRQ handler for NPA_AF_ERR_INT: latch the cause bits, ack and mask the
 * interrupt, then defer reporting to the devlink workqueue.
 */
static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;
	/* Save the raw cause bits for the dump/recover callbacks. */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
	npa_event_context->npa_af_rvu_err = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
	/* Mask via the W1C enable register; recover re-arms it. */
	rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->err_work);

	return IRQ_HANDLED;
}
696
rvu_npa_ras_work(struct work_struct * work)697 static void rvu_npa_ras_work(struct work_struct *work)
698 {
699 struct rvu_npa_health_reporters *rvu_npa_health_reporter;
700
701 rvu_npa_health_reporter = container_of(work, struct rvu_npa_health_reporters, ras_work);
702 devlink_health_report(rvu_npa_health_reporter->rvu_hw_npa_ras_reporter,
703 "HW NPA_AF_RAS Error reported",
704 rvu_npa_health_reporter->npa_event_ctx);
705 }
706
/* IRQ handler for NPA_AF_RAS: latch the poison cause bits, ack and mask
 * the interrupt, then defer reporting to the devlink workqueue.
 */
static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu_devlink *rvu_dl = rvu_irq;
	struct rvu *rvu;
	int blkaddr;
	u64 intr;

	rvu = rvu_dl->rvu;
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return IRQ_NONE;

	/* Save the raw cause bits for the dump/recover callbacks. */
	npa_event_context = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
	intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
	npa_event_context->npa_af_rvu_ras = intr;

	/* Clear interrupts */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
	/* Mask via the W1C enable register; recover re-arms it. */
	rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
	queue_work(rvu_dl->devlink_wq, &rvu_dl->rvu_npa_health_reporter->ras_work);

	return IRQ_HANDLED;
}
731
rvu_npa_unregister_interrupts(struct rvu * rvu)732 static void rvu_npa_unregister_interrupts(struct rvu *rvu)
733 {
734 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
735 int i, offs, blkaddr;
736 u64 reg;
737
738 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
739 if (blkaddr < 0)
740 return;
741
742 reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
743 offs = reg & 0x3FF;
744
745 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
746 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
747 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
748 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
749
750 for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
751 if (rvu->irq_allocated[offs + i]) {
752 free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
753 rvu->irq_allocated[offs + i] = false;
754 }
755 }
756
rvu_npa_register_interrupts(struct rvu * rvu)757 static int rvu_npa_register_interrupts(struct rvu *rvu)
758 {
759 int blkaddr, base;
760 bool rc;
761
762 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
763 if (blkaddr < 0)
764 return blkaddr;
765
766 /* Get NPA AF MSIX vectors offset. */
767 base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
768 if (!base) {
769 dev_warn(rvu->dev,
770 "Failed to get NPA_AF_INT vector offsets\n");
771 return 0;
772 }
773
774 /* Register and enable NPA_AF_RVU_INT interrupt */
775 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_RVU,
776 "NPA_AF_RVU_INT",
777 rvu_npa_af_rvu_intr_handler);
778 if (!rc)
779 goto err;
780 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
781
782 /* Register and enable NPA_AF_GEN_INT interrupt */
783 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_GEN,
784 "NPA_AF_RVU_GEN",
785 rvu_npa_af_gen_intr_handler);
786 if (!rc)
787 goto err;
788 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
789
790 /* Register and enable NPA_AF_ERR_INT interrupt */
791 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_AF_ERR,
792 "NPA_AF_ERR_INT",
793 rvu_npa_af_err_intr_handler);
794 if (!rc)
795 goto err;
796 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
797
798 /* Register and enable NPA_AF_RAS interrupt */
799 rc = rvu_common_request_irq(rvu, base + NPA_AF_INT_VEC_POISON,
800 "NPA_AF_RAS",
801 rvu_npa_af_ras_intr_handler);
802 if (!rc)
803 goto err;
804 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
805
806 return 0;
807 err:
808 rvu_npa_unregister_interrupts(rvu);
809 return rc;
810 }
811
/* Format one NPA interrupt-cause register (selected by @health_reporter)
 * from the event context @ctx into @fmsg, decoding the individual bits.
 *
 * Returns 0 on success, -EINVAL for an unknown reporter id.
 */
static int rvu_npa_report_show(struct devlink_fmsg *fmsg, void *ctx,
			       enum npa_af_rvu_health health_reporter)
{
	struct rvu_npa_event_ctx *npa_event_context;
	unsigned int alloc_dis, free_dis;
	u64 intr_val;

	npa_event_context = ctx;
	switch (health_reporter) {
	case NPA_AF_RVU_GEN:
		intr_val = npa_event_context->npa_af_rvu_gen;
		rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA General Interrupt Reg ",
					  npa_event_context->npa_af_rvu_gen);
		if (intr_val & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap PF Error");

		/* Bits [15:0]: per-input-queue "free disabled" causes. */
		free_dis = FIELD_GET(GENMASK(15, 0), intr_val);
		if (free_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: free disabled RX");
		if (free_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:free disabled TX");
		if (free_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for SSO");
		if (free_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for TIM");
		if (free_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for DPI");
		if (free_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tFree Disabled for AURA");

		/* Bits [31:16]: per-input-queue "alloc disabled" causes. */
		alloc_dis = FIELD_GET(GENMASK(31, 16), intr_val);
		if (alloc_dis & BIT(NPA_INPQ_NIX0_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX0_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX0:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_RX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1: alloc disabled RX");
		if (alloc_dis & BIT(NPA_INPQ_NIX1_TX))
			devlink_fmsg_string_put(fmsg, "\n\tNIX1:alloc disabled TX");
		if (alloc_dis & BIT(NPA_INPQ_SSO))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for SSO");
		if (alloc_dis & BIT(NPA_INPQ_TIM))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for TIM");
		if (alloc_dis & BIT(NPA_INPQ_DPI))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for DPI");
		if (alloc_dis & BIT(NPA_INPQ_AURA_OP))
			devlink_fmsg_string_put(fmsg, "\n\tAlloc Disabled for AURA");

		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_ERR:
		rvu_report_pair_start(fmsg, "NPA_AF_ERR");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA Error Interrupt Reg ",
					  npa_event_context->npa_af_rvu_err);
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(14))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_INST_S read");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(13))
			devlink_fmsg_string_put(fmsg, "\n\tFault on NPA_AQ_RES_S write");
		if (npa_event_context->npa_af_rvu_err & BIT_ULL(12))
			devlink_fmsg_string_put(fmsg, "\n\tAQ Doorbell Error");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_RAS:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU_RAS");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RAS Interrupt Reg ",
					  npa_event_context->npa_af_rvu_ras);
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(34))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_INST_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(33))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on NPA_AQ_RES_S");
		if (npa_event_context->npa_af_rvu_ras & BIT_ULL(32))
			devlink_fmsg_string_put(fmsg, "\n\tPoison data on HW context");
		rvu_report_pair_end(fmsg);
		break;
	case NPA_AF_RVU_INTR:
		rvu_report_pair_start(fmsg, "NPA_AF_RVU");
		devlink_fmsg_u64_pair_put(fmsg, "\tNPA RVU Interrupt Reg ",
					  npa_event_context->npa_af_rvu_int);
		if (npa_event_context->npa_af_rvu_int & BIT_ULL(0))
			devlink_fmsg_string_put(fmsg, "\n\tUnmap Slot Error");
		rvu_report_pair_end(fmsg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
905
rvu_hw_npa_intr_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)906 static int rvu_hw_npa_intr_dump(struct devlink_health_reporter *reporter,
907 struct devlink_fmsg *fmsg, void *ctx,
908 struct netlink_ext_ack *netlink_extack)
909 {
910 struct rvu *rvu = devlink_health_reporter_priv(reporter);
911 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
912 struct rvu_npa_event_ctx *npa_ctx;
913
914 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
915
916 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_INTR) :
917 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_INTR);
918 }
919
rvu_hw_npa_intr_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)920 static int rvu_hw_npa_intr_recover(struct devlink_health_reporter *reporter,
921 void *ctx, struct netlink_ext_ack *netlink_extack)
922 {
923 struct rvu *rvu = devlink_health_reporter_priv(reporter);
924 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
925 int blkaddr;
926
927 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
928 if (blkaddr < 0)
929 return blkaddr;
930
931 if (npa_event_ctx->npa_af_rvu_int)
932 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
933
934 return 0;
935 }
936
rvu_hw_npa_gen_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)937 static int rvu_hw_npa_gen_dump(struct devlink_health_reporter *reporter,
938 struct devlink_fmsg *fmsg, void *ctx,
939 struct netlink_ext_ack *netlink_extack)
940 {
941 struct rvu *rvu = devlink_health_reporter_priv(reporter);
942 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
943 struct rvu_npa_event_ctx *npa_ctx;
944
945 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
946
947 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_GEN) :
948 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_GEN);
949 }
950
rvu_hw_npa_gen_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)951 static int rvu_hw_npa_gen_recover(struct devlink_health_reporter *reporter,
952 void *ctx, struct netlink_ext_ack *netlink_extack)
953 {
954 struct rvu *rvu = devlink_health_reporter_priv(reporter);
955 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
956 int blkaddr;
957
958 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
959 if (blkaddr < 0)
960 return blkaddr;
961
962 if (npa_event_ctx->npa_af_rvu_gen)
963 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
964
965 return 0;
966 }
967
rvu_hw_npa_err_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)968 static int rvu_hw_npa_err_dump(struct devlink_health_reporter *reporter,
969 struct devlink_fmsg *fmsg, void *ctx,
970 struct netlink_ext_ack *netlink_extack)
971 {
972 struct rvu *rvu = devlink_health_reporter_priv(reporter);
973 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
974 struct rvu_npa_event_ctx *npa_ctx;
975
976 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
977
978 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_ERR) :
979 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_ERR);
980 }
981
rvu_hw_npa_err_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)982 static int rvu_hw_npa_err_recover(struct devlink_health_reporter *reporter,
983 void *ctx, struct netlink_ext_ack *netlink_extack)
984 {
985 struct rvu *rvu = devlink_health_reporter_priv(reporter);
986 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
987 int blkaddr;
988
989 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
990 if (blkaddr < 0)
991 return blkaddr;
992
993 if (npa_event_ctx->npa_af_rvu_err)
994 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
995
996 return 0;
997 }
998
rvu_hw_npa_ras_dump(struct devlink_health_reporter * reporter,struct devlink_fmsg * fmsg,void * ctx,struct netlink_ext_ack * netlink_extack)999 static int rvu_hw_npa_ras_dump(struct devlink_health_reporter *reporter,
1000 struct devlink_fmsg *fmsg, void *ctx,
1001 struct netlink_ext_ack *netlink_extack)
1002 {
1003 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1004 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1005 struct rvu_npa_event_ctx *npa_ctx;
1006
1007 npa_ctx = rvu_dl->rvu_npa_health_reporter->npa_event_ctx;
1008
1009 return ctx ? rvu_npa_report_show(fmsg, ctx, NPA_AF_RVU_RAS) :
1010 rvu_npa_report_show(fmsg, npa_ctx, NPA_AF_RVU_RAS);
1011 }
1012
rvu_hw_npa_ras_recover(struct devlink_health_reporter * reporter,void * ctx,struct netlink_ext_ack * netlink_extack)1013 static int rvu_hw_npa_ras_recover(struct devlink_health_reporter *reporter,
1014 void *ctx, struct netlink_ext_ack *netlink_extack)
1015 {
1016 struct rvu *rvu = devlink_health_reporter_priv(reporter);
1017 struct rvu_npa_event_ctx *npa_event_ctx = ctx;
1018 int blkaddr;
1019
1020 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
1021 if (blkaddr < 0)
1022 return blkaddr;
1023
1024 if (npa_event_ctx->npa_af_rvu_ras)
1025 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
1026
1027 return 0;
1028 }
1029
/* Instantiate devlink health reporter ops (dump + recover callbacks defined
 * above) for each of the four NPA reporters.
 */
RVU_REPORTERS(hw_npa_intr);
RVU_REPORTERS(hw_npa_gen);
RVU_REPORTERS(hw_npa_err);
RVU_REPORTERS(hw_npa_ras);
1034
1035 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl);
1036
/* Allocate NPA health-reporter bookkeeping, create the four devlink health
 * reporters (intr/gen/err/ras), the workqueue their IRQ handlers defer work
 * into, and the per-reporter work items.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * NOTE(review): on the failure paths below, the already-allocated
 * rvu_reporters / npa_event_context and any reporters created so far are not
 * released here, and rvu_npa_health_reporters_destroy() returns early while
 * rvu_hw_npa_ras_reporter is still NULL — these allocations appear to leak
 * on partial failure; confirm against the caller's cleanup path.
 */
static int rvu_npa_register_reporters(struct rvu_devlink *rvu_dl)
{
	struct rvu_npa_health_reporters *rvu_reporters;
	struct rvu_npa_event_ctx *npa_event_context;
	struct rvu *rvu = rvu_dl->rvu;

	rvu_reporters = kzalloc(sizeof(*rvu_reporters), GFP_KERNEL);
	if (!rvu_reporters)
		return -ENOMEM;

	/* Publish the reporter struct before creating members so the IRQ
	 * work handlers can reach it through rvu->rvu_dl.
	 */
	rvu_dl->rvu_npa_health_reporter = rvu_reporters;
	npa_event_context = kzalloc(sizeof(*npa_event_context), GFP_KERNEL);
	if (!npa_event_context)
		return -ENOMEM;

	rvu_reporters->npa_event_ctx = npa_event_context;
	rvu_reporters->rvu_hw_npa_intr_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_intr_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_intr_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_intr reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_intr_reporter);
	}

	rvu_reporters->rvu_hw_npa_gen_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_gen_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_gen_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_gen reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_gen_reporter);
	}

	rvu_reporters->rvu_hw_npa_err_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_err_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_err_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_err reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_err_reporter);
	}

	rvu_reporters->rvu_hw_npa_ras_reporter =
		devlink_health_reporter_create(rvu_dl->dl, &rvu_hw_npa_ras_reporter_ops, 0, rvu);
	if (IS_ERR(rvu_reporters->rvu_hw_npa_ras_reporter)) {
		dev_warn(rvu->dev, "Failed to create hw_npa_ras reporter, err=%ld\n",
			 PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter));
		return PTR_ERR(rvu_reporters->rvu_hw_npa_ras_reporter);
	}

	/* Workqueue on which the IRQ handlers queue the report work. */
	rvu_dl->devlink_wq = create_workqueue("rvu_devlink_wq");
	if (!rvu_dl->devlink_wq)
		return -ENOMEM;

	INIT_WORK(&rvu_reporters->intr_work, rvu_npa_intr_work);
	INIT_WORK(&rvu_reporters->err_work, rvu_npa_err_work);
	INIT_WORK(&rvu_reporters->gen_work, rvu_npa_gen_work);
	INIT_WORK(&rvu_reporters->ras_work, rvu_npa_ras_work);

	return 0;
}
1096
rvu_npa_health_reporters_create(struct rvu_devlink * rvu_dl)1097 static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
1098 {
1099 struct rvu *rvu = rvu_dl->rvu;
1100 int err;
1101
1102 err = rvu_npa_register_reporters(rvu_dl);
1103 if (err) {
1104 dev_warn(rvu->dev, "Failed to create npa reporter, err =%d\n",
1105 err);
1106 return err;
1107 }
1108 rvu_npa_register_interrupts(rvu);
1109
1110 return 0;
1111 }
1112
rvu_npa_health_reporters_destroy(struct rvu_devlink * rvu_dl)1113 static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
1114 {
1115 struct rvu_npa_health_reporters *npa_reporters;
1116 struct rvu *rvu = rvu_dl->rvu;
1117
1118 npa_reporters = rvu_dl->rvu_npa_health_reporter;
1119
1120 if (!npa_reporters->rvu_hw_npa_ras_reporter)
1121 return;
1122 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_intr_reporter))
1123 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_intr_reporter);
1124
1125 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_gen_reporter))
1126 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_gen_reporter);
1127
1128 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_err_reporter))
1129 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_err_reporter);
1130
1131 if (!IS_ERR_OR_NULL(npa_reporters->rvu_hw_npa_ras_reporter))
1132 devlink_health_reporter_destroy(npa_reporters->rvu_hw_npa_ras_reporter);
1133
1134 rvu_npa_unregister_interrupts(rvu);
1135 kfree(rvu_dl->rvu_npa_health_reporter->npa_event_ctx);
1136 kfree(rvu_dl->rvu_npa_health_reporter);
1137 }
1138
rvu_health_reporters_create(struct rvu * rvu)1139 static int rvu_health_reporters_create(struct rvu *rvu)
1140 {
1141 struct rvu_devlink *rvu_dl;
1142 int err;
1143
1144 rvu_dl = rvu->rvu_dl;
1145 err = rvu_npa_health_reporters_create(rvu_dl);
1146 if (err)
1147 return err;
1148
1149 return rvu_nix_health_reporters_create(rvu_dl);
1150 }
1151
rvu_health_reporters_destroy(struct rvu * rvu)1152 static void rvu_health_reporters_destroy(struct rvu *rvu)
1153 {
1154 struct rvu_devlink *rvu_dl;
1155
1156 if (!rvu->rvu_dl)
1157 return;
1158
1159 rvu_dl = rvu->rvu_dl;
1160 rvu_npa_health_reporters_destroy(rvu_dl);
1161 rvu_nix_health_reporters_destroy(rvu_dl);
1162 }
1163
1164 /* Devlink Params APIs */
/* Devlink "dwrr_mtu" validate handler.
 *
 * Accepts powers of two up to 64K, plus the jumbo values 9728 and 10240,
 * and only while no NIXLF has claimed an SMQ (the DWRR MTU feeds the SMQ
 * scheduler configuration).
 *
 * Fix: the extack message listed "8.16" (typo) instead of "8,16".
 */
static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id,
				       union devlink_param_value val,
				       struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	int dwrr_mtu = val.vu32;
	struct nix_txsch *txsch;
	struct nix_hw *nix_hw;

	if (!rvu->hw->cap.nix_common_dwrr_mtu) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Setting DWRR_MTU is not supported on this silicon");
		return -EOPNOTSUPP;
	}

	/* NOTE(review): the message lists 0 as supported, but
	 * is_power_of_2(0) is false so 0 is actually rejected — confirm
	 * intended behavior.
	 */
	if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) &&
	    (dwrr_mtu != 9728 && dwrr_mtu != 10240)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Invalid, supported MTUs are 0,2,4,8,16,32,64....4K,8K,32K,64K and 9728, 10240");
		return -EINVAL;
	}

	nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0);
	if (!nix_hw)
		return -ENODEV;

	/* All SMQs must be free, i.e. no NIXLF may be initialized. */
	txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ];
	if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Changing DWRR MTU is not supported when there are active NIXLFs");
		NL_SET_ERR_MSG_MOD(extack,
				   "Make sure none of the PF/VF interfaces are initialized and retry");
		return -EOPNOTSUPP;
	}

	return 0;
}
1203
/* Devlink "dwrr_mtu" set handler: convert the byte value to the hardware
 * DWRR MTU encoding and program the NIX0 register for the RPM link type.
 */
static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx,
				  struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	rvu_write64(rvu, BLKADDR_NIX0,
		    nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM),
		    convert_bytes_to_dwrr_mtu(ctx->val.vu32));

	return 0;
}
1218
/* Devlink "dwrr_mtu" get handler: read back the programmed DWRR MTU and
 * convert it to bytes. Not supported on silicon without the common DWRR MTU.
 */
static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id,
				  struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 reg_val;

	if (!rvu->hw->cap.nix_common_dwrr_mtu)
		return -EOPNOTSUPP;

	reg_val = rvu_read64(rvu, BLKADDR_NIX0,
			     nix_get_dwrr_mtu_reg(rvu->hw, SMQ_LINK_TYPE_RPM));
	ctx->val.vu32 = convert_dwrr_mtu_to_bytes(reg_val);

	return 0;
}
1235
/* Driver-specific devlink parameter IDs; these must start above the
 * devlink-generic ID range. Each ID maps to a named parameter registered
 * in rvu_af_dl_params[] / rvu_af_dl_param_exact_match[].
 */
enum rvu_af_dl_param_id {
	RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,			/* "dwrr_mtu" */
	RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,		/* "npc_mcam_high_zone_percent" */
	RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,	/* "npc_exact_feature_disable" */
	RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,	/* "npc_def_rule_cntr" */
	RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,			/* "nix_maxlf" */
};
1244
/* Devlink "npc_exact_feature_disable" get handler: report whether the NPC
 * exact-match table is currently active as "enabled"/"disabled".
 */
static int rvu_af_npc_exact_feature_get(struct devlink *devlink, u32 id,
					struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	const char *state;

	state = rvu_npc_exact_has_match_table(rvu) ? "enabled" : "disabled";
	snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", state);

	return 0;
}
1259
/* Devlink "npc_exact_feature_disable" set handler: disabling is the only
 * supported transition (enforced by the validate callback).
 */
static int rvu_af_npc_exact_feature_disable(struct devlink *devlink, u32 id,
					    struct devlink_param_gset_ctx *ctx,
					    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	rvu_npc_exact_disable_feature(rvu_dl->rvu);

	return 0;
}
1271
/* Devlink "npc_exact_feature_disable" validate handler: the only accepted
 * input is the decimal string "1", and only while the feature can still be
 * disabled (i.e. before any dependent configuration exists).
 */
static int rvu_af_npc_exact_feature_validate(struct devlink *devlink, u32 id,
					     union devlink_param_value val,
					     struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u64 req;

	if (kstrtoull(val.vstr, 10, &req)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only 1 value is supported");
		return -EINVAL;
	}

	if (req != 1) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Only disabling exact match feature is supported");
		return -EINVAL;
	}

	if (!rvu_npc_exact_can_disable_feature(rvu)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't disable exact match feature; Please try before any configuration");
		return -EFAULT;
	}

	return 0;
}
1299
/* Devlink "npc_mcam_high_zone_percent" get handler: report the high-priority
 * zone size as a percentage of all MCAM bitmap entries.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_get(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	ctx->val.vu8 = (u8)((mcam->hprio_count * 100) / mcam->bmap_entries);

	return 0;
}
1314
/* Devlink "npc_mcam_high_zone_percent" set handler: resize the high-priority
 * MCAM zone to the requested percentage; the remaining entries are split
 * evenly into the low-priority zone (half) with the rest in between.
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_set(struct devlink *devlink, u32 id,
						    struct devlink_param_gset_ctx *ctx,
						    struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;
	u32 hzone_percent = ctx->val.vu8;

	mcam->hprio_count = (mcam->bmap_entries * hzone_percent) / 100;
	mcam->hprio_end = mcam->hprio_count;
	mcam->lprio_count = (mcam->bmap_entries - mcam->hprio_count) / 2;
	mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count;

	return 0;
}
1333
/* Devlink "npc_mcam_high_zone_percent" validate handler.
 *
 * The percentage must lie within [12, 100], and no MCAM entry may have been
 * handed out yet (resizing with live entries would corrupt the zones).
 */
static int rvu_af_dl_npc_mcam_high_zone_percent_validate(struct devlink *devlink, u32 id,
							 union devlink_param_value val,
							 struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct npc_mcam *mcam = &rvu_dl->rvu->hw->mcam;

	if (val.vu8 < 12 || val.vu8 > 100) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam high zone percent must be between 12% to 100%");
		return -EINVAL;
	}

	/* A free count below the total means entries are already assigned. */
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	return 0;
}
1361
/* Devlink "npc_def_rule_cntr" get handler: report whether counters on the
 * NPC default rules are currently enabled.
 */
static int rvu_af_dl_npc_def_rule_cntr_get(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	ctx->val.vbool = rvu_dl->rvu->def_rule_cntr_en;

	return 0;
}
1372
/* Devlink "npc_def_rule_cntr" set handler: enable/disable counters on the
 * NPC default rules; the cached flag is updated only when the hardware
 * configuration succeeded.
 */
static int rvu_af_dl_npc_def_rule_cntr_set(struct devlink *devlink, u32 id,
					   struct devlink_param_gset_ctx *ctx,
					   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	bool enable = ctx->val.vbool;
	int rc;

	rc = npc_config_cntr_default_entries(rvu, enable);
	if (rc)
		return rc;

	rvu->def_rule_cntr_en = enable;
	return 0;
}
1387
/* Devlink "nix_maxlf" get handler: report the current NIX LF count. */
static int rvu_af_dl_nix_maxlf_get(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);

	ctx->val.vu16 = (u16)rvu_get_nixlf_count(rvu_dl->rvu);

	return 0;
}
1398
/* Devlink "nix_maxlf" set handler: change the maximum number of NIX LFs.
 *
 * The NPC MCAM resources are sized based on LF counts, so the sequence is
 * order-sensitive: tear down the MCAM bookkeeping first, update lf.max on
 * every NIX block, then re-initialize the MCAM with the new counts.
 */
static int rvu_af_dl_nix_maxlf_set(struct devlink *devlink, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_block *block;
	int blkaddr = 0;

	npc_mcam_rsrcs_deinit(rvu);
	/* Walk every NIX block (NIX0, NIX1, ...) and apply the new maximum. */
	blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	while (blkaddr) {
		block = &rvu->hw->block[blkaddr];
		block->lf.max = ctx->val.vu16;
		blkaddr = rvu_get_next_nix_blkaddr(rvu, blkaddr);
	}

	/* Rebuild MCAM resources against the updated LF counts. */
	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0);
	npc_mcam_rsrcs_init(rvu, blkaddr);

	return 0;
}
1421
/* Devlink "nix_maxlf" validate handler.
 *
 * Rejects the change while MCAM entries are in use, and rejects values above
 * what either NIX block advertises (NIX_AF_CONST2[11:0] = supported LFs).
 *
 * Fix: the nix0 over-limit check returned -EPERM while the identical nix1
 * check returned -EINVAL; an out-of-range value is -EINVAL in both cases.
 */
static int rvu_af_dl_nix_maxlf_validate(struct devlink *devlink, u32 id,
					union devlink_param_value val,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	u16 max_nix0_lf, max_nix1_lf;
	struct npc_mcam *mcam;
	u64 cfg;

	cfg = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_CONST2);
	max_nix0_lf = cfg & 0xFFF;
	cfg = rvu_read64(rvu, BLKADDR_NIX1, NIX_AF_CONST2);
	max_nix1_lf = cfg & 0xFFF;

	/* Do not allow user to modify maximum NIX LFs while mcam entries
	 * have already been assigned.
	 */
	mcam = &rvu->hw->mcam;
	if (mcam->bmap_fcnt < mcam->bmap_entries) {
		NL_SET_ERR_MSG_MOD(extack,
				   "mcam entries have already been assigned, can't resize");
		return -EPERM;
	}

	if (max_nix0_lf && val.vu16 > max_nix0_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix0_lf");
		return -EINVAL;
	}

	if (max_nix1_lf && val.vu16 > max_nix1_lf) {
		NL_SET_ERR_MSG_MOD(extack,
				   "requested nixlf is greater than the max supported nix1_lf");
		return -EINVAL;
	}

	return 0;
}
1461
/* Runtime devlink parameters registered unconditionally for the AF device. */
static const struct devlink_param rvu_af_dl_params[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU,
			     "dwrr_mtu", DEVLINK_PARAM_TYPE_U32,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set,
			     rvu_af_dl_dwrr_mtu_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_MCAM_ZONE_PERCENT,
			     "npc_mcam_high_zone_percent", DEVLINK_PARAM_TYPE_U8,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_mcam_high_zone_percent_get,
			     rvu_af_dl_npc_mcam_high_zone_percent_set,
			     rvu_af_dl_npc_mcam_high_zone_percent_validate),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_DEF_RULE_CNTR_ENABLE,
			     "npc_def_rule_cntr", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_npc_def_rule_cntr_get,
			     rvu_af_dl_npc_def_rule_cntr_set, NULL),
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NIX_MAXLF,
			     "nix_maxlf", DEVLINK_PARAM_TYPE_U16,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_dl_nix_maxlf_get,
			     rvu_af_dl_nix_maxlf_set,
			     rvu_af_dl_nix_maxlf_validate),
};
1486
/* Exact-match parameter; registered only when the silicon provides an
 * exact-match table (CN10K-B) — see the rvu_npc_exact_has_match_table()
 * check at registration time.
 */
static const struct devlink_param rvu_af_dl_param_exact_match[] = {
	DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_NPC_EXACT_FEATURE_DISABLE,
			     "npc_exact_feature_disable", DEVLINK_PARAM_TYPE_STRING,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     rvu_af_npc_exact_feature_get,
			     rvu_af_npc_exact_feature_disable,
			     rvu_af_npc_exact_feature_validate),
};
1495
1496 /* Devlink switch mode */
/* Devlink eswitch mode get: not available in rep mode. */
static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;

	if (rvu->rep_mode)
		return -EOPNOTSUPP;

	*mode = rvu->rswitch.mode;

	return 0;
}
1511
/* Devlink eswitch mode set: switch between legacy and switchdev, enabling
 * or disabling the RVU switch accordingly. Setting the current mode again
 * is a no-op.
 */
static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode,
					struct netlink_ext_ack *extack)
{
	struct rvu_devlink *rvu_dl = devlink_priv(devlink);
	struct rvu *rvu = rvu_dl->rvu;
	struct rvu_switch *rswitch = &rvu->rswitch;

	if (mode != DEVLINK_ESWITCH_MODE_LEGACY &&
	    mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EINVAL;

	if (rswitch->mode == mode)
		return 0;

	rswitch->mode = mode;
	if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV)
		rvu_switch_enable(rvu);
	else
		rvu_switch_disable(rvu);

	return 0;
}
1537
/* Devlink operations: only eswitch mode get/set are implemented. */
static const struct devlink_ops rvu_devlink_ops = {
	.eswitch_mode_get = rvu_devlink_eswitch_mode_get,
	.eswitch_mode_set = rvu_devlink_eswitch_mode_set,
};
1542
/* Allocate and register the devlink instance for the AF device: create the
 * health reporters, register the common parameters, conditionally register
 * the exact-match parameter (CN10K-B only), then publish the devlink.
 *
 * Registration order matters; the goto labels unwind in reverse order on
 * failure. Returns 0 on success or a negative errno.
 */
int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink),
			   rvu->dev);
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	/* Cross-link devlink priv and rvu so callbacks can reach both. */
	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = rvu_health_reporters_create(rvu);
	if (err) {
		dev_err(rvu->dev,
			"devlink health reporter creation failed with error %d\n", err);
		goto err_dl_health;
	}

	err = devlink_params_register(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
	if (err) {
		dev_err(rvu->dev,
			"devlink params register failed with error %d", err);
		goto err_dl_health;
	}

	/* Register exact match devlink only for CN10K-B */
	if (!rvu_npc_exact_has_match_table(rvu))
		goto done;

	err = devlink_params_register(dl, rvu_af_dl_param_exact_match,
				      ARRAY_SIZE(rvu_af_dl_param_exact_match));
	if (err) {
		dev_err(rvu->dev,
			"devlink exact match params register failed with error %d", err);
		goto err_dl_exact_match;
	}

done:
	devlink_register(dl);
	return 0;

err_dl_exact_match:
	devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));

err_dl_health:
	rvu_health_reporters_destroy(rvu);
	devlink_free(dl);
	return err;
}
1599
rvu_unregister_dl(struct rvu * rvu)1600 void rvu_unregister_dl(struct rvu *rvu)
1601 {
1602 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
1603 struct devlink *dl = rvu_dl->dl;
1604
1605 devlink_unregister(dl);
1606
1607 devlink_params_unregister(dl, rvu_af_dl_params, ARRAY_SIZE(rvu_af_dl_params));
1608
1609 /* Unregister exact match devlink only for CN10K-B */
1610 if (rvu_npc_exact_has_match_table(rvu))
1611 devlink_params_unregister(dl, rvu_af_dl_param_exact_match,
1612 ARRAY_SIZE(rvu_af_dl_param_exact_match));
1613
1614 rvu_health_reporters_destroy(rvu);
1615 devlink_free(dl);
1616 }
1617