// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU representor driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/etherdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>
#include <linux/sort.h>

#include "otx2_common.h"
#include "cn10k.h"
#include "otx2_reg.h"
#include "rep.h"

#define DRV_NAME	"rvu_rep"
#define DRV_STRING	"Marvell RVU Representor Driver"

static const struct pci_device_id rvu_rep_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_RVU_REP) },
	{ }
};

MODULE_AUTHOR("Marvell International Ltd.");
MODULE_DESCRIPTION(DRV_STRING);
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, rvu_rep_id_table);

static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
			       struct rep_event *data);

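/* Allocate NPC MCAM entries for this representor so that ntuple and
 * TC flower filters can be offloaded. Entries are requested from the
 * AF over the mailbox in batches of up to NPC_MAX_NONCONTIG_ENTRIES
 * and sorted in ascending order so that filter indices stay in sync
 * with MCAM entry indices.
 */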
static int rvu_rep_mcam_flow_init(struct rep_dev *rep)
{
	struct npc_mcam_alloc_entry_req *req;
	struct npc_mcam_alloc_entry_rsp *rsp;
	struct otx2_nic *priv = rep->mdev;
	int ent, allocated = 0;
	int count;

	rep->flow_cfg = kcalloc(1, sizeof(struct otx2_flow_config), GFP_KERNEL);

	if (!rep->flow_cfg)
		return -ENOMEM;

	count = OTX2_DEFAULT_FLOWCOUNT;

	rep->flow_cfg->flow_ent = kcalloc(count, sizeof(u16), GFP_KERNEL);
	if (!rep->flow_cfg->flow_ent)
		return -ENOMEM;

	/* Take the mbox lock for the alloc/sync sequence; it is released
	 * at the exit label below.
	 */
	mutex_lock(&priv->mbox.lock);
	while (allocated < count) {
		req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&priv->mbox);
		if (!req)
			goto exit;

		req->hdr.pcifunc = rep->pcifunc;
		req->contig = false;
		req->ref_entry = 0;
		req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
				NPC_MAX_NONCONTIG_ENTRIES : count - allocated;

		if (otx2_sync_mbox_msg(&priv->mbox))
			goto exit;

		rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
			(&priv->mbox.mbox, 0, &req->hdr);
		if (IS_ERR(rsp))
			goto exit;

		for (ent = 0; ent < rsp->count; ent++)
			rep->flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];

		allocated += rsp->count;

		if (rsp->count != req->count)
			break;
	}
exit:
	/* Multiple MCAM entry alloc requests could result in non-sequential
	 * MCAM entries in the flow_ent[] array. Sort them in an ascending
	 * order, otherwise user installed ntuple filter index and MCAM entry
	 * index will not be in sync.
	 */
	if (allocated)
		sort(&rep->flow_cfg->flow_ent[0], allocated,
		     sizeof(rep->flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL);

	mutex_unlock(&priv->mbox.lock);

	rep->flow_cfg->max_flows = allocated;

	if (allocated) {
		rep->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
		rep->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
		rep->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
	}

	INIT_LIST_HEAD(&rep->flow_cfg->flow_list);
	INIT_LIST_HEAD(&rep->flow_cfg->flow_list_tc);
	return 0;
}

static int rvu_rep_setup_tc_cb(enum tc_setup_type type,
			       void *type_data, void *cb_priv)
{
	struct rep_dev *rep = cb_priv;
	struct otx2_nic *priv = rep->mdev;

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return -EINVAL;

	if (!(rep->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
		rvu_rep_mcam_flow_init(rep);

	priv->netdev = rep->netdev;
	priv->flags = rep->flags;
	priv->pcifunc = rep->pcifunc;
	priv->flow_cfg = rep->flow_cfg;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return otx2_setup_tc_cls_flower(priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(rvu_rep_block_cb_list);
static int rvu_rep_setup_tc(struct net_device *netdev, enum tc_setup_type type,
			    void *type_data)
{
	struct rvu_rep *rep = netdev_priv(netdev);

	switch (type) {
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &rvu_rep_block_cb_list,
						  rvu_rep_setup_tc_cb,
						  rep, rep, true);
	default:
		return -EOPNOTSUPP;
	}
}

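/* Fill in stats for traffic that hit the representor's own RQ/SQ pair,
 * i.e. packets that took the CPU (exception) path rather than being
 * switched in hardware. Used for the IFLA_OFFLOAD_XSTATS_CPU_HIT
 * offload statistics below.
 */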
static int
rvu_rep_sp_stats64(const struct net_device *dev,
		   struct rtnl_link_stats64 *stats)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct otx2_rcv_queue *rq;
	struct otx2_snd_queue *sq;
	u16 qidx = rep->rep_id;

	otx2_update_rq_stats(priv, qidx);
	rq = &priv->qset.rq[qidx];

	otx2_update_sq_stats(priv, qidx);
	sq = &priv->qset.sq[qidx];

	stats->tx_bytes = sq->stats.bytes;
	stats->tx_packets = sq->stats.pkts;
	stats->rx_bytes = rq->stats.bytes;
	stats->rx_packets = rq->stats.pkts;
	return 0;
}

static bool
rvu_rep_has_offload_stats(const struct net_device *dev, int attr_id)
{
	return attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT;
}

static int
rvu_rep_get_offload_stats(int attr_id, const struct net_device *dev,
			  void *sp)
{
	if (attr_id == IFLA_OFFLOAD_XSTATS_CPU_HIT)
		return rvu_rep_sp_stats64(dev, (struct rtnl_link_stats64 *)sp);

	return -EINVAL;
}

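/* devlink "port function" hardware address callbacks, driven for
 * example via "devlink port function set <dev>/<port index> hw_addr
 * <mac>". Setting a new address also sends a RVU_EVENT_MAC_ADDR_CHANGE
 * notification over the mailbox so the represented PF/VF can be
 * updated.
 */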
static int rvu_rep_dl_port_fn_hw_addr_get(struct devlink_port *port,
					  u8 *hw_addr, int *hw_addr_len,
					  struct netlink_ext_ack *extack)
{
	struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);

	ether_addr_copy(hw_addr, rep->mac);
	*hw_addr_len = ETH_ALEN;
	return 0;
}

static int rvu_rep_dl_port_fn_hw_addr_set(struct devlink_port *port,
					  const u8 *hw_addr, int hw_addr_len,
					  struct netlink_ext_ack *extack)
{
	struct rep_dev *rep = container_of(port, struct rep_dev, dl_port);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	eth_hw_addr_set(rep->netdev, hw_addr);
	ether_addr_copy(rep->mac, hw_addr);

	ether_addr_copy(evt.evt_data.mac, hw_addr);
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_MAC_ADDR_CHANGE, &evt);
	return 0;
}

static const struct devlink_port_ops rvu_rep_dl_port_ops = {
	.port_fn_hw_addr_get = rvu_rep_dl_port_fn_hw_addr_get,
	.port_fn_hw_addr_set = rvu_rep_dl_port_fn_hw_addr_set,
};

static void
rvu_rep_devlink_set_switch_id(struct otx2_nic *priv,
			      struct netdev_phys_item_id *ppid)
{
	struct pci_dev *pdev = priv->pdev;
	u64 id;

	id = pci_get_dsn(pdev);

	ppid->id_len = sizeof(id);
	put_unaligned_be64(id, &ppid->id);
}

static void rvu_rep_devlink_port_unregister(struct rep_dev *rep)
{
	devlink_port_unregister(&rep->dl_port);
}

static int rvu_rep_devlink_port_register(struct rep_dev *rep)
{
	struct devlink_port_attrs attrs = {};
	struct otx2_nic *priv = rep->mdev;
	struct devlink *dl = priv->dl->dl;
	int err;

	if (!(rep->pcifunc & RVU_PFVF_FUNC_MASK)) {
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
		attrs.phys.port_number = rvu_get_pf(rep->pcifunc);
	} else {
		attrs.flavour = DEVLINK_PORT_FLAVOUR_PCI_VF;
		attrs.pci_vf.pf = rvu_get_pf(rep->pcifunc);
		attrs.pci_vf.vf = rep->pcifunc & RVU_PFVF_FUNC_MASK;
	}

	rvu_rep_devlink_set_switch_id(priv, &attrs.switch_id);
	devlink_port_attrs_set(&rep->dl_port, &attrs);

	err = devl_port_register_with_ops(dl, &rep->dl_port, rep->rep_id,
					  &rvu_rep_dl_port_ops);
	if (err) {
		dev_err(rep->mdev->dev, "devlink_port_register failed: %d\n",
			err);
		return err;
	}
	return 0;
}

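/* Map a PF/VF pcifunc to its representor index, or return -EINVAL if no
 * representor exists for it.
 */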
static int rvu_rep_get_repid(struct otx2_nic *priv, u16 pcifunc)
{
	int rep_id;

	for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++)
		if (priv->rep_pf_map[rep_id] == pcifunc)
			return rep_id;
	return -EINVAL;
}

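/* Send a representor event (MTU, MAC address or port state change) to
 * the AF over the mailbox for delivery to the represented PF/VF.
 */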
static int rvu_rep_notify_pfvf(struct otx2_nic *priv, u16 event,
			       struct rep_event *data)
{
	struct rep_event *req;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_rep_event_notify(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	req->event = event;
	req->pcifunc = data->pcifunc;

	memcpy(&req->evt_data, &data->evt_data, sizeof(struct rep_evt_data));
	otx2_sync_mbox_msg(&priv->mbox);
	mutex_unlock(&priv->mbox.lock);
	return 0;
}

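/* Handle PF/VF state notifications coming up from the AF: mark the
 * corresponding representor as initialized or not based on the
 * reported state.
 */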
static void rvu_rep_state_evt_handler(struct otx2_nic *priv,
				      struct rep_event *info)
{
	struct rep_dev *rep;
	int rep_id;

	rep_id = rvu_rep_get_repid(priv, info->pcifunc);
	rep = priv->reps[rep_id];
	if (info->evt_data.vf_state)
		rep->flags |= RVU_REP_VF_INITIALIZED;
	else
		rep->flags &= ~RVU_REP_VF_INITIALIZED;
}

int rvu_event_up_notify(struct otx2_nic *pf, struct rep_event *info)
{
	if (info->event & RVU_EVENT_PFVF_STATE)
		rvu_rep_state_evt_handler(pf, info);
	return 0;
}

static int rvu_rep_change_mtu(struct net_device *dev, int new_mtu)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	netdev_info(dev, "Changing MTU from %d to %d\n",
		    dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	evt.evt_data.mtu = new_mtu;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_MTU_CHANGE, &evt);
	return 0;
}

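/* Deferred work to fetch the NIX LF statistics of the represented PF/VF
 * over the mailbox and cache them in rep->stats. Scheduled from
 * rvu_rep_get_stats64().
 */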
static void rvu_rep_get_stats(struct work_struct *work)
{
	struct delayed_work *del_work = to_delayed_work(work);
	struct nix_stats_req *req;
	struct nix_stats_rsp *rsp;
	struct rep_stats *stats;
	struct otx2_nic *priv;
	struct rep_dev *rep;
	int err;

	rep = container_of(del_work, struct rep_dev, stats_wrk);
	priv = rep->mdev;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_nix_lf_stats(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return;
	}
	req->pcifunc = rep->pcifunc;
	err = otx2_sync_mbox_msg_busy_poll(&priv->mbox);
	if (err)
		goto exit;

	rsp = (struct nix_stats_rsp *)
	      otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);

	if (IS_ERR(rsp)) {
		err = PTR_ERR(rsp);
		goto exit;
	}

	stats = &rep->stats;
	stats->rx_bytes = rsp->rx.octs;
	stats->rx_frames = rsp->rx.ucast + rsp->rx.bcast +
			   rsp->rx.mcast;
	stats->rx_drops = rsp->rx.drop;
	stats->rx_mcast_frames = rsp->rx.mcast;
	stats->tx_bytes = rsp->tx.octs;
	stats->tx_frames = rsp->tx.ucast + rsp->tx.bcast + rsp->tx.mcast;
	stats->tx_drops = rsp->tx.drop;
exit:
	mutex_unlock(&priv->mbox.lock);
}

static void rvu_rep_get_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct rep_dev *rep = netdev_priv(dev);

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return;

	stats->rx_packets = rep->stats.rx_frames;
	stats->rx_bytes = rep->stats.rx_bytes;
	stats->rx_dropped = rep->stats.rx_drops;
	stats->multicast = rep->stats.rx_mcast_frames;

	stats->tx_packets = rep->stats.tx_frames;
	stats->tx_bytes = rep->stats.tx_bytes;
	stats->tx_dropped = rep->stats.tx_drops;

	schedule_delayed_work(&rep->stats_wrk, msecs_to_jiffies(100));
}

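/* Ask the AF to enable or disable eswitch (switchdev) mode. */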
static int rvu_eswitch_config(struct otx2_nic *priv, u8 ena)
{
	struct esw_cfg_req *req;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_esw_cfg(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	req->ena = ena;
	otx2_sync_mbox_msg(&priv->mbox);
	mutex_unlock(&priv->mbox.lock);
	return 0;
}

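/* Transmit on a representor: append the skb to the send queue mapped to
 * this representor. If the SQ has run out of SQBs, stop the netdev
 * queue, re-check after a barrier (in case SQBs were freed meanwhile)
 * and return NETDEV_TX_BUSY so the stack retries.
 */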
static netdev_tx_t rvu_rep_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *pf = rep->mdev;
	struct otx2_snd_queue *sq;
	struct netdev_queue *txq;

	sq = &pf->qset.sq[rep->rep_id];
	txq = netdev_get_tx_queue(dev, 0);

	if (!otx2_sq_append_skb(pf, txq, sq, skb, rep->rep_id)) {
		netif_tx_stop_queue(txq);

		/* Check again, in case SQBs got freed up */
		smp_mb();
		if (((sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb)
		    > sq->sqe_thresh)
			netif_tx_wake_queue(txq);

		return NETDEV_TX_BUSY;
	}
	return NETDEV_TX_OK;
}

static int rvu_rep_open(struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return 0;

	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	evt.event = RVU_EVENT_PORT_STATE;
	evt.evt_data.port_state = 1;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
	return 0;
}

static int rvu_rep_stop(struct net_device *dev)
{
	struct rep_dev *rep = netdev_priv(dev);
	struct otx2_nic *priv = rep->mdev;
	struct rep_event evt = {0};

	if (!(rep->flags & RVU_REP_VF_INITIALIZED))
		return 0;

	netif_carrier_off(dev);
	netif_tx_disable(dev);

	evt.event = RVU_EVENT_PORT_STATE;
	evt.pcifunc = rep->pcifunc;
	rvu_rep_notify_pfvf(priv, RVU_EVENT_PORT_STATE, &evt);
	return 0;
}

static const struct net_device_ops rvu_rep_netdev_ops = {
	.ndo_open = rvu_rep_open,
	.ndo_stop = rvu_rep_stop,
	.ndo_start_xmit = rvu_rep_xmit,
	.ndo_get_stats64 = rvu_rep_get_stats64,
	.ndo_change_mtu = rvu_rep_change_mtu,
	.ndo_has_offload_stats = rvu_rep_has_offload_stats,
	.ndo_get_offload_stats = rvu_rep_get_offload_stats,
	.ndo_setup_tc = rvu_rep_setup_tc,
};

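/* Set up NAPI contexts and CQ interrupts for the representor RX/TX
 * queue pairs and clear the interface-down flag.
 */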
static int rvu_rep_napi_init(struct otx2_nic *priv,
			     struct netlink_ext_ack *extack)
{
	struct otx2_qset *qset = &priv->qset;
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_hw *hw = &priv->hw;
	int err = 0, qidx, vec;
	char *irq_name;

	qset->napi = kcalloc(hw->cint_cnt, sizeof(*cq_poll), GFP_KERNEL);
	if (!qset->napi)
		return -ENOMEM;

	/* Register NAPI handler */
	for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
		cq_poll = &qset->napi[qidx];
		cq_poll->cint_idx = qidx;
		cq_poll->cq_ids[CQ_RX] =
			(qidx < hw->rx_queues) ? qidx : CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_TX] = (qidx < hw->tx_queues) ?
					  qidx + hw->rx_queues :
					  CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_XDP] = CINT_INVALID_CQ;
		cq_poll->cq_ids[CQ_QOS] = CINT_INVALID_CQ;

		cq_poll->dev = (void *)priv;
		netif_napi_add(priv->reps[qidx]->netdev, &cq_poll->napi,
			       otx2_napi_handler);
		napi_enable(&cq_poll->napi);
	}
	/* Register CQ IRQ handlers */
	vec = hw->nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < hw->cint_cnt; qidx++) {
		irq_name = &hw->irq_name[vec * NAME_SIZE];

		snprintf(irq_name, NAME_SIZE, "rep%d-rxtx-%d", qidx, qidx);

		err = request_irq(pci_irq_vector(priv->pdev, vec),
				  otx2_cq_intr_handler, 0, irq_name,
				  &qset->napi[qidx]);
		if (err) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "RVU REP IRQ registration failed for CQ%d",
					       qidx);
			goto err_free_cints;
		}
		vec++;

		/* Enable CQ IRQ */
		otx2_write64(priv, NIX_LF_CINTX_INT(qidx), BIT_ULL(0));
		otx2_write64(priv, NIX_LF_CINTX_ENA_W1S(qidx), BIT_ULL(0));
	}
	priv->flags &= ~OTX2_FLAG_INTF_DOWN;
	return 0;

err_free_cints:
	otx2_free_cints(priv, qidx);
	otx2_disable_napi(priv);
	return err;
}

static void rvu_rep_free_cq_rsrc(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	struct otx2_cq_poll *cq_poll = NULL;
	int qidx, vec;

	/* Cleanup CQ NAPI and IRQ */
	vec = priv->hw.nix_msixoff + NIX_LF_CINT_VEC_START;
	for (qidx = 0; qidx < priv->hw.cint_cnt; qidx++) {
		/* Disable interrupt */
		otx2_write64(priv, NIX_LF_CINTX_ENA_W1C(qidx), BIT_ULL(0));

		synchronize_irq(pci_irq_vector(priv->pdev, vec));

		cq_poll = &qset->napi[qidx];
		napi_synchronize(&cq_poll->napi);
		vec++;
	}
	otx2_free_cints(priv, priv->hw.cint_cnt);
	otx2_disable_napi(priv);
}

static void rvu_rep_rsrc_free(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	struct delayed_work *work;
	int wrk;

	for (wrk = 0; wrk < priv->qset.cq_cnt; wrk++) {
		work = &priv->refill_wrk[wrk].pool_refill_work;
		cancel_delayed_work_sync(work);
	}
	devm_kfree(priv->dev, priv->refill_wrk);

	otx2_free_hw_resources(priv);
	otx2_free_queue_mem(qset);
}

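/* Allocate queue memory and NIX/NPA hardware resources shared by all
 * representor netdevs, and program the maximum HW frame size.
 */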
static int rvu_rep_rsrc_init(struct otx2_nic *priv)
{
	struct otx2_qset *qset = &priv->qset;
	int err;

	err = otx2_alloc_queue_mem(priv);
	if (err)
		return err;

	priv->hw.max_mtu = otx2_get_max_mtu(priv);
	priv->tx_max_pktlen = priv->hw.max_mtu + OTX2_ETH_HLEN;
	priv->rbsize = ALIGN(priv->hw.rbuf_len, OTX2_ALIGN) + OTX2_HEAD_ROOM;

	err = otx2_init_hw_resources(priv);
	if (err)
		goto err_free_rsrc;

	/* Set maximum frame size allowed in HW */
	err = otx2_hw_set_mtu(priv, priv->hw.max_mtu);
	if (err) {
		dev_err(priv->dev, "Failed to set HW MTU\n");
		goto err_free_rsrc;
	}
	return 0;

err_free_rsrc:
	otx2_free_hw_resources(priv);
	otx2_free_queue_mem(qset);
	return err;
}

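/* Tear down all representors: disable eswitch mode, free CQ/NAPI and
 * IRQ resources, unregister netdevs and devlink ports, and release
 * queue and hardware resources.
 */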
void rvu_rep_destroy(struct otx2_nic *priv)
{
	struct rep_dev *rep;
	int rep_id;

	rvu_eswitch_config(priv, false);
	priv->flags |= OTX2_FLAG_INTF_DOWN;
	rvu_rep_free_cq_rsrc(priv);
	for (rep_id = 0; rep_id < priv->rep_cnt; rep_id++) {
		rep = priv->reps[rep_id];
		unregister_netdev(rep->netdev);
		rvu_rep_devlink_port_unregister(rep);
		free_netdev(rep->netdev);
		kfree(rep->flow_cfg);
	}
	kfree(priv->reps);
	rvu_rep_rsrc_free(priv);
}

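/* Create one representor netdev (and devlink port) per PF/VF reported
 * by the AF, then set up NAPI/IRQs and switch the AF into eswitch mode.
 */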
int rvu_rep_create(struct otx2_nic *priv, struct netlink_ext_ack *extack)
{
	int rep_cnt = priv->rep_cnt;
	struct net_device *ndev;
	struct rep_dev *rep;
	int rep_id, err;
	u16 pcifunc;

	err = rvu_rep_rsrc_init(priv);
	if (err)
		return -ENOMEM;

	priv->reps = kcalloc(rep_cnt, sizeof(struct rep_dev *), GFP_KERNEL);
	if (!priv->reps)
		return -ENOMEM;

	for (rep_id = 0; rep_id < rep_cnt; rep_id++) {
		ndev = alloc_etherdev(sizeof(*rep));
		if (!ndev) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "PFVF representor:%d creation failed",
					       rep_id);
			err = -ENOMEM;
			goto exit;
		}

		rep = netdev_priv(ndev);
		priv->reps[rep_id] = rep;
		rep->mdev = priv;
		rep->netdev = ndev;
		rep->rep_id = rep_id;

		ndev->min_mtu = OTX2_MIN_MTU;
		ndev->max_mtu = priv->hw.max_mtu;
		ndev->netdev_ops = &rvu_rep_netdev_ops;
		pcifunc = priv->rep_pf_map[rep_id];
		rep->pcifunc = pcifunc;

		snprintf(ndev->name, sizeof(ndev->name), "Rpf%dvf%d",
			 rvu_get_pf(pcifunc), (pcifunc & RVU_PFVF_FUNC_MASK));

		ndev->hw_features = (NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
				     NETIF_F_IPV6_CSUM | NETIF_F_RXHASH |
				     NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6);

		ndev->hw_features |= NETIF_F_HW_TC;
		ndev->features |= ndev->hw_features;
		eth_hw_addr_random(ndev);
		err = rvu_rep_devlink_port_register(rep);
		if (err) {
			free_netdev(ndev);
			goto exit;
		}

		SET_NETDEV_DEVLINK_PORT(ndev, &rep->dl_port);
		err = register_netdev(ndev);
		if (err) {
			NL_SET_ERR_MSG_MOD(extack,
					   "PFVF representor registration failed");
			rvu_rep_devlink_port_unregister(rep);
			free_netdev(ndev);
			goto exit;
		}

		INIT_DELAYED_WORK(&rep->stats_wrk, rvu_rep_get_stats);
	}
	err = rvu_rep_napi_init(priv, extack);
	if (err)
		goto exit;

	rvu_eswitch_config(priv, true);
	return 0;
exit:
	while (--rep_id >= 0) {
		rep = priv->reps[rep_id];
		unregister_netdev(rep->netdev);
		rvu_rep_devlink_port_unregister(rep);
		free_netdev(rep->netdev);
	}
	kfree(priv->reps);
	rvu_rep_rsrc_free(priv);
	return err;
}

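/* Query the AF for the number of PFs/VFs to be represented and their
 * pcifunc mapping; the count also determines how many RX/TX queues the
 * representor device needs.
 */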
static int rvu_get_rep_cnt(struct otx2_nic *priv)
{
	struct get_rep_cnt_rsp *rsp;
	struct mbox_msghdr *msghdr;
	struct msg_req *req;
	int err, rep;

	mutex_lock(&priv->mbox.lock);
	req = otx2_mbox_alloc_msg_get_rep_cnt(&priv->mbox);
	if (!req) {
		mutex_unlock(&priv->mbox.lock);
		return -ENOMEM;
	}
	err = otx2_sync_mbox_msg(&priv->mbox);
	if (err)
		goto exit;

	msghdr = otx2_mbox_get_rsp(&priv->mbox.mbox, 0, &req->hdr);
	if (IS_ERR(msghdr)) {
		err = PTR_ERR(msghdr);
		goto exit;
	}

	rsp = (struct get_rep_cnt_rsp *)msghdr;
	priv->hw.tx_queues = rsp->rep_cnt;
	priv->hw.rx_queues = rsp->rep_cnt;
	priv->rep_cnt = rsp->rep_cnt;
	for (rep = 0; rep < priv->rep_cnt; rep++)
		priv->rep_pf_map[rep] = rsp->rep_pf_map[rep];

exit:
	mutex_unlock(&priv->mbox.lock);
	return err;
}

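/* PCI probe: enable the device, map regions, attach to the AF via the
 * mailbox, discover the representor count and register with devlink.
 * The representor netdevs themselves are created later by
 * rvu_rep_create().
 */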
static int rvu_rep_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct otx2_nic *priv;
	struct otx2_hw *hw;
	int err;

	err = pcim_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		return err;
	}

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "DMA mask config failed, abort\n");
		goto err_release_regions;
	}

	pci_set_master(pdev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		goto err_release_regions;
	}

	pci_set_drvdata(pdev, priv);
	priv->pdev = pdev;
	priv->dev = dev;
	priv->flags |= OTX2_FLAG_INTF_DOWN;
	priv->flags |= OTX2_FLAG_REP_MODE_ENABLED;

	hw = &priv->hw;
	hw->pdev = pdev;
	hw->max_queues = OTX2_MAX_CQ_CNT;
	hw->rbuf_len = OTX2_DEFAULT_RBUF_LEN;
	hw->xqe_size = 128;

	err = otx2_init_rsrc(pdev, priv);
	if (err)
		goto err_release_regions;

	priv->iommu_domain = iommu_get_domain_for_dev(dev);

	err = rvu_get_rep_cnt(priv);
	if (err)
		goto err_detach_rsrc;

	err = otx2_register_dl(priv);
	if (err)
		goto err_detach_rsrc;

	return 0;

err_detach_rsrc:
	if (priv->hw.lmt_info)
		free_percpu(priv->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
		qmem_free(priv->dev, priv->dync_lmt);
	otx2_detach_resources(&priv->mbox);
	otx2_disable_mbox_intr(priv);
	otx2_pfaf_mbox_destroy(priv);
	pci_free_irq_vectors(pdev);
err_release_regions:
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
	return err;
}

static void rvu_rep_remove(struct pci_dev *pdev)
{
	struct otx2_nic *priv = pci_get_drvdata(pdev);

	otx2_unregister_dl(priv);
	if (!(priv->flags & OTX2_FLAG_INTF_DOWN))
		rvu_rep_destroy(priv);
	otx2_detach_resources(&priv->mbox);
	if (priv->hw.lmt_info)
		free_percpu(priv->hw.lmt_info);
	if (test_bit(CN10K_LMTST, &priv->hw.cap_flag))
		qmem_free(priv->dev, priv->dync_lmt);
	otx2_disable_mbox_intr(priv);
	otx2_pfaf_mbox_destroy(priv);
	pci_free_irq_vectors(priv->pdev);
	pci_set_drvdata(pdev, NULL);
	pci_release_regions(pdev);
}

static struct pci_driver rvu_rep_driver = {
	.name = DRV_NAME,
	.id_table = rvu_rep_id_table,
	.probe = rvu_rep_probe,
	.remove = rvu_rep_remove,
	.shutdown = rvu_rep_remove,
};

static int __init rvu_rep_init_module(void)
{
	return pci_register_driver(&rvu_rep_driver);
}

static void __exit rvu_rep_cleanup_module(void)
{
	pci_unregister_driver(&rvu_rep_driver);
}

module_init(rvu_rep_init_module);
module_exit(rvu_rep_cleanup_module);