1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) Meta Platforms, Inc. and affiliates. */
3
4 #include <linux/etherdevice.h>
5 #include <linux/ipv6.h>
6 #include <linux/types.h>
7 #include <net/netdev_queues.h>
8
9 #include "fbnic.h"
10 #include "fbnic_netdev.h"
11 #include "fbnic_txrx.h"
12
/* Bring up NAPI vectors, queue resources, and firmware state for the
 * interface. Does not start traffic; the caller follows up with
 * fbnic_up() (see fbnic_open()).
 *
 * Returns 0 on success or a negative errno. On failure, everything
 * acquired so far is unwound via the cascading labels at the bottom,
 * which run in reverse order of acquisition.
 */
int __fbnic_open(struct fbnic_net *fbn)
{
	struct fbnic_dev *fbd = fbn->fbd;
	int err;

	err = fbnic_alloc_napi_vectors(fbn);
	if (err)
		return err;

	err = fbnic_alloc_resources(fbn);
	if (err)
		goto free_napi_vectors;

	err = fbnic_set_netif_queues(fbn);
	if (err)
		goto free_resources;

	/* Send ownership message and flush to verify FW has seen it */
	err = fbnic_fw_xmit_ownership_msg(fbd, true);
	if (err) {
		dev_warn(fbd->dev,
			 "Error %d sending host ownership message to the firmware\n",
			 err);
		goto free_resources;
	}

	err = fbnic_time_start(fbn);
	if (err)
		goto release_ownership;

	err = fbnic_fw_init_heartbeat(fbd, false);
	if (err)
		goto time_stop;

	err = fbnic_pcs_irq_enable(fbd);
	if (err)
		goto time_stop;
	/* Pull the BMC config and initialize the RPC */
	fbnic_bmc_rpc_init(fbd);
	fbnic_rss_reinit(fbd, fbn);

	return 0;
	/* Error unwind: labels intentionally fall through so each later
	 * failure releases everything acquired before it.
	 */
time_stop:
	fbnic_time_stop(fbn);
release_ownership:
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);
free_resources:
	fbnic_free_resources(fbn);
free_napi_vectors:
	fbnic_free_napi_vectors(fbn);
	return err;
}
65
fbnic_open(struct net_device * netdev)66 static int fbnic_open(struct net_device *netdev)
67 {
68 struct fbnic_net *fbn = netdev_priv(netdev);
69 int err;
70
71 fbnic_napi_name_irqs(fbn->fbd);
72
73 err = __fbnic_open(fbn);
74 if (!err)
75 fbnic_up(fbn);
76
77 return err;
78 }
79
/* ndo_stop callback: quiesce traffic and tear down everything set up by
 * __fbnic_open()/fbnic_up(), in reverse order of bring-up. Ownership is
 * handed back to the firmware (second argument false mirrors the `true`
 * sent during open).
 */
static int fbnic_stop(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);

	fbnic_down(fbn);
	fbnic_pcs_irq_disable(fbn->fbd);

	fbnic_time_stop(fbn);
	fbnic_fw_xmit_ownership_msg(fbn->fbd, false);

	fbnic_reset_netif_queues(fbn);
	fbnic_free_resources(fbn);
	fbnic_free_napi_vectors(fbn);

	return 0;
}
96
fbnic_uc_sync(struct net_device * netdev,const unsigned char * addr)97 static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr)
98 {
99 struct fbnic_net *fbn = netdev_priv(netdev);
100 struct fbnic_mac_addr *avail_addr;
101
102 if (WARN_ON(!is_valid_ether_addr(addr)))
103 return -EADDRNOTAVAIL;
104
105 avail_addr = __fbnic_uc_sync(fbn->fbd, addr);
106 if (!avail_addr)
107 return -ENOSPC;
108
109 /* Add type flag indicating this address is in use by the host */
110 set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam);
111
112 return 0;
113 }
114
fbnic_uc_unsync(struct net_device * netdev,const unsigned char * addr)115 static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr)
116 {
117 struct fbnic_net *fbn = netdev_priv(netdev);
118 struct fbnic_dev *fbd = fbn->fbd;
119 int i, ret;
120
121 /* Scan from middle of list to bottom, filling bottom up.
122 * Skip the first entry which is reserved for dev_addr and
123 * leave the last entry to use for promiscuous filtering.
124 */
125 for (i = fbd->mac_addr_boundary, ret = -ENOENT;
126 i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) {
127 struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
128
129 if (!ether_addr_equal(mac_addr->value.addr8, addr))
130 continue;
131
132 ret = __fbnic_uc_unsync(mac_addr);
133 }
134
135 return ret;
136 }
137
fbnic_mc_sync(struct net_device * netdev,const unsigned char * addr)138 static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr)
139 {
140 struct fbnic_net *fbn = netdev_priv(netdev);
141 struct fbnic_mac_addr *avail_addr;
142
143 if (WARN_ON(!is_multicast_ether_addr(addr)))
144 return -EADDRNOTAVAIL;
145
146 avail_addr = __fbnic_mc_sync(fbn->fbd, addr);
147 if (!avail_addr)
148 return -ENOSPC;
149
150 /* Add type flag indicating this address is in use by the host */
151 set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam);
152
153 return 0;
154 }
155
fbnic_mc_unsync(struct net_device * netdev,const unsigned char * addr)156 static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr)
157 {
158 struct fbnic_net *fbn = netdev_priv(netdev);
159 struct fbnic_dev *fbd = fbn->fbd;
160 int i, ret;
161
162 /* Scan from middle of list to top, filling top down.
163 * Skip over the address reserved for the BMC MAC and
164 * exclude index 0 as that belongs to the broadcast address
165 */
166 for (i = fbd->mac_addr_boundary, ret = -ENOENT;
167 --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) {
168 struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i];
169
170 if (!ether_addr_equal(mac_addr->value.addr8, addr))
171 continue;
172
173 ret = __fbnic_mc_unsync(mac_addr);
174 }
175
176 return ret;
177 }
178
/* Rebuild the MACDA TCAM from the netdev's current address lists and
 * flags, then push the resulting rules to hardware. An -ENOSPC from the
 * address-list sync falls back to promiscuous / all-multicast matching.
 */
void __fbnic_set_rx_mode(struct net_device *netdev)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	bool uc_promisc = false, mc_promisc = false;
	struct fbnic_dev *fbd = fbn->fbd;
	struct fbnic_mac_addr *mac_addr;
	int err;

	/* Populate host address from dev_addr */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX];
	if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) ||
	    mac_addr->state != FBNIC_TCAM_S_VALID) {
		ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr);
		mac_addr->state = FBNIC_TCAM_S_UPDATE;
		set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam);
	}

	/* Populate broadcast address if broadcast is enabled */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX];
	if (netdev->flags & IFF_BROADCAST) {
		if (!is_broadcast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_broadcast_addr(mac_addr->value.addr8);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
		set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam);
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		__fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST);
	}

	/* Synchronize unicast and multicast address lists.
	 * -ENOSPC means the TCAM overflowed, so switch to a
	 * promiscuous-style catch-all entry instead.
	 */
	err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync);
	if (err == -ENOSPC)
		uc_promisc = true;
	err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync);
	if (err == -ENOSPC)
		mc_promisc = true;

	/* Unicast promiscuous implies multicast promiscuous as well */
	uc_promisc |= !!(netdev->flags & IFF_PROMISC);
	mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc;

	/* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */
	mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX];
	if (uc_promisc) {
		/* Zero value with an all-ones mask matches every address */
		if (!is_zero_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			set_bit(FBNIC_MAC_ADDR_T_PROMISC,
				mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mc_promisc &&
		   (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) {
		/* We have to add a special handler for multicast as the
		 * BMC may have an all-multi rule already in place. As such
		 * adding a rule ourselves won't do any good so we will have
		 * to modify the rules for the ALL MULTI below if the BMC
		 * already has the rule in place.
		 */
		if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
		    mac_addr->state != FBNIC_TCAM_S_VALID) {
			/* Set only the multicast bit in the value and
			 * expose only that bit in the mask, so the entry
			 * matches any multicast destination address.
			 */
			eth_zero_addr(mac_addr->value.addr8);
			eth_broadcast_addr(mac_addr->mask.addr8);
			mac_addr->value.addr8[0] ^= 1;
			mac_addr->mask.addr8[0] ^= 1;
			set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
			mac_addr->state = FBNIC_TCAM_S_ADD;
		}
	} else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
		/* No promiscuous matching needed: drop our flag bits, and
		 * delete the entry entirely unless the BMC still owns it.
		 */
		if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
			clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
				  mac_addr->act_tcam);
			clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
				  mac_addr->act_tcam);
		} else {
			mac_addr->state = FBNIC_TCAM_S_DELETE;
		}
	}

	/* Add rules for BMC all multicast if it is enabled */
	fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);

	/* Sift out any unshared BMC rules and place them in BMC only section */
	fbnic_sift_macda(fbd);

	/* Write updates to hardware */
	fbnic_write_rules(fbd);
	fbnic_write_macda(fbd);
	fbnic_write_tce_tcam(fbd);
}
275
/* ndo_set_rx_mode callback: resync the hardware filters, but only while
 * the interface is running.
 */
static void fbnic_set_rx_mode(struct net_device *netdev)
{
	/* No need to update the hardware if we are not running */
	if (!netif_running(netdev))
		return;

	__fbnic_set_rx_mode(netdev);
}
282
fbnic_set_mac(struct net_device * netdev,void * p)283 static int fbnic_set_mac(struct net_device *netdev, void *p)
284 {
285 struct sockaddr *addr = p;
286
287 if (!is_valid_ether_addr(addr->sa_data))
288 return -EADDRNOTAVAIL;
289
290 eth_hw_addr_set(netdev, addr->sa_data);
291
292 fbnic_set_rx_mode(netdev);
293
294 return 0;
295 }
296
fbnic_clear_rx_mode(struct net_device * netdev)297 void fbnic_clear_rx_mode(struct net_device *netdev)
298 {
299 struct fbnic_net *fbn = netdev_priv(netdev);
300 struct fbnic_dev *fbd = fbn->fbd;
301 int idx;
302
303 for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
304 struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
305
306 if (mac_addr->state != FBNIC_TCAM_S_VALID)
307 continue;
308
309 bitmap_clear(mac_addr->act_tcam,
310 FBNIC_MAC_ADDR_T_HOST_START,
311 FBNIC_MAC_ADDR_T_HOST_LEN);
312
313 if (bitmap_empty(mac_addr->act_tcam,
314 FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
315 mac_addr->state = FBNIC_TCAM_S_DELETE;
316 }
317
318 /* Write updates to hardware */
319 fbnic_write_macda(fbd);
320
321 __dev_uc_unsync(netdev, NULL);
322 __dev_mc_unsync(netdev, NULL);
323 }
324
fbnic_hwtstamp_get(struct net_device * netdev,struct kernel_hwtstamp_config * config)325 static int fbnic_hwtstamp_get(struct net_device *netdev,
326 struct kernel_hwtstamp_config *config)
327 {
328 struct fbnic_net *fbn = netdev_priv(netdev);
329
330 *config = fbn->hwtstamp_config;
331
332 return 0;
333 }
334
/* ndo_hwtstamp_set callback: validate and apply a new hardware
 * timestamping configuration. Rx filters the hardware cannot match
 * exactly are widened to a broader supported filter; the caller is
 * informed via HWTSTAMP_FILTER_SOME that extra packets may be stamped.
 */
static int fbnic_hwtstamp_set(struct net_device *netdev,
			      struct kernel_hwtstamp_config *config,
			      struct netlink_ext_ack *extack)
{
	struct fbnic_net *fbn = netdev_priv(netdev);
	int old_rx_filter;

	/* Only netdev-sourced timestamps are supported (no PHY source) */
	if (config->source != HWTSTAMP_SOURCE_NETDEV)
		return -EOPNOTSUPP;

	/* Nothing to do if the requested config matches the current one */
	if (!kernel_hwtstamp_config_changed(config, &fbn->hwtstamp_config))
		return 0;

	/* Upscale the filters */
	switch (config->rx_filter) {
	case HWTSTAMP_FILTER_NONE:
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
		break;
	case HWTSTAMP_FILTER_NTP_ALL:
		config->rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT;
		break;
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	/* Configure */
	old_rx_filter = fbn->hwtstamp_config.rx_filter;
	memcpy(&fbn->hwtstamp_config, config, sizeof(*config));

	/* If the effective Rx filter changed while the interface is up,
	 * regenerate and rewrite the RSS/RPC rules that implement it.
	 */
	if (old_rx_filter != config->rx_filter && netif_running(fbn->netdev)) {
		fbnic_rss_reinit(fbn->fbd, fbn);
		fbnic_write_rules(fbn->fbd);
	}

	/* Save / report back filter configuration
	 * Note that our filter configuration is inexact. Instead of
	 * filtering for a specific UDP port or L2 Ethertype we are
	 * filtering in all UDP or all non-IP packets for timestamping. So
	 * if anything other than FILTER_ALL is requested we report
	 * FILTER_SOME indicating that we will be timestamping a few
	 * additional packets.
	 */
	if (config->rx_filter > HWTSTAMP_FILTER_ALL)
		config->rx_filter = HWTSTAMP_FILTER_SOME;

	return 0;
}
402
fbnic_get_stats64(struct net_device * dev,struct rtnl_link_stats64 * stats64)403 static void fbnic_get_stats64(struct net_device *dev,
404 struct rtnl_link_stats64 *stats64)
405 {
406 u64 tx_bytes, tx_packets, tx_dropped = 0;
407 u64 rx_bytes, rx_packets, rx_dropped = 0;
408 struct fbnic_net *fbn = netdev_priv(dev);
409 struct fbnic_queue_stats *stats;
410 unsigned int start, i;
411
412 stats = &fbn->tx_stats;
413
414 tx_bytes = stats->bytes;
415 tx_packets = stats->packets;
416 tx_dropped = stats->dropped;
417
418 stats64->tx_bytes = tx_bytes;
419 stats64->tx_packets = tx_packets;
420 stats64->tx_dropped = tx_dropped;
421
422 for (i = 0; i < fbn->num_tx_queues; i++) {
423 struct fbnic_ring *txr = fbn->tx[i];
424
425 if (!txr)
426 continue;
427
428 stats = &txr->stats;
429 do {
430 start = u64_stats_fetch_begin(&stats->syncp);
431 tx_bytes = stats->bytes;
432 tx_packets = stats->packets;
433 tx_dropped = stats->dropped;
434 } while (u64_stats_fetch_retry(&stats->syncp, start));
435
436 stats64->tx_bytes += tx_bytes;
437 stats64->tx_packets += tx_packets;
438 stats64->tx_dropped += tx_dropped;
439 }
440
441 stats = &fbn->rx_stats;
442
443 rx_bytes = stats->bytes;
444 rx_packets = stats->packets;
445 rx_dropped = stats->dropped;
446
447 stats64->rx_bytes = rx_bytes;
448 stats64->rx_packets = rx_packets;
449 stats64->rx_dropped = rx_dropped;
450
451 for (i = 0; i < fbn->num_rx_queues; i++) {
452 struct fbnic_ring *rxr = fbn->rx[i];
453
454 if (!rxr)
455 continue;
456
457 stats = &rxr->stats;
458 do {
459 start = u64_stats_fetch_begin(&stats->syncp);
460 rx_bytes = stats->bytes;
461 rx_packets = stats->packets;
462 rx_dropped = stats->dropped;
463 } while (u64_stats_fetch_retry(&stats->syncp, start));
464
465 stats64->rx_bytes += rx_bytes;
466 stats64->rx_packets += rx_packets;
467 stats64->rx_dropped += rx_dropped;
468 }
469 }
470
/* Netdev entry points for the fbnic interface */
static const struct net_device_ops fbnic_netdev_ops = {
	.ndo_open = fbnic_open,
	.ndo_stop = fbnic_stop,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_start_xmit = fbnic_xmit_frame,
	.ndo_features_check = fbnic_features_check,
	.ndo_set_mac_address = fbnic_set_mac,
	.ndo_set_rx_mode = fbnic_set_rx_mode,
	.ndo_get_stats64 = fbnic_get_stats64,
	.ndo_hwtstamp_get = fbnic_hwtstamp_get,
	.ndo_hwtstamp_set = fbnic_hwtstamp_set,
};
483
fbnic_get_queue_stats_rx(struct net_device * dev,int idx,struct netdev_queue_stats_rx * rx)484 static void fbnic_get_queue_stats_rx(struct net_device *dev, int idx,
485 struct netdev_queue_stats_rx *rx)
486 {
487 struct fbnic_net *fbn = netdev_priv(dev);
488 struct fbnic_ring *rxr = fbn->rx[idx];
489 struct fbnic_queue_stats *stats;
490 unsigned int start;
491 u64 bytes, packets;
492
493 if (!rxr)
494 return;
495
496 stats = &rxr->stats;
497 do {
498 start = u64_stats_fetch_begin(&stats->syncp);
499 bytes = stats->bytes;
500 packets = stats->packets;
501 } while (u64_stats_fetch_retry(&stats->syncp, start));
502
503 rx->bytes = bytes;
504 rx->packets = packets;
505 }
506
fbnic_get_queue_stats_tx(struct net_device * dev,int idx,struct netdev_queue_stats_tx * tx)507 static void fbnic_get_queue_stats_tx(struct net_device *dev, int idx,
508 struct netdev_queue_stats_tx *tx)
509 {
510 struct fbnic_net *fbn = netdev_priv(dev);
511 struct fbnic_ring *txr = fbn->tx[idx];
512 struct fbnic_queue_stats *stats;
513 unsigned int start;
514 u64 bytes, packets;
515
516 if (!txr)
517 return;
518
519 stats = &txr->stats;
520 do {
521 start = u64_stats_fetch_begin(&stats->syncp);
522 bytes = stats->bytes;
523 packets = stats->packets;
524 } while (u64_stats_fetch_retry(&stats->syncp, start));
525
526 tx->bytes = bytes;
527 tx->packets = packets;
528 }
529
fbnic_get_base_stats(struct net_device * dev,struct netdev_queue_stats_rx * rx,struct netdev_queue_stats_tx * tx)530 static void fbnic_get_base_stats(struct net_device *dev,
531 struct netdev_queue_stats_rx *rx,
532 struct netdev_queue_stats_tx *tx)
533 {
534 struct fbnic_net *fbn = netdev_priv(dev);
535
536 tx->bytes = fbn->tx_stats.bytes;
537 tx->packets = fbn->tx_stats.packets;
538
539 rx->bytes = fbn->rx_stats.bytes;
540 rx->packets = fbn->rx_stats.packets;
541 }
542
/* Queue-granular statistics callbacks (see net/netdev_queues.h) */
static const struct netdev_stat_ops fbnic_stat_ops = {
	.get_queue_stats_rx = fbnic_get_queue_stats_rx,
	.get_queue_stats_tx = fbnic_get_queue_stats_tx,
	.get_base_stats = fbnic_get_base_stats,
};
548
fbnic_reset_queues(struct fbnic_net * fbn,unsigned int tx,unsigned int rx)549 void fbnic_reset_queues(struct fbnic_net *fbn,
550 unsigned int tx, unsigned int rx)
551 {
552 struct fbnic_dev *fbd = fbn->fbd;
553 unsigned int max_napis;
554
555 max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
556
557 tx = min(tx, max_napis);
558 fbn->num_tx_queues = tx;
559
560 rx = min(rx, max_napis);
561 fbn->num_rx_queues = rx;
562
563 fbn->num_napi = max(tx, rx);
564 }
565
/**
 * fbnic_netdev_free - Free the netdev associate with fbnic
 * @fbd: Driver specific structure to free netdev from
 *
 * Destroy the phylink instance if one was created, free the netdev, and
 * clear the reference held in the fbnic device structure.
 **/
void fbnic_netdev_free(struct fbnic_dev *fbd)
{
	struct fbnic_net *fbn = netdev_priv(fbd->netdev);

	if (fbn->phylink)
		phylink_destroy(fbn->phylink);

	free_netdev(fbd->netdev);
	fbd->netdev = NULL;
}
583
/**
 * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
 * @fbd: Driver specific structure to associate netdev with
 *
 * Allocate and initialize the netdev and netdev private structure. Bind
 * together the hardware, netdev, and pci data structures.
 *
 * Return: Pointer to the allocated netdev on success, NULL on failure
 **/
struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
{
	struct net_device *netdev;
	struct fbnic_net *fbn;
	int default_queues;

	netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
	if (!netdev)
		return NULL;

	SET_NETDEV_DEV(netdev, fbd->dev);
	fbd->netdev = netdev;

	netdev->netdev_ops = &fbnic_netdev_ops;
	netdev->stat_ops = &fbnic_stat_ops;

	fbnic_set_ethtool_ops(netdev);

	fbn = netdev_priv(netdev);

	fbn->netdev = netdev;
	fbn->fbd = fbd;

	/* Default ring sizes */
	fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
	fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
	fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
	fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;

	/* Start with the RSS default queue count, capped by what the
	 * device supports
	 */
	default_queues = netif_get_num_default_rss_queues();
	if (default_queues > fbd->max_num_queues)
		default_queues = fbd->max_num_queues;

	fbnic_reset_queues(fbn, default_queues, default_queues);

	fbnic_reset_indir_tbl(fbn);
	fbnic_rss_key_fill(fbn->rss_key);
	fbnic_rss_init_en_mask(fbn);

	netdev->features |=
		NETIF_F_RXHASH |
		NETIF_F_SG |
		NETIF_F_HW_CSUM |
		NETIF_F_RXCSUM;

	netdev->hw_features |= netdev->features;
	netdev->vlan_features |= netdev->features;
	netdev->hw_enc_features |= netdev->features;

	netdev->min_mtu = IPV6_MIN_MTU;
	netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;

	/* TBD: This is workaround for BMC as phylink doesn't have support
	 * for leaving the link enabled if a BMC is present.
	 */
	netdev->ethtool->wol_enabled = true;

	fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
	fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
	netif_carrier_off(netdev);

	netif_tx_stop_all_queues(netdev);

	/* If phylink can't be set up we can't manage the link; unwind */
	if (fbnic_phylink_init(netdev)) {
		fbnic_netdev_free(fbd);
		return NULL;
	}

	return netdev;
}
662
/* Decode the MAC address embedded in the 64-bit PCIe device serial
 * number: the top three bytes and the bottom three bytes of @dsn form
 * the six address octets (the two middle DSN bytes are skipped).
 *
 * Returns 0 if the decoded address is a valid Ethernet address,
 * -EINVAL otherwise.
 */
static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
{
	static const u8 dsn_shift[ETH_ALEN] = { 56, 48, 40, 16, 8, 0 };
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		addr[i] = (dsn >> dsn_shift[i]) & 0xFF;

	return is_valid_ether_addr(addr) ? 0 : -EINVAL;
}
674
675 /**
676 * fbnic_netdev_register - Initialize general software structures
677 * @netdev: Netdev containing structure to initialize and register
678 *
679 * Initialize the MAC address for the netdev and register it.
680 *
681 * Return: 0 on success, negative on failure
682 **/
fbnic_netdev_register(struct net_device * netdev)683 int fbnic_netdev_register(struct net_device *netdev)
684 {
685 struct fbnic_net *fbn = netdev_priv(netdev);
686 struct fbnic_dev *fbd = fbn->fbd;
687 u64 dsn = fbd->dsn;
688 u8 addr[ETH_ALEN];
689 int err;
690
691 err = fbnic_dsn_to_mac_addr(dsn, addr);
692 if (!err) {
693 ether_addr_copy(netdev->perm_addr, addr);
694 eth_hw_addr_set(netdev, addr);
695 } else {
696 /* A randomly assigned MAC address will cause provisioning
697 * issues so instead just fail to spawn the netdev and
698 * avoid any confusion.
699 */
700 dev_err(fbd->dev, "MAC addr %pM invalid\n", addr);
701 return err;
702 }
703
704 return register_netdev(netdev);
705 }
706
/* Counterpart to fbnic_netdev_register(): detach the netdev from the
 * network stack. The netdev itself is freed later by fbnic_netdev_free().
 */
void fbnic_netdev_unregister(struct net_device *netdev)
{
	unregister_netdev(netdev);
}
711