/* Excerpts from drivers/net/ethernet/intel/ice/ice_main.c */

// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018-2023, Intel Corporation. */

static int debug = -1;

/* ice_hw_to_dev - Get device pointer from the hardware structure */
        return &pf->pdev->dev;

/* in netif_is_ice(): */
        return dev && (dev->netdev_ops == &ice_netdev_ops ||
                       dev->netdev_ops == &ice_netdev_safe_mode_ops);

/* ice_get_tx_pending - returns number of Tx descriptors not processed */
        head = ring->next_to_clean;
        tail = ring->next_to_use;

        if (head != tail)
                return (head < tail) ?
                        tail - head : (tail + ring->count - head);
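/* Editor's illustrative sketch (not driver code): the wrap-around arithmetic
 * above, exercised in isolation. ring_pending() and its arguments are
 * hypothetical; a ring with `count` slots where `head` chases `tail` has
 * `tail - head` pending entries unless the tail has wrapped past the end.
 */
#include <assert.h>

static unsigned int ring_pending(unsigned int head, unsigned int tail,
                                 unsigned int count)
{
        if (head == tail)
                return 0;
        return (head < tail) ? tail - head : tail + count - head;
}

/* e.g. count = 256: head 250, tail 4 -> 10 descriptors still pending */
static void ring_pending_example(void)
{
        assert(ring_pending(250, 4, 256) == 10);
        assert(ring_pending(4, 250, 256) == 246);
        assert(ring_pending(7, 7, 256) == 0);
}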
/* ice_check_for_hang_subtask - check for and recover hung queues */
        if (pf->vsi[v] && pf->vsi[v]->type == ICE_VSI_PF) {
                vsi = pf->vsi[v];

        if (!vsi || test_bit(ICE_VSI_DOWN, vsi->state))

        if (!(vsi->netdev && netif_carrier_ok(vsi->netdev)))

        hw = &vsi->back->hw;

                struct ice_tx_ring *tx_ring = vsi->tx_rings[i];

                ring_stats = tx_ring->ring_stats;

                if (tx_ring->desc) {

                        packets = ring_stats->stats.pkts & INT_MAX;
                        if (ring_stats->tx_stats.prev_pkt == packets) {

                                ice_trigger_sw_intr(hw, tx_ring->q_vector);

                        ring_stats->tx_stats.prev_pkt =
                                ice_get_tx_pending(tx_ring) ? packets : -1;
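/* Editor's illustrative sketch (not driver code): the hang-detection idea
 * above, outside the driver. Each watchdog pass compares the current
 * completion count with the snapshot taken on the previous pass; an
 * unchanged count while work is still queued suggests a stalled queue.
 * All names here are hypothetical.
 */
#include <limits.h>
#include <stdbool.h>

struct watchdog_state {
        int prev_pkt;   /* snapshot from last pass, -1 = queue was idle */
};

static bool queue_looks_hung(struct watchdog_state *wd,
                             unsigned long long completed, bool work_pending)
{
        int packets = (int)(completed & INT_MAX);
        bool hung = work_pending && wd->prev_pkt == packets;

        /* re-arm: only keep a snapshot while work remains outstanding */
        wd->prev_pkt = work_pending ? packets : -1;
        return hung;
}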
/* ice_init_mac_fltr - Set initial MAC filters */
                return -EINVAL;

        perm_addr = vsi->port_info->mac.perm_addr;

/* ice_add_mac_to_sync_list - creates list of MAC addresses to be synced */
        struct ice_vsi *vsi = np->vsi;

        if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_sync_list, addr,
                                     ICE_FWD_TO_VSI))
                return -EINVAL;

/* ice_add_mac_to_unsync_list - creates list of MAC addresses to be unsynced */
        struct ice_vsi *vsi = np->vsi;

        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        if (ice_fltr_add_mac_to_list(vsi, &vsi->tmp_unsync_list, addr,
                                     ICE_FWD_TO_VSI))
                return -EINVAL;

/* ice_vsi_fltr_changed - check if filter state changed */
        return test_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state) ||
               test_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

/* ice_set_promisc - Enable promiscuous mode for a given PF */
        if (vsi->type != ICE_VSI_PF)

                status = ice_fltr_set_vlan_vsi_promisc(&vsi->back->hw, vsi,

                status = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx,

        if (status && status != -EEXIST)

        netdev_dbg(vsi->netdev, "set promisc filter bits for VSI %i: 0x%x\n",
                   vsi->vsi_num, promisc_m);

/* ice_clear_promisc - Disable promiscuous mode for a given PF */
        if (vsi->type != ICE_VSI_PF)

                status = ice_fltr_clear_vlan_vsi_promisc(&vsi->back->hw, vsi,

                status = ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx,

        netdev_dbg(vsi->netdev, "clear promisc filter bits for VSI %i: 0x%x\n",
                   vsi->vsi_num, promisc_m);
/* ice_vsi_sync_fltr - Update the VSI filter list to the HW */
        struct device *dev = ice_pf_to_dev(vsi->back);
        struct net_device *netdev = vsi->netdev;

        struct ice_pf *pf = vsi->back;
        struct ice_hw *hw = &pf->hw;

        if (!vsi->netdev)
                return -EINVAL;

        while (test_and_set_bit(ICE_CFG_BUSY, vsi->state))

        changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
        vsi->current_netdev_flags = vsi->netdev->flags;

        INIT_LIST_HEAD(&vsi->tmp_sync_list);
        INIT_LIST_HEAD(&vsi->tmp_unsync_list);

                clear_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
                clear_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

        err = ice_fltr_remove_mac_list(vsi, &vsi->tmp_unsync_list);
        ice_fltr_free_list(dev, &vsi->tmp_unsync_list);

        if (err == -ENOMEM)

        err = ice_fltr_add_mac_list(vsi, &vsi->tmp_sync_list);
        ice_fltr_free_list(dev, &vsi->tmp_sync_list);

        if (err && err != -EEXIST) {

                if (hw->adminq.sq_last_status == ICE_AQ_RC_ENOSPC &&
                    !test_and_set_bit(ICE_VSI_OVERFLOW_PROMISC,
                                      vsi->state)) {

                                    vsi->vsi_num);

        if (vsi->current_netdev_flags & IFF_ALLMULTI) {

                        vsi->current_netdev_flags &= ~IFF_ALLMULTI;

        /* !(vsi->current_netdev_flags & IFF_ALLMULTI) */

                        vsi->current_netdev_flags |= IFF_ALLMULTI;

            test_bit(ICE_VSI_PROMISC_CHANGED, vsi->state)) {
                clear_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);
                if (vsi->current_netdev_flags & IFF_PROMISC) {

                        if (!ice_is_dflt_vsi_in_use(vsi->port_info)) {

                                if (err && err != -EEXIST) {
                                                   err, vsi->vsi_num);
                                        vsi->current_netdev_flags &=

                        vlan_ops->dis_rx_filtering(vsi);

                                           err, vsi->vsi_num);
                                vsi->current_netdev_flags |=

                        if (vsi->netdev->features &
                                vlan_ops->ena_rx_filtering(vsi);

                if (!(vsi->current_netdev_flags & IFF_ALLMULTI)) {

                                           err, vsi->vsi_num);

        set_bit(ICE_VSI_PROMISC_CHANGED, vsi->state);

        set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state);
        set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state);

        clear_bit(ICE_CFG_BUSY, vsi->state);
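/* Editor's illustrative sketch (not driver code): the XOR trick used above
 * to spot flag transitions. Bits set in `changed` differ between the cached
 * copy and the live flags, so each interesting bit can be tested once per
 * sync pass. The flag names below are hypothetical.
 */
#include <stdio.h>

#define FLAG_PROMISC    (1u << 0)
#define FLAG_ALLMULTI   (1u << 1)

static void sync_flags(unsigned int *cached, unsigned int live)
{
        unsigned int changed = *cached ^ live;

        *cached = live;         /* accept the new state before acting on it */
        if (changed & FLAG_PROMISC)
                printf("promiscuous mode %s\n",
                       (live & FLAG_PROMISC) ? "entered" : "left");
        if (changed & FLAG_ALLMULTI)
                printf("all-multicast %s\n",
                       (live & FLAG_ALLMULTI) ? "entered" : "left");
}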
/* ice_sync_fltr_subtask - Sync the VSI filter list with HW */
        if (!pf || !(test_bit(ICE_FLAG_FLTR_SYNC, pf->flags)))
                return;

        clear_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

                if (pf->vsi[v] && ice_vsi_fltr_changed(pf->vsi[v]) &&
                    ice_vsi_sync_fltr(pf->vsi[v])) {

                        set_bit(ICE_FLAG_FLTR_SYNC, pf->flags);

/* ice_pf_dis_all_vsi - Pause all VSIs on a PF */
                if (pf->vsi[v])
                        ice_dis_vsi(pf->vsi[v], locked);

                pf->pf_agg_node[node].num_vsis = 0;

                pf->vf_agg_node[node].num_vsis = 0;

/* ice_prepare_for_reset - prep for reset */
        struct ice_hw *hw = &pf->hw;

        if (test_bit(ICE_PREPARED_FOR_RESET, pf->state))
                return;

        synchronize_irq(pf->oicr_irq.virq);

        if (ice_check_sq_alive(hw, &hw->mailboxq))

        mutex_lock(&pf->vfs.table_lock);

        mutex_unlock(&pf->vfs.table_lock);

                ice_eswitch_br_fdb_flush(pf->eswitch.br_offloads->bridge);

                vsi->orig_rss_size = 0;

        if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) {

                        vsi->old_ena_tc = vsi->all_enatc;
                        vsi->old_numtc = vsi->all_numtc;

                        vsi->old_ena_tc = 0;
                        vsi->all_enatc = 0;
                        vsi->old_numtc = 0;
                        vsi->all_numtc = 0;
                        vsi->req_txq = 0;
                        vsi->req_rxq = 0;
                        clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags);
                        memset(&vsi->mqprio_qopt, 0, sizeof(vsi->mqprio_qopt));

                        if (vsi->netdev)
                                netif_device_detach(vsi->netdev);

        set_bit(ICE_VSI_REBUILD_PENDING, ice_get_main_vsi(pf)->state);

        if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags))

        if (hw->port_info)
                ice_sched_clear_port(hw->port_info);

        set_bit(ICE_PREPARED_FOR_RESET, pf->state);
/* ice_do_reset - Initiate one of many types of resets */
        struct ice_hw *hw = &pf->hw;

        if (pf->lag && pf->lag->bonded && reset_type == ICE_RESET_PFR) {

                set_bit(ICE_RESET_FAILED, pf->state);
                clear_bit(ICE_RESET_OICR_RECV, pf->state);
                clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
                clear_bit(ICE_PFR_REQ, pf->state);
                clear_bit(ICE_CORER_REQ, pf->state);
                clear_bit(ICE_GLOBR_REQ, pf->state);
                wake_up(&pf->reset_wait_queue);

        /* ... interrupt. So for PFR, rebuild after the reset and clear the
         * reset-related state bits ... */
                pf->pfr_count++;

                clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
                clear_bit(ICE_PFR_REQ, pf->state);
                wake_up(&pf->reset_wait_queue);

/* ice_reset_subtask - Set up for resetting the device and driver
 * ... of reset is pending and sets bits in pf->state indicating the reset
 * type, then prepares for pending reset if not already (for PF
 * software-initiated ...) */
        if (test_bit(ICE_RESET_OICR_RECV, pf->state)) {

                if (test_and_clear_bit(ICE_CORER_RECV, pf->state))

                if (test_and_clear_bit(ICE_GLOBR_RECV, pf->state))

                if (test_and_clear_bit(ICE_EMPR_RECV, pf->state))

                if (ice_check_reset(&pf->hw)) {
                        set_bit(ICE_RESET_FAILED, pf->state);

                        pf->hw.reset_ongoing = false;

                        clear_bit(ICE_RESET_OICR_RECV, pf->state);
                        clear_bit(ICE_PREPARED_FOR_RESET, pf->state);
                        clear_bit(ICE_PFR_REQ, pf->state);
                        clear_bit(ICE_CORER_REQ, pf->state);
                        clear_bit(ICE_GLOBR_REQ, pf->state);
                        wake_up(&pf->reset_wait_queue);

        if (test_bit(ICE_PFR_REQ, pf->state)) {

                if (pf->lag && pf->lag->bonded) {

        if (test_bit(ICE_CORER_REQ, pf->state))

        if (test_bit(ICE_GLOBR_REQ, pf->state))

        if (!test_bit(ICE_DOWN, pf->state) &&
            !test_bit(ICE_CFG_BUSY, pf->state)) {
/* ice_print_topo_conflict - print topology conflict message */
        switch (vsi->port_info->phy.link_info.topo_media_conflict) {

                netdev_info(vsi->netdev, "Potential misconfiguration of the Ethernet port detected. If it was not …

        if (test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, vsi->back->flags))
                netdev_warn(vsi->netdev, "An unsupported module type was detected. Refer to the Intel(R) Ethernet …

                netdev_err(vsi->netdev, "Rx/Tx is disabled on this device because an unsupported module type was d…

/* ice_print_link_msg - print link up or down message */
        if (vsi->current_isup == isup)
                return;

        vsi->current_isup = isup;

                netdev_info(vsi->netdev, "NIC Link is Down\n");

        switch (vsi->port_info->phy.link_info.link_speed) {

        switch (vsi->port_info->fc.current_mode) {

        switch (vsi->port_info->phy.link_info.fec_info) {

                fec = "RS-FEC";

                fec = "FC-FEC/BASE-R";

        if (vsi->port_info->phy.link_info.an_info & ICE_AQ_AN_COMPLETED)

        status = ice_aq_get_phy_caps(vsi->port_info, false,

                netdev_info(vsi->netdev, "Get phy capability failed.\n");

        if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
            caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
                fec_req = "RS-FEC";
        else if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
                 caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
                fec_req = "FC-FEC/BASE-R";

        netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s,…
/* ice_vsi_link_event - update the VSI's netdev */
        if (test_bit(ICE_VSI_DOWN, vsi->state) || !vsi->netdev)
                return;

        if (vsi->type == ICE_VSI_PF) {
                if (link_up == netif_carrier_ok(vsi->netdev))
                        return;

                if (link_up) {
                        netif_carrier_on(vsi->netdev);
                        netif_tx_wake_all_queues(vsi->netdev);
                } else {
                        netif_carrier_off(vsi->netdev);
                        netif_tx_stop_all_queues(vsi->netdev);
                }
        }

/* ice_set_dflt_mib - send a default config MIB to the FW */
        struct ice_hw *hw = &pf->hw;

        tlv->typelen = htons(typelen);

        tlv->ouisubtype = htonl(ouisubtype);

        buf = tlv->tlvinfo;

        /* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
         * Octets 5 - 12 are BW values, set octet 5 to 100% BW.
         * Octets 13 - 20 are TSA values - leave as zeros
         */

                ((char *)tlv + sizeof(tlv->typelen) + len);

        buf = tlv->tlvinfo;
        tlv->typelen = htons(typelen);

        tlv->ouisubtype = htonl(ouisubtype);

        /* Octets 1 - 4 map UP to TC - all UPs map to zero
         * Octets 5 - 12 are BW values - set TC 0 to 100%.
         * Octets 13 - 20 are TSA value - leave as zeros
         */

                ((char *)tlv + sizeof(tlv->typelen) + len);

        tlv->typelen = htons(typelen);

        tlv->ouisubtype = htonl(ouisubtype);

        /* Octet 1 left as all zeros - PFC disabled */
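/* Editor's illustrative sketch (not driver code): how the typelen and
 * ouisubtype words above are packed. An LLDP TLV header is a big-endian u16
 * with a 7-bit type in the top bits and a 9-bit length below it; an
 * organizationally specific TLV then carries a 24-bit OUI plus an 8-bit
 * subtype in one u32. Helper names and the example values are assumptions.
 */
#include <stdint.h>
#include <arpa/inet.h>  /* htons/htonl */

#define LLDP_TLV_TYPE_ORG       127u    /* organizationally specific */

static uint16_t lldp_typelen(uint8_t type, uint16_t len)
{
        return htons((uint16_t)(((type & 0x7f) << 9) | (len & 0x1ff)));
}

static uint32_t lldp_ouisubtype(uint32_t oui, uint8_t subtype)
{
        return htonl((oui << 8) | subtype);
}

/* e.g. an IEEE 802.1 (OUI 0x0080c2) ETS Configuration TLV, 25-byte body */
static void lldp_pack_example(uint16_t *typelen, uint32_t *ouisubtype)
{
        *typelen = lldp_typelen(LLDP_TLV_TYPE_ORG, 25);
        *ouisubtype = lldp_ouisubtype(0x0080c2, 9);
}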
/* ice_check_phy_fw_load - check if PHY FW load failed */
                clear_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);

        if (test_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags))

        set_bit(ICE_FLAG_PHY_FW_LOAD_FAILED, pf->flags);

/* in ice_check_module_power(): */
                clear_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);

        if (test_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags))

                set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);

                dev_err(ice_pf_to_dev(pf), "The module's power requirements exceed the device's power supply. Cann…
                set_bit(ICE_FLAG_MOD_POWER_UNSUPPORTED, pf->flags);

/* ice_check_link_cfg_err - check if link configuration failed */

/* ice_link_event - process the link event */
        phy_info = &pi->phy;
        phy_info->link_info_old = phy_info->link_info;

        old_link = !!(phy_info->link_info_old.link_info & ICE_AQ_LINK_UP);
        old_link_speed = phy_info->link_info_old.link_speed;

        /* update the link info structures and re-enable link events, ... */

                        pi->lport, status,
                        ice_aq_str(pi->hw->adminq.sq_last_status));

        ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

        if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)

        if (!vsi || !vsi->port_info)
                return -EINVAL;

        if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags) &&
            !(pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE)) {
                set_bit(ICE_FLAG_NO_MEDIA, pf->flags);

        if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
/* ice_watchdog_subtask - periodic tasks not using event driven scheduling */
        if (test_bit(ICE_DOWN, pf->state) ||
            test_bit(ICE_CFG_BUSY, pf->state))
                return;

        if (time_before(jiffies,
                        pf->serv_tmr_prev + pf->serv_tmr_period))
                return;

        pf->serv_tmr_prev = jiffies;

                if (pf->vsi[i] && pf->vsi[i]->netdev)
                        ice_update_vsi_stats(pf->vsi[i]);

/* ice_init_link_events - enable/initialize link events
 * Returns -EIO on failure, 0 on success
 */
        if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
                dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
                        pi->lport);
                return -EIO;
        }

                dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
                        pi->lport);
                return -EIO;

/* ice_handle_link_event - handle link event via ARQ */
        link_data = (struct ice_aqc_get_link_status_data *)event->msg_buf;
        port_info = pf->hw.port_info;
        if (!port_info)
                return -EINVAL;

                             !!(link_data->link_info & ICE_AQ_LINK_UP),
                             le16_to_cpu(link_data->link_speed));

/* ice_get_fwlog_data - copy the FW log data from ARQ event */
        struct ice_hw *hw = &pf->hw;

        fwlog = &hw->fwlog_ring.rings[hw->fwlog_ring.tail];

        memset(fwlog->data, 0, PAGE_SIZE);
        fwlog->data_size = le16_to_cpu(event->desc.datalen);

        memcpy(fwlog->data, event->msg_buf, fwlog->data_size);
        ice_fwlog_ring_increment(&hw->fwlog_ring.tail, hw->fwlog_ring.size);

        if (ice_fwlog_ring_full(&hw->fwlog_ring)) {

                ice_fwlog_ring_increment(&hw->fwlog_ring.head,
                                         hw->fwlog_ring.size);
/* ice_aq_prep_for_event - Prepare to wait for an AdminQ event from firmware
 *
 * To obtain only the descriptor contents, pass a task->event with null
 * msg_buf. If the complete data buffer is desired, allocate the
 * task->event.msg_buf with enough space ahead of time.
 */
        INIT_HLIST_NODE(&task->entry);
        task->opcode = opcode;
        task->state = ICE_AQ_TASK_WAITING;

        spin_lock_bh(&pf->aq_wait_lock);
        hlist_add_head(&task->entry, &pf->aq_wait_list);
        spin_unlock_bh(&pf->aq_wait_lock);

/* ice_aq_wait_for_event - Wait for an AdminQ event from firmware */
        enum ice_aq_task_state *state = &task->state;

        ret = wait_event_interruptible_timeout(pf->aq_wait_queue,

                err = -EINVAL;

                err = ret < 0 ? ret : -ETIMEDOUT;

                err = ret < 0 ? ret : -ECANCELED;

                err = -EINVAL;

                jiffies_to_msecs(jiffies - start),

                task->opcode);

        spin_lock_bh(&pf->aq_wait_lock);
        hlist_del(&task->entry);
        spin_unlock_bh(&pf->aq_wait_lock);
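/* Editor's illustrative sketch (not driver code): the WAITING / COMPLETE /
 * CANCELED wait pattern above, re-done with pthreads instead of the kernel
 * wait queue. The waiter sleeps until another thread moves the task out of
 * the WAITING state or the timeout expires; all names are hypothetical.
 */
#include <errno.h>
#include <pthread.h>
#include <time.h>

enum task_state { TASK_WAITING, TASK_COMPLETE, TASK_CANCELED };

struct aq_task {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        enum task_state state;
};

static int aq_task_wait(struct aq_task *task, unsigned int timeout_ms)
{
        struct timespec deadline;
        int err = 0;

        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_sec += timeout_ms / 1000;
        deadline.tv_nsec += (long)(timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        pthread_mutex_lock(&task->lock);
        while (task->state == TASK_WAITING && err != ETIMEDOUT)
                err = pthread_cond_timedwait(&task->cond, &task->lock,
                                             &deadline);
        /* re-check the state: a completion may race with the timeout */
        switch (task->state) {
        case TASK_COMPLETE:     err = 0;                break;
        case TASK_CANCELED:     err = -ECANCELED;       break;
        default:                err = -ETIMEDOUT;       break;
        }
        pthread_mutex_unlock(&task->lock);
        return err;
}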
/* ice_aq_check_events - Check if any thread is waiting for an AdminQ event
 *
 * Note that event->msg_buf will only be duplicated if the event has a buffer
 * ... */
        spin_lock_bh(&pf->aq_wait_lock);
        hlist_for_each_entry(task, &pf->aq_wait_list, entry) {
                if (task->state != ICE_AQ_TASK_WAITING)
                        continue;
                if (task->opcode != opcode)
                        continue;

                task_ev = &task->event;
                memcpy(&task_ev->desc, &event->desc, sizeof(event->desc));
                task_ev->msg_len = event->msg_len;

                if (task_ev->msg_buf && task_ev->buf_len >= event->buf_len) {
                        memcpy(task_ev->msg_buf, event->msg_buf,
                               event->buf_len);
                        task_ev->buf_len = event->buf_len;
                }

                task->state = ICE_AQ_TASK_COMPLETE;
        }
        spin_unlock_bh(&pf->aq_wait_lock);

        wake_up(&pf->aq_wait_queue);

/* ice_aq_cancel_waiting_tasks - Immediately cancel all waiting tasks
 *
 * This will then cause ice_aq_wait_for_event to exit with -ECANCELED.
 */
        spin_lock_bh(&pf->aq_wait_lock);
        hlist_for_each_entry(task, &pf->aq_wait_list, entry)
                task->state = ICE_AQ_TASK_CANCELED;
        spin_unlock_bh(&pf->aq_wait_lock);

        wake_up(&pf->aq_wait_queue);
/* __ice_clean_ctrlq - helper function to clean controlq rings */
        struct ice_hw *hw = &pf->hw;

        if (test_bit(ICE_RESET_FAILED, pf->state))

                cq = &hw->adminq;

                cq = &hw->sbq;

                cq = &hw->mailboxq;

                hw->mbx_snapshot.mbx_buf.state = ICE_MAL_VF_DETECT_STATE_NEW_SNAPSHOT;

        /* check for error indications - PF_xx_AxQLEN register layout for
         * ... */
        val = rd32(hw, cq->rq.len);

                wr32(hw, cq->rq.len, val);

        val = rd32(hw, cq->sq.len);

                wr32(hw, cq->sq.len, val);

        event.buf_len = cq->rq_buf_size;

                if (ret == -EALREADY)

                        u16 val = hw->mailboxq.num_rq_entries;

/* ice_ctrlq_pending - check if there is a difference between ntc and ntu */
        ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
        return cq->rq.next_to_clean != ntu;
/* ice_clean_adminq_subtask - clean the AdminQ rings */
        struct ice_hw *hw = &pf->hw;

        if (!test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))

        clear_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

        if (ice_ctrlq_pending(hw, &hw->adminq))

/* ice_clean_mailboxq_subtask - clean the MailboxQ rings */
        struct ice_hw *hw = &pf->hw;

        if (!test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state))

        clear_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);

        if (ice_ctrlq_pending(hw, &hw->mailboxq))

/* ice_clean_sbq_subtask - clean the Sideband Queue rings */
        struct ice_hw *hw = &pf->hw;

                clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

        if (!test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state))

        clear_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

        if (ice_ctrlq_pending(hw, &hw->sbq))

/* ice_service_task_schedule - schedule the service task to wake up */
        if (!test_bit(ICE_SERVICE_DIS, pf->state) &&
            !test_and_set_bit(ICE_SERVICE_SCHED, pf->state) &&
            !test_bit(ICE_NEEDS_RESTART, pf->state))
                queue_work(ice_wq, &pf->serv_task);

/* ice_service_task_complete - finish up the service task */
        WARN_ON(!test_bit(ICE_SERVICE_SCHED, pf->state));

        /* force memory (pf->state) to sync before next service task */

        clear_bit(ICE_SERVICE_SCHED, pf->state);

/* ice_service_task_stop - stop service task and cancel works */
        ret = test_and_set_bit(ICE_SERVICE_DIS, pf->state);

        if (pf->serv_tmr.function)
                del_timer_sync(&pf->serv_tmr);
        if (pf->serv_task.func)
                cancel_work_sync(&pf->serv_task);

        clear_bit(ICE_SERVICE_SCHED, pf->state);

/* ice_service_task_restart - restart service task and schedule works */
        clear_bit(ICE_SERVICE_DIS, pf->state);

/* ice_service_timer - timer callback to schedule service task */
        mod_timer(&pf->serv_tmr, round_jiffies(pf->serv_tmr_period + jiffies));
/* ice_mdd_maybe_reset_vf - reset VF after MDD event
 * ... mdd-auto-reset-vf.
 */
        if (!test_bit(ICE_FLAG_MDD_AUTO_RESET_VF, pf->flags))
                return;

        dev_info(dev, "PF-to-VF reset on PF %d VF %d due to MDD event\n",
                 pf->hw.pf_id, vf->vf_id);

/* ice_handle_mdd_event - handle malicious driver detect event
 * ... private flag mdd-auto-reset-vf.
 */
        struct ice_hw *hw = &pf->hw;

        if (!test_and_clear_bit(ICE_MDD_EVENT_PENDING, pf->state)) {

        mutex_lock(&pf->vfs.table_lock);

                reg = rd32(hw, VP_MDET_TX_PQM(vf->vf_id));
                if (reg & VP_MDET_TX_PQM_VALID_M) {
                        wr32(hw, VP_MDET_TX_PQM(vf->vf_id), 0xFFFF);
                        vf->mdd_tx_events.count++;
                        set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);

                                 vf->vf_id);

                reg = rd32(hw, VP_MDET_TX_TCLAN(vf->vf_id));
                if (reg & VP_MDET_TX_TCLAN_VALID_M) {
                        wr32(hw, VP_MDET_TX_TCLAN(vf->vf_id), 0xFFFF);
                        vf->mdd_tx_events.count++;
                        set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);

                                 vf->vf_id);

                reg = rd32(hw, VP_MDET_TX_TDPU(vf->vf_id));
                if (reg & VP_MDET_TX_TDPU_VALID_M) {
                        wr32(hw, VP_MDET_TX_TDPU(vf->vf_id), 0xFFFF);
                        vf->mdd_tx_events.count++;
                        set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);

                                 vf->vf_id);

                reg = rd32(hw, VP_MDET_RX(vf->vf_id));
                if (reg & VP_MDET_RX_VALID_M) {
                        wr32(hw, VP_MDET_RX(vf->vf_id), 0xFFFF);
                        vf->mdd_rx_events.count++;
                        set_bit(ICE_MDD_VF_PRINT_PENDING, pf->state);

                                 vf->vf_id);

        mutex_unlock(&pf->vfs.table_lock);
/* ice_force_phys_link_state - Force the physical link state */
        if (!vsi || !vsi->port_info || !vsi->back)
                return -EINVAL;
        if (vsi->type != ICE_VSI_PF)

        dev = ice_pf_to_dev(vsi->back);

        pi = vsi->port_info;

                return -ENOMEM;

                        vsi->vsi_num, retcode);
                retcode = -EIO;

        if (link_up == !!(pcaps->caps & ICE_AQC_PHY_EN_LINK) &&
            link_up == !!(pi->phy.link_info.link_info & ICE_AQ_LINK_UP))

        cfg = kmemdup(&pi->phy.curr_user_phy_cfg, sizeof(*cfg), GFP_KERNEL);
        if (!cfg) {
                retcode = -ENOMEM;

        cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
        if (link_up)
                cfg->caps |= ICE_AQ_PHY_ENA_LINK;
        else
                cfg->caps &= ~ICE_AQ_PHY_ENA_LINK;

        retcode = ice_aq_set_phy_cfg(&vsi->back->hw, pi, cfg, NULL);

                        vsi->vsi_num, retcode);
                retcode = -EIO;

/* ice_init_nvm_phy_type - Initialize the NVM PHY type */
        struct ice_pf *pf = pi->hw->back;

                return -ENOMEM;

        pf->nvm_phy_type_hi = pcaps->phy_type_high;
        pf->nvm_phy_type_lo = pcaps->phy_type_low;

/* ice_init_link_dflt_override - Initialize link default override */
        struct ice_pf *pf = pi->hw->back;

        ldo = &pf->link_dflt_override;

        if (!(ldo->options & ICE_LINK_OVERRIDE_PORT_DIS))
                return;

        /* Enable Total Port Shutdown (override/replace link-down-on-close
         * ...) */
        set_bit(ICE_FLAG_TOTAL_PORT_SHUTDOWN_ENA, pf->flags);
        set_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags);
/* ice_init_phy_cfg_dflt_override - Initialize PHY cfg default override settings */
        struct ice_phy_info *phy = &pi->phy;
        struct ice_pf *pf = pi->hw->back;

        ldo = &pf->link_dflt_override;

        cfg = &phy->curr_user_phy_cfg;

        if (ldo->phy_type_low || ldo->phy_type_high) {
                cfg->phy_type_low = pf->nvm_phy_type_lo &
                                    cpu_to_le64(ldo->phy_type_low);
                cfg->phy_type_high = pf->nvm_phy_type_hi &
                                     cpu_to_le64(ldo->phy_type_high);

        cfg->link_fec_opt = ldo->fec_options;
        phy->curr_user_fec_req = ICE_FEC_AUTO;

        set_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING, pf->state);

/* ice_init_phy_user_cfg - Initialize the PHY user configuration */
        struct ice_phy_info *phy = &pi->phy;
        struct ice_pf *pf = pi->hw->back;

        if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
                return -EIO;

                return -ENOMEM;

        if (ice_fw_supports_report_dflt_cfg(pi->hw))

        ice_copy_phy_caps_to_cfg(pi, pcaps, &pi->phy.curr_user_phy_cfg);

        if (ice_fw_supports_link_override(pi->hw) &&
            !(pcaps->module_compliance_enforcement &
              ICE_AQC_MOD_ENFORCE_STRICT_MODE)) {
                set_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags);

        if (!ice_fw_supports_report_dflt_cfg(pi->hw) &&
            (pf->link_dflt_override.options & ICE_LINK_OVERRIDE_EN)) {

        phy->curr_user_fec_req = ice_caps_to_fec_mode(pcaps->caps,
                                                      pcaps->link_fec_options);
        phy->curr_user_fc_req = ice_caps_to_fc_mode(pcaps->caps);

        phy->curr_user_speed_req = ICE_AQ_LINK_SPEED_M;
        set_bit(ICE_PHY_INIT_COMPLETE, pf->state);
/* ice_configure_phy - configure PHY */
        struct device *dev = ice_pf_to_dev(vsi->back);
        struct ice_port_info *pi = vsi->port_info;

        struct ice_phy_info *phy = &pi->phy;
        struct ice_pf *pf = vsi->back;

        if (!(phy->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE))
                return -ENOMEDIUM;

        if (!test_bit(ICE_FLAG_LINK_LENIENT_MODE_ENA, pf->flags) &&
            phy->link_info.topo_media_conflict == ICE_AQ_LINK_TOPO_UNSUPP_MEDIA)
                return -EPERM;

        if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags))

                return -ENOMEM;

                        vsi->vsi_num, err);

        if (pcaps->caps & ICE_AQC_PHY_EN_LINK &&
            ice_phy_caps_equals_cfg(pcaps, &phy->curr_user_phy_cfg))

        if (ice_fw_supports_report_dflt_cfg(pi->hw))

                        vsi->vsi_num, err);

                err = -ENOMEM;

        /* Speed - If default override pending, use curr_user_phy_cfg set in
         * ... */
        if (test_and_clear_bit(ICE_LINK_DEFAULT_OVERRIDE_PENDING,
                               vsi->back->state)) {
                cfg->phy_type_low = phy->curr_user_phy_cfg.phy_type_low;
                cfg->phy_type_high = phy->curr_user_phy_cfg.phy_type_high;

                                          pi->phy.curr_user_speed_req);
                cfg->phy_type_low = pcaps->phy_type_low & cpu_to_le64(phy_low);
                cfg->phy_type_high = pcaps->phy_type_high &
                                     cpu_to_le64(phy_high);

        if (!cfg->phy_type_low && !cfg->phy_type_high) {
                cfg->phy_type_low = pcaps->phy_type_low;
                cfg->phy_type_high = pcaps->phy_type_high;

        ice_cfg_phy_fec(pi, cfg, phy->curr_user_fec_req);

        if (cfg->link_fec_opt !=
            (cfg->link_fec_opt & pcaps->link_fec_options)) {
                cfg->caps |= pcaps->caps & ICE_AQC_PHY_EN_AUTO_FEC;
                cfg->link_fec_opt = pcaps->link_fec_options;
        }

        /* Flow Control - always supported; no need to check against
         * ... */
        ice_cfg_phy_fc(pi, cfg, phy->curr_user_fc_req);

        cfg->caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT | ICE_AQ_PHY_ENA_LINK;

        err = ice_aq_set_phy_cfg(&pf->hw, pi, cfg, NULL);

                        vsi->vsi_num, err);
/* ice_check_media_subtask - Check for media */
        if (!test_bit(ICE_FLAG_NO_MEDIA, pf->flags))
                return;

        pi = vsi->port_info;

        ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err);

        if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
                if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state))

                if (test_bit(ICE_VSI_DOWN, vsi->state) &&
                    test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags))

                clear_bit(ICE_FLAG_NO_MEDIA, pf->flags);

/* in ice_service_task_recovery_mode(): */
        set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);

        mod_timer(&pf->serv_tmr, jiffies + msecs_to_jiffies(100));

/* ice_service_task - manage and run subtasks */
        if (pf->health_reporters.tx_hang_buf.tx_ring) {

                pf->health_reporters.tx_hang_buf.tx_ring = NULL;

        if (ice_is_reset_in_progress(pf->state) ||
            test_bit(ICE_SUSPENDED, pf->state) ||
            test_bit(ICE_NEEDS_RESTART, pf->state)) {

        if (test_and_clear_bit(ICE_AUX_ERR_PENDING, pf->state)) {

                set_bit(IIDC_EVENT_CRIT_ERR, event->type);

                swap(event->reg, pf->oicr_err_reg);

        if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags))

        if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags))

        if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) {

                set_bit(IIDC_EVENT_AFTER_MTU_CHANGE, event->type);

        if (time_after(jiffies, (start_time + pf->serv_tmr_period)) ||
            test_bit(ICE_MDD_EVENT_PENDING, pf->state) ||
            test_bit(ICE_VFLR_EVENT_PENDING, pf->state) ||
            test_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state) ||
            test_bit(ICE_FD_VF_FLUSH_CTX, pf->state) ||
            test_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state) ||
            test_bit(ICE_ADMINQ_EVENT_PENDING, pf->state))
                mod_timer(&pf->serv_tmr, jiffies);
/* ice_set_ctrlq_len - helper function to set controlq length */
        hw->adminq.num_rq_entries = ICE_AQ_LEN;
        hw->adminq.num_sq_entries = ICE_AQ_LEN;
        hw->adminq.rq_buf_size = ICE_AQ_MAX_BUF_LEN;
        hw->adminq.sq_buf_size = ICE_AQ_MAX_BUF_LEN;
        hw->mailboxq.num_rq_entries = PF_MBX_ARQLEN_ARQLEN_M;
        hw->mailboxq.num_sq_entries = ICE_MBXSQ_LEN;
        hw->mailboxq.rq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
        hw->mailboxq.sq_buf_size = ICE_MBXQ_MAX_BUF_LEN;
        hw->sbq.num_rq_entries = ICE_SBQ_LEN;
        hw->sbq.num_sq_entries = ICE_SBQ_LEN;
        hw->sbq.rq_buf_size = ICE_SBQ_MAX_BUF_LEN;
        hw->sbq.sq_buf_size = ICE_SBQ_MAX_BUF_LEN;

/* ice_schedule_reset - schedule a reset */
        if (test_bit(ICE_RESET_FAILED, pf->state)) {

                return -EIO;

        if (ice_is_reset_in_progress(pf->state)) {

                return -EBUSY;

                set_bit(ICE_PFR_REQ, pf->state);

                set_bit(ICE_CORER_REQ, pf->state);

                set_bit(ICE_GLOBR_REQ, pf->state);

                return -EINVAL;

/* ice_irq_affinity_notify - Callback for affinity changes */
        cpumask_copy(&q_vector->affinity_mask, mask);

/* ice_irq_affinity_release - Callback for affinity notifier release */

/* ice_vsi_ena_irq - Enable IRQ for the given VSI */
        struct ice_hw *hw = &vsi->back->hw;

                ice_irq_dynamic_ena(hw, vsi, vsi->q_vectors[i]);
/* ice_vsi_req_irq_msix - get MSI-X vectors from the OS for the VSI */
        int q_vectors = vsi->num_q_vectors;
        struct ice_pf *pf = vsi->back;

                struct ice_q_vector *q_vector = vsi->q_vectors[vector];

                irq_num = q_vector->irq.virq;

                if (q_vector->tx.tx_ring && q_vector->rx.rx_ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "TxRx", rx_int_idx++);

                } else if (q_vector->rx.rx_ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "rx", rx_int_idx++);
                } else if (q_vector->tx.tx_ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name) - 1,
                                 "%s-%s-%d", basename, "tx", tx_int_idx++);

                if (vsi->type == ICE_VSI_CTRL && vsi->vf)
                        err = devm_request_irq(dev, irq_num, vsi->irq_handler,
                                               IRQF_SHARED, q_vector->name,

                        err = devm_request_irq(dev, irq_num, vsi->irq_handler,
                                               0, q_vector->name, q_vector);

                        netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",

                affinity_notify = &q_vector->affinity_notify;
                affinity_notify->notify = ice_irq_affinity_notify;
                affinity_notify->release = ice_irq_affinity_release;

                irq_update_affinity_hint(irq_num, &q_vector->affinity_mask);

                netdev_err(vsi->netdev, "Failed to setup CPU RMAP on VSI %u: %pe\n",
                           vsi->vsi_num, ERR_PTR(err));

        vsi->irqs_ready = true;

free_q_irqs:
        while (vector--) {
                irq_num = vsi->q_vectors[vector]->irq.virq;

                devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]);
        }
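/* Editor's illustrative sketch (not driver code): the `while (vector--)`
 * unwind used above. When the Nth acquisition fails, everything acquired
 * before it is released in reverse order, and the loop counter itself
 * records how far setup got. Names are hypothetical; malloc() stands in
 * for request_irq() and similar calls.
 */
#include <stdlib.h>

static int acquire_all(void **res, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                res[i] = malloc(64);    /* stand-in for a real acquisition */
                if (!res[i])
                        goto free_prev;
        }
        return 0;

free_prev:
        while (i--) {           /* i was the first index that failed */
                free(res[i]);
                res[i] = NULL;
        }
        return -1;
}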
/* ice_xdp_alloc_setup_rings - Allocate and setup Tx rings for XDP */
        struct device *dev = ice_pf_to_dev(vsi->back);

                u16 xdp_q_idx = vsi->alloc_txq + i;

                xdp_ring->ring_stats = ring_stats;
                xdp_ring->q_index = xdp_q_idx;
                xdp_ring->reg_idx = vsi->txq_map[xdp_q_idx];
                xdp_ring->vsi = vsi;
                xdp_ring->netdev = NULL;
                xdp_ring->dev = dev;
                xdp_ring->count = vsi->num_tx_desc;
                WRITE_ONCE(vsi->xdp_rings[i], xdp_ring);

                spin_lock_init(&xdp_ring->tx_lock);
                for (j = 0; j < xdp_ring->count; j++) {

                        tx_desc->cmd_type_offset_bsz = 0;

        for (; i >= 0; i--) {
                if (vsi->xdp_rings[i] && vsi->xdp_rings[i]->desc) {
                        kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
                        vsi->xdp_rings[i]->ring_stats = NULL;
                        ice_free_tx_ring(vsi->xdp_rings[i]);
                }
        }
        return -ENOMEM;

/* ice_vsi_assign_bpf_prog - set or clear bpf prog pointer on VSI */
        old_prog = xchg(&vsi->xdp_prog, prog);

                WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);

/* in ice_xdp_ring_from_qid(): */
                return vsi->xdp_rings[qid % vsi->num_xdp_txq];

        q_vector = vsi->rx_rings[qid]->q_vector;
        ice_for_each_tx_ring(ring, q_vector->tx)

/* ice_map_xdp_rings - Map XDP rings to interrupt vectors */
        int xdp_rings_rem = vsi->num_xdp_txq;

                struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];

                xdp_rings_per_v = DIV_ROUND_UP(xdp_rings_rem,
                                               vsi->num_q_vectors - v_idx);
                q_base = vsi->num_xdp_txq - xdp_rings_rem;

                        struct ice_tx_ring *xdp_ring = vsi->xdp_rings[q_id];

                        xdp_ring->q_vector = q_vector;
                        xdp_ring->next = q_vector->tx.tx_ring;
                        q_vector->tx.tx_ring = xdp_ring;

                xdp_rings_rem -= xdp_rings_per_v;

                vsi->rx_rings[q_idx]->xdp_ring = ice_xdp_ring_from_qid(vsi,
                                                                       q_idx);
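/* Editor's illustrative sketch (not driver code): the ring-distribution
 * arithmetic above. Taking DIV_ROUND_UP of the remaining rings over the
 * remaining vectors spreads `nrings` across `nvecs` so that no vector gets
 * more than one extra ring, e.g. 10 rings over 4 vectors gives 3, 3, 2, 2.
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static void map_rings(int nrings, int nvecs)
{
        int rem = nrings;

        for (int v = 0; v < nvecs; v++) {
                int per_v = DIV_ROUND_UP(rem, nvecs - v);
                int base = nrings - rem;        /* first ring for this vector */

                printf("vector %d: rings %d..%d\n", v, base, base + per_v - 1);
                rem -= per_v;
        }
}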
/* ice_prepare_xdp_rings - Allocate, configure and setup Tx rings for XDP */
        struct ice_pf *pf = vsi->back;
        struct ice_qs_cfg xdp_qs_cfg = {
                .qs_mutex = &pf->avail_q_mutex,
                .pf_map = pf->avail_txqs,
                .pf_map_size = pf->max_pf_txqs,
                .q_count = vsi->num_xdp_txq,

                .vsi_map = vsi->txq_map,
                .vsi_map_offset = vsi->alloc_txq,

        vsi->xdp_rings = devm_kcalloc(dev, vsi->num_xdp_txq,
                                      sizeof(*vsi->xdp_rings), GFP_KERNEL);
        if (!vsi->xdp_rings)
                return -ENOMEM;

        vsi->xdp_mapping_mode = xdp_qs_cfg.mapping_mode;

                netdev_warn(vsi->netdev,

        for (i = 0; i < vsi->tc_cfg.numtc; i++)
                max_txqs[i] = vsi->num_txq + vsi->num_xdp_txq;

        status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,

        /* ... this flow is a subject of both ethtool -L and ndo_bpf flows;
         * VSI rebuild that happens under ethtool -L can expose us to
         * ... bpf_prog pointers from vsi->xdp_prog and calling bpf_prog_put
         * ... */

                if (vsi->xdp_rings[i]) {
                        kfree_rcu(vsi->xdp_rings[i], rcu);
                        vsi->xdp_rings[i] = NULL;
                }

        mutex_lock(&pf->avail_q_mutex);

                clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
                vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;

        mutex_unlock(&pf->avail_q_mutex);

        devm_kfree(dev, vsi->xdp_rings);
        return -ENOMEM;
/* ice_destroy_xdp_rings - undo the configuration made by ice_prepare_xdp_rings */
        struct ice_pf *pf = vsi->back;

                struct ice_q_vector *q_vector = vsi->q_vectors[v_idx];

                ice_for_each_tx_ring(ring, q_vector->tx)
                        if (!ring->tx_buf || !ice_ring_is_xdp(ring))

                q_vector->tx.tx_ring = ring;

        mutex_lock(&pf->avail_q_mutex);

                clear_bit(vsi->txq_map[i + vsi->alloc_txq], pf->avail_txqs);
                vsi->txq_map[i + vsi->alloc_txq] = ICE_INVAL_Q_INDEX;

        mutex_unlock(&pf->avail_q_mutex);

                if (vsi->xdp_rings[i]) {
                        if (vsi->xdp_rings[i]->desc) {

                                ice_free_tx_ring(vsi->xdp_rings[i]);

                        kfree_rcu(vsi->xdp_rings[i]->ring_stats, rcu);
                        vsi->xdp_rings[i]->ring_stats = NULL;
                        kfree_rcu(vsi->xdp_rings[i], rcu);
                        vsi->xdp_rings[i] = NULL;

        devm_kfree(ice_pf_to_dev(pf), vsi->xdp_rings);
        vsi->xdp_rings = NULL;

        for (i = 0; i < vsi->tc_cfg.numtc; i++)
                max_txqs[i] = vsi->num_txq;

        vsi->num_xdp_txq = 0;

        return ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,

/* ice_vsi_rx_napi_schedule - Schedule napi on RX queues from VSI */
                struct ice_rx_ring *rx_ring = vsi->rx_rings[i];

                if (READ_ONCE(rx_ring->xsk_pool))
                        napi_schedule(&rx_ring->q_vector->napi);

/* ice_vsi_determine_xdp_res - figure out how many Tx qs can XDP have
 * ... -ENOMEM otherwise
 */
        u16 avail = ice_get_avail_txq_count(vsi->back);

                return -ENOMEM;

        if (vsi->type == ICE_VSI_SF)
                avail = vsi->alloc_txq;

        vsi->num_xdp_txq = min_t(u16, avail, cpus);

        if (vsi->num_xdp_txq < cpus)

/* ice_max_xdp_frame_size - returns the maximum allowed frame size for XDP */
        if (test_bit(ICE_FLAG_LEGACY_RX, vsi->back->flags))
/* ice_xdp_setup_prog - Add or remove XDP eBPF program */
        unsigned int frame_size = vsi->netdev->mtu + ICE_ETH_PKT_HDR_PAD;

        if (prog && !prog->aux->xdp_has_frags) {

                        return -EOPNOTSUPP;

            test_bit(ICE_VSI_REBUILD_PENDING, vsi->state)) {

        if_running = netif_running(vsi->netdev) &&
                     !test_and_set_bit(ICE_VSI_DOWN, vsi->state);

                xdp_features_set_redirect_target(vsi->netdev, true);
                /* reallocate Rx queues that are used for zero-copy */

                xdp_features_clear_redirect_target(vsi->netdev);

                /* reallocate Rx queues that were used for zero-copy */

        return (ret || xdp_ring_err) ? -ENOMEM : 0;

/* ice_xdp_safe_mode - XDP handler for safe mode */
        NL_SET_ERR_MSG_MOD(xdp->extack,

        return -EOPNOTSUPP;

/* ice_xdp - implements XDP handler */
        struct ice_vsi *vsi = np->vsi;

        if (vsi->type != ICE_VSI_PF && vsi->type != ICE_VSI_SF) {
                NL_SET_ERR_MSG_MOD(xdp->extack, "XDP can be loaded only on PF or SF VSI");
                return -EINVAL;
        }

        mutex_lock(&vsi->xdp_state_lock);

        switch (xdp->command) {

                ret = ice_xdp_setup_prog(vsi, xdp->prog, xdp->extack);

                ret = ice_xsk_pool_setup(vsi, xdp->xsk.pool, xdp->xsk.queue_id);

                ret = -EINVAL;

        mutex_unlock(&vsi->xdp_state_lock);
/* ice_ena_misc_vector - enable the non-queue interrupts */
        struct ice_hw *hw = &pf->hw;

        /* Disable anti-spoof detection interrupt to prevent spurious event
         * interrupts during a function reset. Anti-spoof functionally is
         * ... */

        wr32(hw, GLINT_DYN_CTL(pf->oicr_irq.index),

        if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)
                return;

        wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),

/* ice_ll_ts_intr - ll_ts interrupt handler */
        hw = &pf->hw;
        tx = &pf->ptp.port.tx;
        spin_lock_irqsave(&tx->lock, flags);

        idx = find_next_bit_wrap(tx->in_use, tx->len,
                                 tx->last_ll_ts_idx_read + 1);
        if (idx != tx->len)

        spin_unlock_irqrestore(&tx->lock, flags);

        wr32(hw, GLINT_DYN_CTL(pf->ll_ts_irq.index + pf_intr_start_offset),
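/* Editor's illustrative sketch (not driver code): a wrap-around "find next
 * set bit" in the spirit of the find_next_bit_wrap() call above, over a
 * plain unsigned long (so `len` must not exceed the word width). The search
 * starts just after the last index serviced, wraps past the end, and
 * returns `len` when no bit is set.
 */
static unsigned int find_next_set_bit_wrap(unsigned long mask,
                                           unsigned int len,
                                           unsigned int start)
{
        for (unsigned int n = 0; n < len; n++) {
                unsigned int idx = (start + n) % len;

                if (mask & (1UL << idx))
                        return idx;
        }
        return len;     /* no bit set anywhere in [0, len) */
}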
/* ice_misc_intr - misc interrupt handler */
        struct ice_hw *hw = &pf->hw;

        set_bit(ICE_ADMINQ_EVENT_PENDING, pf->state);
        set_bit(ICE_MAILBOXQ_EVENT_PENDING, pf->state);
        set_bit(ICE_SIDEBANDQ_EVENT_PENDING, pf->state);

                pf->sw_int_count++;

                set_bit(ICE_MDD_EVENT_PENDING, pf->state);

                if (test_bit(ICE_VF_RESETS_DISABLED, pf->state)) {

                        set_bit(ICE_VFLR_EVENT_PENDING, pf->state);

                        pf->corer_count++;

                        pf->globr_count++;

                        pf->empr_count++;

                /* ... pf->state so that the service task can start a reset/rebuild. */
                if (!test_and_set_bit(ICE_RESET_OICR_RECV, pf->state)) {

                                set_bit(ICE_CORER_RECV, pf->state);

                                set_bit(ICE_GLOBR_RECV, pf->state);

                                set_bit(ICE_EMPR_RECV, pf->state);

                        /* hw->reset_ongoing indicates whether the hardware is
                         * ... ICE_RESET_OICR_RECV in pf->state indicates
                         * ... */
                        hw->reset_ongoing = true;

            pf->hw.dev_caps.ts_dev_info.ts_ll_int_read) {
                struct ice_ptp_tx *tx = &pf->ptp.port.tx;

                spin_lock_irqsave(&tx->lock, flags);
                idx = find_next_bit_wrap(tx->in_use, tx->len,
                                         tx->last_ll_ts_idx_read + 1);
                if (idx != tx->len)

                spin_unlock_irqrestore(&tx->lock, flags);

                set_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread);

                u8 tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned;

                pf->ptp.ext_ts_irq |= gltsyn_stat &

                pf->oicr_err_reg |= oicr;
                set_bit(ICE_AUX_ERR_PENDING, pf->state);

                        set_bit(ICE_PFR_REQ, pf->state);

/* ice_misc_intr_thread_fn - misc interrupt thread function */
        hw = &pf->hw;

        if (ice_is_reset_in_progress(pf->state))

        if (test_and_clear_bit(ICE_MISC_THREAD_TX_TSTAMP, pf->misc_thread)) {
                /* ... re-arm the interrupt to trigger again. */
/* ice_dis_ctrlq_interrupts - disable control queue interrupts */

/* ice_free_irq_msix_ll_ts - Unroll ll_ts vector setup */
        int irq_num = pf->ll_ts_irq.virq;

        ice_free_irq(pf, pf->ll_ts_irq);

/* ice_free_irq_msix_misc - Unroll misc vector setup */
        int misc_irq_num = pf->oicr_irq.virq;
        struct ice_hw *hw = &pf->hw;

        ice_free_irq(pf, pf->oicr_irq);
        if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)

/* ice_ena_ctrlq_interrupts - enable control queue interrupts */
        if (!hw->dev_caps.ts_dev_info.ts_ll_int_read) {

/* ice_req_irq_msix_misc - Setup the misc vector to handle non queue events
 * ... non-queue interrupts, e.g. AdminQ and errors. This is not used ...
 */
        struct ice_hw *hw = &pf->hw;

        if (!pf->int_name[0])
                snprintf(pf->int_name, sizeof(pf->int_name) - 1, "%s-%s:misc",
                         dev_driver_string(dev), dev_name(dev));

        if (!pf->int_name_ll_ts[0])
                snprintf(pf->int_name_ll_ts, sizeof(pf->int_name_ll_ts) - 1,
                         "%s-%s:ll_ts", dev_driver_string(dev), dev_name(dev));

        if (ice_is_reset_in_progress(pf->state))

        pf->oicr_irq = irq;
        err = devm_request_threaded_irq(dev, pf->oicr_irq.virq, ice_misc_intr,

                                        pf->int_name, pf);

                        pf->int_name, err);
                ice_free_irq(pf, pf->oicr_irq);

        if (!pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)

        pf->ll_ts_irq = irq;
        err = devm_request_irq(dev, pf->ll_ts_irq.virq, ice_ll_ts_intr, 0,
                               pf->int_name_ll_ts, pf);

                        pf->int_name_ll_ts, err);
                ice_free_irq(pf, pf->ll_ts_irq);

        ice_ena_ctrlq_interrupts(hw, pf->oicr_irq.index);

        if (pf->hw.dev_caps.ts_dev_info.ts_ll_int_read)

                ((pf->ll_ts_irq.index + pf_intr_start_offset) &

        wr32(hw, GLINT_ITR(ICE_RX_ITR, pf->oicr_irq.index),
3577 * ice_set_ops - set netdev and ethtools ops for the given netdev
3582 struct net_device *netdev = vsi->netdev; in ice_set_ops()
3586 netdev->netdev_ops = &ice_netdev_safe_mode_ops; in ice_set_ops()
3591 netdev->netdev_ops = &ice_netdev_ops; in ice_set_ops()
3592 netdev->udp_tunnel_nic_info = &pf->hw.udp_tunnel_nic; in ice_set_ops()
3593 netdev->xdp_metadata_ops = &ice_xdp_md_ops; in ice_set_ops()
3596 if (vsi->type != ICE_VSI_PF) in ice_set_ops()
3599 netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in ice_set_ops()
3602 netdev->xdp_zc_max_segs = ICE_MAX_BUF_TXD; in ice_set_ops()
3606 * ice_set_netdev_features - set features for the given netdev
3612 bool is_dvm_ena = ice_is_dvm_ena(&pf->hw); in ice_set_netdev_features()
3620 netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA; in ice_set_netdev_features()
3621 netdev->hw_features = netdev->features; in ice_set_netdev_features()
3655 netdev->gso_partial_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM | in ice_set_netdev_features()
3658 netdev->hw_features = dflt_features | csumo_features | in ice_set_netdev_features()
3662 netdev->mpls_features = NETIF_F_HW_CSUM | in ice_set_netdev_features()
3667 netdev->features |= netdev->hw_features; in ice_set_netdev_features()
3669 netdev->hw_features |= NETIF_F_HW_TC; in ice_set_netdev_features()
3670 netdev->hw_features |= NETIF_F_LOOPBACK; in ice_set_netdev_features()
3673 netdev->hw_enc_features |= dflt_features | csumo_features | in ice_set_netdev_features()
3675 netdev->vlan_features |= dflt_features | csumo_features | in ice_set_netdev_features()
3684 netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX | in ice_set_netdev_features()
3690 netdev->hw_features |= NETIF_F_RXFCS; in ice_set_netdev_features()
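/*
 * Illustrative sketch of the feature-set layering used above (my summary,
 * not driver code): hw_features is the user-toggleable superset exposed
 * via `ethtool -K`, features is what is enabled right now, and
 * vlan_features is what stacked VLAN devices may inherit.
 */
#include <linux/netdevice.h>

static void ex_feature_layering(struct net_device *netdev)
{
	netdev->hw_features |= NETIF_F_RXCSUM;	 /* user may toggle it */
	netdev->features |= NETIF_F_RXCSUM;	 /* and it is on by default */
	netdev->vlan_features |= NETIF_F_RXCSUM; /* VLAN uppers inherit it */

	/* advertised but off by default, as with NETIF_F_LOOPBACK above */
	netdev->hw_features |= NETIF_F_LOOPBACK;
}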
3696 * ice_fill_rss_lut - Fill the RSS lookup table with default values
3710 * ice_pf_vsi_setup - Set up a PF VSI
3744 * ice_ctrl_vsi_setup - Set up a control VSI
3764 * ice_lb_vsi_setup - Set up a loopback VSI
3784 * ice_vlan_rx_add_vid - Add a VLAN ID filter to HW offload
3795 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_add_vid()
3803 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_add_vid()
3807 * all-multicast is currently enabled. in ice_vlan_rx_add_vid()
3809 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_add_vid()
3810 ret = ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3823 ret = vlan_ops->add_vlan(vsi, &vlan); in ice_vlan_rx_add_vid()
3827 /* If all-multicast is currently enabled and this VLAN ID is the only one in ice_vlan_rx_add_vid()
3828 * besides VLAN-0, we have to update the look-up type of the multicast promisc in ice_vlan_rx_add_vid()
3829 * rule for VLAN-0 from ICE_SW_LKUP_PROMISC to ICE_SW_LKUP_PROMISC_VLAN. in ice_vlan_rx_add_vid()
3831 if ((vsi->current_netdev_flags & IFF_ALLMULTI) && in ice_vlan_rx_add_vid()
3833 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3835 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_add_vid()
3840 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_add_vid()
3846 * ice_vlan_rx_kill_vid - Remove a VLAN ID filter from HW offload
3857 struct ice_vsi *vsi = np->vsi; in ice_vlan_rx_kill_vid()
3865 while (test_and_set_bit(ICE_CFG_BUSY, vsi->state)) in ice_vlan_rx_kill_vid()
3868 ret = ice_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3872 vsi->vsi_num); in ice_vlan_rx_kill_vid()
3873 vsi->current_netdev_flags |= IFF_ALLMULTI; in ice_vlan_rx_kill_vid()
3882 ret = vlan_ops->del_vlan(vsi, &vlan); in ice_vlan_rx_kill_vid()
3887 * all-multicast is enabled. in ice_vlan_rx_kill_vid()
3889 if (vsi->current_netdev_flags & IFF_ALLMULTI) in ice_vlan_rx_kill_vid()
3890 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3894 /* Update look-up type of multicast promisc rule for VLAN 0 in ice_vlan_rx_kill_vid()
3896 * all-multicast is enabled and VLAN 0 is the only VLAN rule. in ice_vlan_rx_kill_vid()
3898 if (vsi->current_netdev_flags & IFF_ALLMULTI) { in ice_vlan_rx_kill_vid()
3899 ice_fltr_clear_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3902 ice_fltr_set_vsi_promisc(&vsi->back->hw, vsi->idx, in ice_vlan_rx_kill_vid()
3908 clear_bit(ICE_CFG_BUSY, vsi->state); in ice_vlan_rx_kill_vid()
3921 list_del(&indr_priv->list); in ice_rep_indr_tc_block_unbind()
3926 * ice_tc_indir_block_unregister - Unregister TC indirect block notifications
3931 struct ice_netdev_priv *np = netdev_priv(vsi->netdev); in ice_tc_indir_block_unregister()
3938 * ice_tc_indir_block_register - Register TC indirect block notifications
3947 if (!vsi || !vsi->netdev) in ice_tc_indir_block_register()
3948 return -EINVAL; in ice_tc_indir_block_register()
3950 np = netdev_priv(vsi->netdev); in ice_tc_indir_block_register()
3952 INIT_LIST_HEAD(&np->tc_indr_block_priv_list); in ice_tc_indir_block_register()
3957 * ice_get_avail_q_count - Get count of queues in use
3977 * ice_get_avail_txq_count - Get count of Tx queues in use
3982 return ice_get_avail_q_count(pf->avail_txqs, &pf->avail_q_mutex, in ice_get_avail_txq_count()
3983 pf->max_pf_txqs); in ice_get_avail_txq_count()
3987 * ice_get_avail_rxq_count - Get count of Rx queues in use
3992 return ice_get_avail_q_count(pf->avail_rxqs, &pf->avail_q_mutex, in ice_get_avail_rxq_count()
3993 pf->max_pf_rxqs); in ice_get_avail_rxq_count()
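/*
 * Illustrative sketch: how a queue bitmap plus mutex, like pf->avail_txqs
 * and pf->avail_q_mutex used above, can hand out queue indexes. The
 * helper and the "set bit == in use" polarity are assumptions made for
 * the example, not the driver's actual accounting.
 */
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <linux/errno.h>

static int ex_claim_queue(unsigned long *qmap, struct mutex *lock, u16 size)
{
	unsigned long q;

	mutex_lock(lock);
	q = find_first_zero_bit(qmap, size);	/* first free queue index */
	if (q < size)
		set_bit(q, qmap);		/* mark it in use */
	mutex_unlock(lock);

	return q < size ? (int)q : -ENOSPC;
}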
3997 * ice_deinit_pf - Unrolls initializations done by ice_init_pf
4003 mutex_destroy(&pf->lag_mutex); in ice_deinit_pf()
4004 mutex_destroy(&pf->adev_mutex); in ice_deinit_pf()
4005 mutex_destroy(&pf->sw_mutex); in ice_deinit_pf()
4006 mutex_destroy(&pf->tc_mutex); in ice_deinit_pf()
4007 mutex_destroy(&pf->avail_q_mutex); in ice_deinit_pf()
4008 mutex_destroy(&pf->vfs.table_lock); in ice_deinit_pf()
4010 if (pf->avail_txqs) { in ice_deinit_pf()
4011 bitmap_free(pf->avail_txqs); in ice_deinit_pf()
4012 pf->avail_txqs = NULL; in ice_deinit_pf()
4015 if (pf->avail_rxqs) { in ice_deinit_pf()
4016 bitmap_free(pf->avail_rxqs); in ice_deinit_pf()
4017 pf->avail_rxqs = NULL; in ice_deinit_pf()
4020 if (pf->ptp.clock) in ice_deinit_pf()
4021 ptp_clock_unregister(pf->ptp.clock); in ice_deinit_pf()
4023 xa_destroy(&pf->dyn_ports); in ice_deinit_pf()
4024 xa_destroy(&pf->sf_nums); in ice_deinit_pf()
4028 * ice_set_pf_caps - set PFs capability flags
4033 struct ice_hw_func_caps *func_caps = &pf->hw.func_caps; in ice_set_pf_caps()
4035 clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
4036 if (func_caps->common_cap.rdma) in ice_set_pf_caps()
4037 set_bit(ICE_FLAG_RDMA_ENA, pf->flags); in ice_set_pf_caps()
4038 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
4039 if (func_caps->common_cap.dcb) in ice_set_pf_caps()
4040 set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_set_pf_caps()
4041 clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
4042 if (func_caps->common_cap.sr_iov_1_1) { in ice_set_pf_caps()
4043 set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags); in ice_set_pf_caps()
4044 pf->vfs.num_supported = min_t(int, func_caps->num_allocd_vfs, in ice_set_pf_caps()
4047 clear_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4048 if (func_caps->common_cap.rss_table_size) in ice_set_pf_caps()
4049 set_bit(ICE_FLAG_RSS_ENA, pf->flags); in ice_set_pf_caps()
4051 clear_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4052 if (func_caps->fd_fltr_guar > 0 || func_caps->fd_fltr_best_effort > 0) { in ice_set_pf_caps()
4058 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_set_pf_caps()
4059 set_bit(ICE_FLAG_FD_ENA, pf->flags); in ice_set_pf_caps()
4061 ice_alloc_fd_guar_item(&pf->hw, &unused, in ice_set_pf_caps()
4062 func_caps->fd_fltr_guar); in ice_set_pf_caps()
4064 ice_alloc_fd_shrd_item(&pf->hw, &unused, in ice_set_pf_caps()
4065 func_caps->fd_fltr_best_effort); in ice_set_pf_caps()
4068 clear_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4069 if (func_caps->common_cap.ieee_1588 && in ice_set_pf_caps()
4070 !(pf->hw.mac_type == ICE_MAC_E830)) in ice_set_pf_caps()
4071 set_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags); in ice_set_pf_caps()
4073 pf->max_pf_txqs = func_caps->common_cap.num_txq; in ice_set_pf_caps()
4074 pf->max_pf_rxqs = func_caps->common_cap.num_rxq; in ice_set_pf_caps()
4078 * ice_init_pf - Initialize general software structures (struct ice_pf)
4085 mutex_init(&pf->sw_mutex); in ice_init_pf()
4086 mutex_init(&pf->tc_mutex); in ice_init_pf()
4087 mutex_init(&pf->adev_mutex); in ice_init_pf()
4088 mutex_init(&pf->lag_mutex); in ice_init_pf()
4090 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_init_pf()
4091 spin_lock_init(&pf->aq_wait_lock); in ice_init_pf()
4092 init_waitqueue_head(&pf->aq_wait_queue); in ice_init_pf()
4094 init_waitqueue_head(&pf->reset_wait_queue); in ice_init_pf()
4097 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_init_pf()
4098 pf->serv_tmr_period = HZ; in ice_init_pf()
4099 INIT_WORK(&pf->serv_task, ice_service_task); in ice_init_pf()
4100 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_init_pf()
4102 mutex_init(&pf->avail_q_mutex); in ice_init_pf()
4103 pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL); in ice_init_pf()
4104 if (!pf->avail_txqs) in ice_init_pf()
4105 return -ENOMEM; in ice_init_pf()
4107 pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL); in ice_init_pf()
4108 if (!pf->avail_rxqs) { in ice_init_pf()
4109 bitmap_free(pf->avail_txqs); in ice_init_pf()
4110 pf->avail_txqs = NULL; in ice_init_pf()
4111 return -ENOMEM; in ice_init_pf()
4114 mutex_init(&pf->vfs.table_lock); in ice_init_pf()
4115 hash_init(pf->vfs.table); in ice_init_pf()
4117 wr32(&pf->hw, E830_MBX_PF_IN_FLIGHT_VF_MSGS_THRESH, in ice_init_pf()
4120 ice_mbx_init_snapshot(&pf->hw); in ice_init_pf()
4122 xa_init(&pf->dyn_ports); in ice_init_pf()
4123 xa_init(&pf->sf_nums); in ice_init_pf()
4129 * ice_is_wol_supported - check if WoL is supported
4145 return !(BIT(hw->port_info->lport) & wol_ctrl); in ice_is_wol_supported()
4149 * ice_vsi_recfg_qs - Change the number of queues on a VSI
4155 * Only change the number of queues if new_tx or new_rx is non-zero.
4161 struct ice_pf *pf = vsi->back; in ice_vsi_recfg_qs()
4165 return -EINVAL; in ice_vsi_recfg_qs()
4167 while (test_and_set_bit(ICE_CFG_BUSY, pf->state)) { in ice_vsi_recfg_qs()
4168 timeout--; in ice_vsi_recfg_qs()
4170 return -EBUSY; in ice_vsi_recfg_qs()
4175 vsi->req_txq = (u16)new_tx; in ice_vsi_recfg_qs()
4177 vsi->req_rxq = (u16)new_rx; in ice_vsi_recfg_qs()
4180 if (!netif_running(vsi->netdev)) { in ice_vsi_recfg_qs()
4194 if (vsi->tc_cfg.ena_tc & BIT(i)) in ice_vsi_recfg_qs()
4195 netdev_set_tc_queue(vsi->netdev, in ice_vsi_recfg_qs()
4196 vsi->tc_cfg.tc_info[i].netdev_tc, in ice_vsi_recfg_qs()
4197 vsi->tc_cfg.tc_info[i].qcount_tx, in ice_vsi_recfg_qs()
4198 vsi->tc_cfg.tc_info[i].qoffset); in ice_vsi_recfg_qs()
4208 clear_bit(ICE_CFG_BUSY, pf->state); in ice_vsi_recfg_qs()
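/*
 * Illustrative sketch of the ICE_CFG_BUSY guard used above: the bit acts
 * as a non-blocking lock around queue reconfiguration, and the bounded
 * retry keeps a stuck flag from wedging the caller. The retry count and
 * sleep range are assumptions for the example.
 */
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>

static int ex_cfg_guard(unsigned long *state)
{
	int timeout = 50;

	while (test_and_set_bit(ICE_CFG_BUSY, state)) {
		if (!timeout--)
			return -EBUSY;		/* another task holds it */
		usleep_range(1000, 2000);	/* back off, then retry */
	}

	/* ...reconfigure queues here... */

	clear_bit(ICE_CFG_BUSY, state);		/* release the guard */
	return 0;
}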
4213 * ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
4233 hw = &pf->hw; in ice_set_safe_mode_vlan_cfg()
4234 ctxt->info = vsi->info; in ice_set_safe_mode_vlan_cfg()
4236 ctxt->info.valid_sections = in ice_set_safe_mode_vlan_cfg()
4241 /* disable VLAN anti-spoof */ in ice_set_safe_mode_vlan_cfg()
4242 ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA << in ice_set_safe_mode_vlan_cfg()
4246 ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA; in ice_set_safe_mode_vlan_cfg()
4249 ctxt->info.inner_vlan_flags = ICE_AQ_VSI_INNER_VLAN_TX_MODE_ALL | in ice_set_safe_mode_vlan_cfg()
4252 status = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_set_safe_mode_vlan_cfg()
4254 dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %d aq_err %s\n", in ice_set_safe_mode_vlan_cfg()
4255 status, ice_aq_str(hw->adminq.sq_last_status)); in ice_set_safe_mode_vlan_cfg()
4257 vsi->info.sec_flags = ctxt->info.sec_flags; in ice_set_safe_mode_vlan_cfg()
4258 vsi->info.sw_flags2 = ctxt->info.sw_flags2; in ice_set_safe_mode_vlan_cfg()
4259 vsi->info.inner_vlan_flags = ctxt->info.inner_vlan_flags; in ice_set_safe_mode_vlan_cfg()
4266 * ice_log_pkg_init - log result of DDP package load
4272 struct ice_pf *pf = hw->back; in ice_log_pkg_init()
4280 hw->active_pkg_name, in ice_log_pkg_init()
4281 hw->active_pkg_ver.major, in ice_log_pkg_init()
4282 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4283 hw->active_pkg_ver.update, in ice_log_pkg_init()
4284 hw->active_pkg_ver.draft); in ice_log_pkg_init()
4288 hw->active_pkg_name, in ice_log_pkg_init()
4289 hw->active_pkg_ver.major, in ice_log_pkg_init()
4290 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4291 hw->active_pkg_ver.update, in ice_log_pkg_init()
4292 hw->active_pkg_ver.draft); in ice_log_pkg_init()
4296 hw->active_pkg_name, in ice_log_pkg_init()
4297 hw->active_pkg_ver.major, in ice_log_pkg_init()
4298 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4303 hw->active_pkg_name, in ice_log_pkg_init()
4304 hw->active_pkg_ver.major, in ice_log_pkg_init()
4305 hw->active_pkg_ver.minor, in ice_log_pkg_init()
4306 hw->active_pkg_ver.update, in ice_log_pkg_init()
4307 hw->active_pkg_ver.draft, in ice_log_pkg_init()
4308 hw->pkg_name, in ice_log_pkg_init()
4309 hw->pkg_ver.major, in ice_log_pkg_init()
4310 hw->pkg_ver.minor, in ice_log_pkg_init()
4311 hw->pkg_ver.update, in ice_log_pkg_init()
4312 hw->pkg_ver.draft); in ice_log_pkg_init()
4347 * ice_load_pkg - load/reload the DDP Package file
4359 struct ice_hw *hw = &pf->hw; in ice_load_pkg()
4362 if (firmware && !hw->pkg_copy) { in ice_load_pkg()
4363 state = ice_copy_and_init_pkg(hw, firmware->data, in ice_load_pkg()
4364 firmware->size); in ice_load_pkg()
4366 } else if (!firmware && hw->pkg_copy) { in ice_load_pkg()
4368 state = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size); in ice_load_pkg()
4376 clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4383 set_bit(ICE_FLAG_ADV_FEATURES, pf->flags); in ice_load_pkg()
4387 * ice_verify_cacheline_size - verify driver's assumption of 64-byte cache lines
4396 if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M) in ice_verify_cacheline_size()
4402 * ice_send_version - update firmware with driver version
4417 return ice_aq_send_driver_ver(&pf->hw, &dv, NULL); in ice_send_version()
4421 * ice_init_fdir - Initialize flow director VSI and configuration
4435 ctrl_vsi = ice_ctrl_vsi_setup(pf, pf->hw.port_info); in ice_init_fdir()
4438 return -ENOMEM; in ice_init_fdir()
4447 mutex_init(&pf->hw.fdir_fltr_lock); in ice_init_fdir()
4456 ice_fdir_release_flows(&pf->hw); in ice_init_fdir()
4460 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_init_fdir()
4461 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_init_fdir()
4462 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_init_fdir()
4476 if (pf->ctrl_vsi_idx != ICE_NO_VSI) { in ice_deinit_fdir()
4477 pf->vsi[pf->ctrl_vsi_idx] = NULL; in ice_deinit_fdir()
4478 pf->ctrl_vsi_idx = ICE_NO_VSI; in ice_deinit_fdir()
4481 mutex_destroy(&pf->hw.fdir_fltr_lock); in ice_deinit_fdir()
4485 * ice_get_opt_fw_name - return optional firmware file name or NULL
4491 * followed by an EUI-64 identifier (PCIe Device Serial Number) in ice_get_opt_fw_name()
4493 struct pci_dev *pdev = pf->pdev; in ice_get_opt_fw_name()
4508 snprintf(opt_fw_filename, NAME_MAX, "%sice-%016llx.pkg", in ice_get_opt_fw_name()
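/*
 * Illustrative sketch of the naming scheme above, assuming pci_get_dsn()
 * returns a valid EUI-64; the real helper also handles the missing-DSN
 * case and prefixes the firmware search path.
 */
#include <linux/pci.h>

static void ex_opt_fw_name(struct pci_dev *pdev, char *buf, size_t len)
{
	u64 dsn = pci_get_dsn(pdev);	/* PCIe Device Serial Number */

	/* yields e.g. "ice-0123456789abcdef.pkg" */
	snprintf(buf, len, "ice-%016llx.pkg", dsn);
}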
4515 * ice_request_fw - request the DDP package firmware file
4527 /* optional device-specific DDP (if present) overrides the default DDP in ice_request_fw()
4545 * ice_init_tx_topology - performs Tx topology initialization
4554 u8 num_tx_sched_layers = hw->num_tx_sched_layers; in ice_init_tx_topology()
4555 struct ice_pf *pf = hw->back; in ice_init_tx_topology()
4560 err = ice_cfg_tx_topo(hw, firmware->data, firmware->size); in ice_init_tx_topology()
4562 if (hw->num_tx_sched_layers > num_tx_sched_layers) in ice_init_tx_topology()
4567 * a CORER and we need to re-init hw in ice_init_tx_topology()
4573 } else if (err == -EIO) { in ice_init_tx_topology()
4574 …dev_info(dev, "DDP package does not support Tx scheduling layers switching feature - please update… in ice_init_tx_topology()
4581 * ice_init_supported_rxdids - Initialize supported Rx descriptor IDs
4585 * The pf->supported_rxdids bitmap is used to indicate to VFs which descriptor
4590 * Note that the legacy 32-byte RXDID 0 is always supported but is not listed
4591 * in the DDP package. The 16-byte legacy descriptor is never supported by
4596 pf->supported_rxdids = BIT(ICE_RXDID_LEGACY_1); in ice_init_supported_rxdids()
4604 pf->supported_rxdids |= BIT(i); in ice_init_supported_rxdids()
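/*
 * Illustrative sketch: with pf->supported_rxdids built as above, checking
 * a VF's requested Rx descriptor format reduces to a single bit test (the
 * helper name is hypothetical; struct ice_pf comes from this driver's
 * headers).
 */
#include <linux/bits.h>

static bool ex_rxdid_supported(struct ice_pf *pf, u32 rxdid)
{
	if (rxdid >= BITS_PER_TYPE(pf->supported_rxdids))
		return false;
	return pf->supported_rxdids & BIT_ULL(rxdid);
}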
4609 * ice_init_ddp_config - DDP related configuration
4649 * ice_print_wake_reason - show the wake up cause in the log
4654 u32 wus = pf->wakeup_reason; in ice_print_wake_reason()
4676 * ice_pf_fwlog_update_module - update 1 module
4683 struct ice_hw *hw = &pf->hw; in ice_pf_fwlog_update_module()
4685 hw->fwlog_cfg.module_entries[module].log_level = log_level; in ice_pf_fwlog_update_module()
4689 * ice_register_netdev - register netdev
4696 if (!vsi || !vsi->netdev) in ice_register_netdev()
4697 return -EIO; in ice_register_netdev()
4699 err = register_netdev(vsi->netdev); in ice_register_netdev()
4703 set_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_register_netdev()
4704 netif_carrier_off(vsi->netdev); in ice_register_netdev()
4705 netif_tx_stop_all_queues(vsi->netdev); in ice_register_netdev()
4712 if (!vsi || !vsi->netdev) in ice_unregister_netdev()
4715 unregister_netdev(vsi->netdev); in ice_unregister_netdev()
4716 clear_bit(ICE_VSI_NETDEV_REGISTERED, vsi->state); in ice_unregister_netdev()
4720 * ice_cfg_netdev - Allocate, configure and register a netdev
4731 netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq, in ice_cfg_netdev()
4732 vsi->alloc_rxq); in ice_cfg_netdev()
4734 return -ENOMEM; in ice_cfg_netdev()
4736 set_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_cfg_netdev()
4737 vsi->netdev = netdev; in ice_cfg_netdev()
4739 np->vsi = vsi; in ice_cfg_netdev()
4744 if (vsi->type == ICE_VSI_PF) { in ice_cfg_netdev()
4745 SET_NETDEV_DEV(netdev, ice_pf_to_dev(vsi->back)); in ice_cfg_netdev()
4746 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_cfg_netdev()
4750 netdev->priv_flags |= IFF_UNICAST_FLT; in ice_cfg_netdev()
4753 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_cfg_netdev()
4755 netdev->max_mtu = ICE_MAX_MTU; in ice_cfg_netdev()
4762 clear_bit(ICE_VSI_NETDEV_ALLOCD, vsi->state); in ice_decfg_netdev()
4763 free_netdev(vsi->netdev); in ice_decfg_netdev()
4764 vsi->netdev = NULL; in ice_decfg_netdev()
4770 struct ice_hw *hw = &pf->hw; in ice_init_dev()
4778 * set in pf->state, which will cause ice_is_safe_mode to return in ice_init_dev()
4796 pf->hw.udp_tunnel_nic.set_port = ice_udp_tunnel_set_port; in ice_init_dev()
4797 pf->hw.udp_tunnel_nic.unset_port = ice_udp_tunnel_unset_port; in ice_init_dev()
4798 pf->hw.udp_tunnel_nic.flags = UDP_TUNNEL_NIC_INFO_MAY_SLEEP; in ice_init_dev()
4799 pf->hw.udp_tunnel_nic.shared = &pf->hw.udp_tunnel_shared; in ice_init_dev()
4800 if (pf->hw.tnl.valid_count[TNL_VXLAN]) { in ice_init_dev()
4801 pf->hw.udp_tunnel_nic.tables[0].n_entries = in ice_init_dev()
4802 pf->hw.tnl.valid_count[TNL_VXLAN]; in ice_init_dev()
4803 pf->hw.udp_tunnel_nic.tables[0].tunnel_types = in ice_init_dev()
4806 if (pf->hw.tnl.valid_count[TNL_GENEVE]) { in ice_init_dev()
4807 pf->hw.udp_tunnel_nic.tables[1].n_entries = in ice_init_dev()
4808 pf->hw.tnl.valid_count[TNL_GENEVE]; in ice_init_dev()
4809 pf->hw.udp_tunnel_nic.tables[1].tunnel_types = in ice_init_dev()
4816 err = -EIO; in ice_init_dev()
4844 ice_deinit_hw(&pf->hw); in ice_deinit_dev()
4847 ice_reset(&pf->hw, ICE_RESET_PFR); in ice_deinit_dev()
4848 pci_wait_for_pending_transaction(pf->pdev); in ice_deinit_dev()
4860 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_init_features()
4870 /* Note: Flow director init failure is non-fatal to load */ in ice_init_features()
4874 /* Note: DCB init failure is non-fatal to load */ in ice_init_features()
4876 clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags); in ice_init_features()
4877 clear_bit(ICE_FLAG_DCB_ENA, pf->flags); in ice_init_features()
4879 ice_cfg_lldp_mib_change(&pf->hw, true); in ice_init_features()
4894 if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) in ice_deinit_features()
4895 ice_cfg_lldp_mib_change(&pf->hw, false); in ice_deinit_features()
4899 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_deinit_features()
4901 if (test_bit(ICE_FLAG_DPLL, pf->flags)) in ice_deinit_features()
4903 if (pf->eswitch_mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) in ice_deinit_features()
4904 xa_destroy(&pf->eswitch.reprs); in ice_deinit_features()
4910 pf->wakeup_reason = rd32(&pf->hw, PFPM_WUS); in ice_init_wakeup()
4916 wr32(&pf->hw, PFPM_WUS, U32_MAX); in ice_init_wakeup()
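/* PFPM_WUS latches the wake-up cause read above; writing all ones is the
 * usual write-1-to-clear acknowledgment for this register family, so no
 * stale wake event survives into the next suspend cycle (an inference
 * from the read-then-clear sequence, not a datasheet statement).
 */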
4927 err = ice_init_link_events(pf->hw.port_info); in ice_init_link()
4934 err = ice_init_nvm_phy_type(pf->hw.port_info); in ice_init_link()
4939 err = ice_update_link_info(pf->hw.port_info); in ice_init_link()
4943 ice_init_link_dflt_override(pf->hw.port_info); in ice_init_link()
4946 pf->hw.port_info->phy.link_info.link_cfg_err); in ice_init_link()
4949 if (pf->hw.port_info->phy.link_info.link_info & in ice_init_link()
4952 err = ice_init_phy_user_cfg(pf->hw.port_info); in ice_init_link()
4956 if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) { in ice_init_link()
4963 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_init_link()
4971 bool dvm = ice_is_dvm_ena(&pf->hw); in ice_init_pf_sw()
4976 pf->first_sw = kzalloc(sizeof(*pf->first_sw), GFP_KERNEL); in ice_init_pf_sw()
4977 if (!pf->first_sw) in ice_init_pf_sw()
4978 return -ENOMEM; in ice_init_pf_sw()
4980 if (pf->hw.evb_veb) in ice_init_pf_sw()
4981 pf->first_sw->bridge_mode = BRIDGE_MODE_VEB; in ice_init_pf_sw()
4983 pf->first_sw->bridge_mode = BRIDGE_MODE_VEPA; in ice_init_pf_sw()
4985 pf->first_sw->pf = pf; in ice_init_pf_sw()
4988 pf->first_sw->sw_id = pf->hw.port_info->sw_id; in ice_init_pf_sw()
4990 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_init_pf_sw()
4994 vsi = ice_pf_vsi_setup(pf, pf->hw.port_info); in ice_init_pf_sw()
4996 err = -ENOMEM; in ice_init_pf_sw()
5004 kfree(pf->first_sw); in ice_init_pf_sw()
5016 kfree(pf->first_sw); in ice_deinit_pf_sw()
5023 pf->num_alloc_vsi = pf->hw.func_caps.guar_num_vsi; in ice_alloc_vsis()
5024 if (!pf->num_alloc_vsi) in ice_alloc_vsis()
5025 return -EIO; in ice_alloc_vsis()
5027 if (pf->num_alloc_vsi > UDP_TUNNEL_NIC_MAX_SHARING_DEVICES) { in ice_alloc_vsis()
5030 pf->num_alloc_vsi, UDP_TUNNEL_NIC_MAX_SHARING_DEVICES); in ice_alloc_vsis()
5031 pf->num_alloc_vsi = UDP_TUNNEL_NIC_MAX_SHARING_DEVICES; in ice_alloc_vsis()
5034 pf->vsi = devm_kcalloc(dev, pf->num_alloc_vsi, sizeof(*pf->vsi), in ice_alloc_vsis()
5036 if (!pf->vsi) in ice_alloc_vsis()
5037 return -ENOMEM; in ice_alloc_vsis()
5039 pf->vsi_stats = devm_kcalloc(dev, pf->num_alloc_vsi, in ice_alloc_vsis()
5040 sizeof(*pf->vsi_stats), GFP_KERNEL); in ice_alloc_vsis()
5041 if (!pf->vsi_stats) { in ice_alloc_vsis()
5042 devm_kfree(dev, pf->vsi); in ice_alloc_vsis()
5043 return -ENOMEM; in ice_alloc_vsis()
5051 devm_kfree(ice_pf_to_dev(pf), pf->vsi_stats); in ice_dealloc_vsis()
5052 pf->vsi_stats = NULL; in ice_dealloc_vsis()
5054 pf->num_alloc_vsi = 0; in ice_dealloc_vsis()
5055 devm_kfree(ice_pf_to_dev(pf), pf->vsi); in ice_dealloc_vsis()
5056 pf->vsi = NULL; in ice_dealloc_vsis()
5114 pcie_print_link_status(pf->pdev); in ice_init()
5117 clear_bit(ICE_DOWN, pf->state); in ice_init()
5118 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_init()
5121 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_init()
5136 set_bit(ICE_SERVICE_DIS, pf->state); in ice_deinit()
5137 set_bit(ICE_DOWN, pf->state); in ice_deinit()
5145 * ice_load - load pf by init hw and starting VSI
5160 INIT_LIST_HEAD(&vsi->ch_list); in ice_load()
5177 SET_NETDEV_DEVLINK_PORT(vsi->netdev, &pf->devlink_port); in ice_load()
5196 clear_bit(ICE_DOWN, pf->state); in ice_load()
5213 * ice_unload - unload pf by stopping VSI and deinit hw
5239 INIT_HLIST_HEAD(&pf->aq_wait_list); in ice_probe_recovery_mode()
5240 spin_lock_init(&pf->aq_wait_lock); in ice_probe_recovery_mode()
5241 init_waitqueue_head(&pf->aq_wait_queue); in ice_probe_recovery_mode()
5243 timer_setup(&pf->serv_tmr, ice_service_timer, 0); in ice_probe_recovery_mode()
5244 pf->serv_tmr_period = HZ; in ice_probe_recovery_mode()
5245 INIT_WORK(&pf->serv_task, ice_service_task_recovery_mode); in ice_probe_recovery_mode()
5246 clear_bit(ICE_SERVICE_SCHED, pf->state); in ice_probe_recovery_mode()
5247 err = ice_create_all_ctrlq(&pf->hw); in ice_probe_recovery_mode()
5263 * ice_probe - Device initialization routine
5272 struct device *dev = &pdev->dev; in ice_probe()
5278 if (pdev->is_virtfn) { in ice_probe()
5280 return -EINVAL; in ice_probe()
5298 * Documentation/driver-api/driver-model/devres.rst in ice_probe()
5312 return -ENOMEM; in ice_probe()
5315 pf->aux_idx = -1; in ice_probe()
5325 pf->pdev = pdev; in ice_probe()
5327 set_bit(ICE_DOWN, pf->state); in ice_probe()
5329 set_bit(ICE_SERVICE_DIS, pf->state); in ice_probe()
5331 hw = &pf->hw; in ice_probe()
5332 hw->hw_addr = pcim_iomap_table(pdev)[ICE_BAR0]; in ice_probe()
5335 hw->back = pf; in ice_probe()
5336 hw->port_info = NULL; in ice_probe()
5337 hw->vendor_id = pdev->vendor; in ice_probe()
5338 hw->device_id = pdev->device; in ice_probe()
5339 pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); in ice_probe()
5340 hw->subsystem_vendor_id = pdev->subsystem_vendor; in ice_probe()
5341 hw->subsystem_device_id = pdev->subsystem_device; in ice_probe()
5342 hw->bus.device = PCI_SLOT(pdev->devfn); in ice_probe()
5343 hw->bus.func = PCI_FUNC(pdev->devfn); in ice_probe()
5346 pf->msg_enable = netif_msg_init(debug, ICE_DFLT_NETIF_M); in ice_probe()
5349 if (debug < -1) in ice_probe()
5350 hw->debug_mask = debug; in ice_probe()
5367 pf->adapter = adapter; in ice_probe()
5398 * ice_set_wake - enable or disable Wake on LAN
5405 struct ice_hw *hw = &pf->hw; in ice_set_wake()
5406 bool wol = pf->wol_ena; in ice_set_wake()
5419 * ice_setup_mc_magic_wake - setup device to wake on multicast magic packet
5429 struct ice_hw *hw = &pf->hw; in ice_setup_mc_magic_wake()
5435 if (!pf->wol_ena) in ice_setup_mc_magic_wake()
5443 if (vsi->netdev) in ice_setup_mc_magic_wake()
5444 ether_addr_copy(mac_addr, vsi->netdev->dev_addr); in ice_setup_mc_magic_wake()
5446 ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr); in ice_setup_mc_magic_wake()
5455 status, ice_aq_str(hw->adminq.sq_last_status)); in ice_setup_mc_magic_wake()
5459 * ice_remove - Device removal routine
5468 if (!ice_is_reset_in_progress(pf->state)) in ice_remove()
5473 if (ice_is_recovery_mode(&pf->hw)) { in ice_remove()
5481 if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) { in ice_remove()
5482 set_bit(ICE_VF_RESETS_DISABLED, pf->state); in ice_remove()
5490 set_bit(ICE_DOWN, pf->state); in ice_remove()
5512 * ice_shutdown - PCI callback for shutting down device
5522 pci_wake_from_d3(pdev, pf->wol_ena); in ice_shutdown()
5528 * ice_prepare_for_shutdown - prep for PCI shutdown
5535 struct ice_hw *hw = &pf->hw; in ice_prepare_for_shutdown()
5539 if (ice_check_sq_alive(hw, &hw->mailboxq)) in ice_prepare_for_shutdown()
5548 if (pf->vsi[v]) in ice_prepare_for_shutdown()
5549 pf->vsi[v]->vsi_num = 0; in ice_prepare_for_shutdown()
5555 * ice_reinit_interrupt_scheme - Reinitialize interrupt scheme
5561 * This should be called during the resume routine to re-allocate the q_vectors
5575 dev_err(dev, "Failed to re-initialize interrupt %d\n", ret); in ice_reinit_interrupt_scheme()
5579 /* Remap vectors and rings, after successful re-init interrupts */ in ice_reinit_interrupt_scheme()
5581 if (!pf->vsi[v]) in ice_reinit_interrupt_scheme()
5584 ret = ice_vsi_alloc_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5587 ice_vsi_map_rings_to_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5589 ice_vsi_set_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5603 while (v--) in ice_reinit_interrupt_scheme()
5604 if (pf->vsi[v]) { in ice_reinit_interrupt_scheme()
5606 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5608 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_reinit_interrupt_scheme()
5631 return -EBUSY; in ice_suspend()
5645 if (test_and_set_bit(ICE_SUSPENDED, pf->state)) { in ice_suspend()
5651 if (test_bit(ICE_DOWN, pf->state) || in ice_suspend()
5652 ice_is_reset_in_progress(pf->state)) { in ice_suspend()
5672 if (!pf->vsi[v]) in ice_suspend()
5675 ice_vsi_clear_napi_queues(pf->vsi[v]); in ice_suspend()
5677 ice_vsi_free_q_vectors(pf->vsi[v]); in ice_suspend()
5682 pci_wake_from_d3(pdev, pf->wol_ena); in ice_suspend()
5688 * ice_resume - PM callback for waking up from D3
5704 return -ENODEV; in ice_resume()
5713 hw = &pf->hw; in ice_resume()
5715 pf->wakeup_reason = rd32(hw, PFPM_WUS); in ice_resume()
5730 clear_bit(ICE_DOWN, pf->state); in ice_resume()
5733 /* re-enable service task for reset, but allow reset to schedule it */ in ice_resume()
5734 clear_bit(ICE_SERVICE_DIS, pf->state); in ice_resume()
5739 clear_bit(ICE_SUSPENDED, pf->state); in ice_resume()
5743 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_resume()
5749 * ice_pci_err_detected - warning that PCI error has been detected
5762 dev_err(&pdev->dev, "%s: unrecoverable device error %d\n", in ice_pci_err_detected()
5767 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_detected()
5770 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_detected()
5771 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_detected()
5780 * ice_pci_err_slot_reset - a PCI slot reset has just happened
5795 dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n", in ice_pci_err_slot_reset()
5805 reg = rd32(&pf->hw, GLGEN_RTRIG); in ice_pci_err_slot_reset()
5816 * ice_pci_err_resume - restart operations after PCI error recovery
5827 dev_err(&pdev->dev, "%s failed, device is unrecoverable\n", in ice_pci_err_resume()
5832 if (test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_resume()
5833 dev_dbg(&pdev->dev, "%s failed to resume normal operations!\n", in ice_pci_err_resume()
5842 mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period)); in ice_pci_err_resume()
5846 * ice_pci_err_reset_prepare - prepare device driver for PCI reset
5853 if (!test_bit(ICE_SUSPENDED, pf->state)) { in ice_pci_err_reset_prepare()
5856 if (!test_bit(ICE_PREPARED_FOR_RESET, pf->state)) { in ice_pci_err_reset_prepare()
5857 set_bit(ICE_PFR_REQ, pf->state); in ice_pci_err_reset_prepare()
5864 * ice_pci_err_reset_done - PCI reset done, device driver reset can begin
5872 /* ice_pci_tbl - PCI Device ID Table
5950 * ice_module_init - Driver registration routine
5957 int status = -ENOMEM; in ice_module_init()
6004 * ice_module_exit - Driver exit cleanup routine
6021 * ice_set_mac_address - NDO callback to set MAC address
6030 struct ice_vsi *vsi = np->vsi; in ice_set_mac_address()
6031 struct ice_pf *pf = vsi->back; in ice_set_mac_address()
6032 struct ice_hw *hw = &pf->hw; in ice_set_mac_address()
6039 mac = (u8 *)addr->sa_data; in ice_set_mac_address()
6042 return -EADDRNOTAVAIL; in ice_set_mac_address()
6044 if (test_bit(ICE_DOWN, pf->state) || in ice_set_mac_address()
6045 ice_is_reset_in_progress(pf->state)) { in ice_set_mac_address()
6048 return -EBUSY; in ice_set_mac_address()
6052 …netdev_err(netdev, "can't set mac %pM. Device has tc-flower filters, delete all of them and try ag… in ice_set_mac_address()
6054 return -EAGAIN; in ice_set_mac_address()
6058 ether_addr_copy(old_mac, netdev->dev_addr); in ice_set_mac_address()
6065 if (err && err != -ENOENT) { in ice_set_mac_address()
6066 err = -EADDRNOTAVAIL; in ice_set_mac_address()
6072 if (err == -EEXIST) { in ice_set_mac_address()
6083 err = -EADDRNOTAVAIL; in ice_set_mac_address()
6096 netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", in ice_set_mac_address()
6097 netdev->dev_addr); in ice_set_mac_address()
6110 * ice_set_rx_mode - NDO callback to set the netdev filters
6116 struct ice_vsi *vsi = np->vsi; in ice_set_rx_mode()
6118 if (!vsi || ice_is_switchdev_running(vsi->back)) in ice_set_rx_mode()
6125 set_bit(ICE_VSI_UMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
6126 set_bit(ICE_VSI_MMAC_FLTR_CHANGED, vsi->state); in ice_set_rx_mode()
6127 set_bit(ICE_FLAG_FLTR_SYNC, vsi->back->flags); in ice_set_rx_mode()
6132 ice_service_task_schedule(vsi->back); in ice_set_rx_mode()
6136 * ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
6145 struct ice_vsi *vsi = np->vsi; in ice_set_tx_maxrate()
6154 return -EINVAL; in ice_set_tx_maxrate()
6157 q_handle = vsi->tx_rings[queue_index]->q_handle; in ice_set_tx_maxrate()
6164 return -EINVAL; in ice_set_tx_maxrate()
6169 status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
6172 status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc, in ice_set_tx_maxrate()
6182 * ice_fdb_add - add an entry to the hardware database
6202 return -EINVAL; in ice_fdb_add()
6204 if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { in ice_fdb_add()
6206 return -EINVAL; in ice_fdb_add()
6214 err = -EINVAL; in ice_fdb_add()
6217 if (err == -EEXIST && !(flags & NLM_F_EXCL)) in ice_fdb_add()
6224 * ice_fdb_del - delete an entry from the hardware database
6241 if (ndm->ndm_state & NUD_PERMANENT) { in ice_fdb_del()
6243 return -EINVAL; in ice_fdb_del()
6251 err = -EINVAL; in ice_fdb_del()
6268 * ice_fix_features - fix the netdev features flags based on device limitations
6311 cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; in ice_fix_features()
6320 if (ice_is_dvm_ena(&np->vsi->back->hw)) { in ice_fix_features()
6350 if (!(netdev->features & NETIF_F_RXFCS) && in ice_fix_features()
6353 !ice_vsi_has_non_zero_vlans(np->vsi)) { in ice_fix_features()
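/*
 * Illustrative sketch of a fix_features-style rule like the DVM handling
 * above, assuming the hardware can offload only one VLAN ethertype at a
 * time: when CTAG offloads are requested, the conflicting STAG bits are
 * dropped before the feature set is committed.
 */
#include <linux/netdevice.h>

static netdev_features_t ex_fix_vlan_features(netdev_features_t req)
{
	if (req & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX))
		req &= ~(NETIF_F_HW_VLAN_STAG_RX | NETIF_F_HW_VLAN_STAG_TX);
	return req;
}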
6362 * ice_set_rx_rings_vlan_proto - update rings with new stripped VLAN proto
6375 vsi->rx_rings[i]->pkt_ctx.vlan_proto = vlan_ethertype; in ice_set_rx_rings_vlan_proto()
6379 * ice_set_vlan_offload_features - set VLAN offload features for the PF VSI
6408 strip_err = vlan_ops->ena_stripping(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
6410 strip_err = vlan_ops->dis_stripping(vsi); in ice_set_vlan_offload_features()
6413 insert_err = vlan_ops->ena_insertion(vsi, vlan_ethertype); in ice_set_vlan_offload_features()
6415 insert_err = vlan_ops->dis_insertion(vsi); in ice_set_vlan_offload_features()
6418 return -EIO; in ice_set_vlan_offload_features()
6427 * ice_set_vlan_filtering_features - set VLAN filtering features for the PF VSI
6446 !ice_is_eswitch_mode_switchdev(vsi->back)) in ice_set_vlan_filtering_features()
6447 err = vlan_ops->ena_rx_filtering(vsi); in ice_set_vlan_filtering_features()
6449 err = vlan_ops->dis_rx_filtering(vsi); in ice_set_vlan_filtering_features()
6455 * ice_set_vlan_features - set VLAN settings based on suggested feature set
6467 struct ice_vsi *vsi = np->vsi; in ice_set_vlan_features()
6470 current_vlan_features = netdev->features & NETIF_VLAN_OFFLOAD_FEATURES; in ice_set_vlan_features()
6475 dev_err(ice_pf_to_dev(vsi->back), in ice_set_vlan_features()
6477 return -EIO; in ice_set_vlan_features()
6485 current_vlan_features = netdev->features & in ice_set_vlan_features()
6498 * ice_set_loopback - turn on/off loopback mode on underlying PF
6504 bool if_running = netif_running(vsi->netdev); in ice_set_loopback()
6507 if (if_running && !test_and_set_bit(ICE_VSI_DOWN, vsi->state)) { in ice_set_loopback()
6510 netdev_err(vsi->netdev, "Preparing device to toggle loopback failed\n"); in ice_set_loopback()
6514 ret = ice_aq_set_mac_loopback(&vsi->back->hw, ena, NULL); in ice_set_loopback()
6516 netdev_err(vsi->netdev, "Failed to toggle loopback state\n"); in ice_set_loopback()
6524 * ice_set_features - set the netdev feature flags
6531 netdev_features_t changed = netdev->features ^ features; in ice_set_features()
6533 struct ice_vsi *vsi = np->vsi; in ice_set_features()
6534 struct ice_pf *pf = vsi->back; in ice_set_features()
6540 "Device is in Safe Mode - not enabling advanced netdev features\n"); in ice_set_features()
6545 if (ice_is_reset_in_progress(pf->state)) { in ice_set_features()
6548 return -EBUSY; in ice_set_features()
6567 dev_err(ice_pf_to_dev(vsi->back), in ice_set_features()
6569 return -EIO; in ice_set_features()
6588 return -EACCES; in ice_set_features()
6594 assign_bit(ICE_FLAG_CLS_FLOWER, pf->flags, ena); in ice_set_features()
6604 * ice_vsi_vlan_setup - Setup VLAN offload properties on a PF VSI
6611 err = ice_set_vlan_offload_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6615 err = ice_set_vlan_filtering_features(vsi, vsi->netdev->features); in ice_vsi_vlan_setup()
6623 * ice_vsi_cfg_lan - Setup the VSI lan related config
6632 if (vsi->netdev && vsi->type == ICE_VSI_PF) { in ice_vsi_cfg_lan()
6633 ice_set_rx_mode(vsi->netdev); in ice_vsi_cfg_lan()
6654 * which is hard-coded to a limit of 250,000 ints/second.
6656 * by ethtool rx-usecs-high.
6695 rc = dim->priv; in ice_tx_dim_work()
6697 WARN_ON(dim->profile_ix >= ARRAY_SIZE(tx_profile)); in ice_tx_dim_work()
6700 itr = tx_profile[dim->profile_ix].itr; in ice_tx_dim_work()
6705 dim->state = DIM_START_MEASURE; in ice_tx_dim_work()
6715 rc = dim->priv; in ice_rx_dim_work()
6717 WARN_ON(dim->profile_ix >= ARRAY_SIZE(rx_profile)); in ice_rx_dim_work()
6720 itr = rx_profile[dim->profile_ix].itr; in ice_rx_dim_work()
6725 dim->state = DIM_START_MEASURE; in ice_rx_dim_work()
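/*
 * Illustrative sketch: the producing side of the DIM loop whose work
 * handlers appear above. From NAPI poll the driver feeds a sample to
 * net_dim(), which may pick a new profile index and schedule dim.work;
 * the handlers then translate dim->profile_ix into an ITR write. This
 * assumes the by-reference net_dim() signature of recent kernels; the
 * helper and its `events` counter are hypothetical.
 */
#include <linux/dim.h>

static void ex_feed_dim(struct dim *dim, u16 events, u64 packets, u64 bytes)
{
	struct dim_sample sample = {};

	dim_update_sample(events, packets, bytes, &sample);
	net_dim(dim, &sample);	/* may queue ice_rx_dim_work()/ice_tx_dim_work() */
}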
6731 * ice_init_moderation - set up interrupt moderation
6745 rc = &q_vector->tx; in ice_init_moderation()
6746 INIT_WORK(&rc->dim.work, ice_tx_dim_work); in ice_init_moderation()
6747 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in ice_init_moderation()
6748 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; in ice_init_moderation()
6749 rc->dim.priv = rc; in ice_init_moderation()
6754 tx_profile[rc->dim.profile_ix].itr : rc->itr_setting); in ice_init_moderation()
6756 rc = &q_vector->rx; in ice_init_moderation()
6757 INIT_WORK(&rc->dim.work, ice_rx_dim_work); in ice_init_moderation()
6758 rc->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; in ice_init_moderation()
6759 rc->dim.profile_ix = ICE_DIM_DEFAULT_PROFILE_IX; in ice_init_moderation()
6760 rc->dim.priv = rc; in ice_init_moderation()
6764 ice_write_itr(rc, rx_dynamic ? rx_profile[rc->dim.profile_ix].itr : in ice_init_moderation()
6765 rc->itr_setting); in ice_init_moderation()
6771 * ice_napi_enable_all - Enable NAPI for all q_vectors in the VSI
6778 if (!vsi->netdev) in ice_napi_enable_all()
6782 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_enable_all()
6786 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) in ice_napi_enable_all()
6787 napi_enable(&q_vector->napi); in ice_napi_enable_all()
6792 * ice_up_complete - Finish the last steps of bringing up a connection
6799 struct ice_pf *pf = vsi->back; in ice_up_complete()
6812 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_up_complete()
6816 if (vsi->port_info && in ice_up_complete()
6817 (vsi->port_info->phy.link_info.link_info & ICE_AQ_LINK_UP) && in ice_up_complete()
6818 ((vsi->netdev && (vsi->type == ICE_VSI_PF || in ice_up_complete()
6819 vsi->type == ICE_VSI_SF)))) { in ice_up_complete()
6821 netif_tx_start_all_queues(vsi->netdev); in ice_up_complete()
6822 netif_carrier_on(vsi->netdev); in ice_up_complete()
6831 if (vsi->type == ICE_VSI_PF) in ice_up_complete()
6838 * ice_up - Bring the connection back up after being down
6853 * ice_fetch_u64_stats_per_ring - get packets and bytes stats per ring
6876 * ice_update_vsi_tx_ring_stats - Update VSI Tx ring stats counters
6894 if (!ring || !ring->ring_stats) in ice_update_vsi_tx_ring_stats()
6896 ice_fetch_u64_stats_per_ring(&ring->ring_stats->syncp, in ice_update_vsi_tx_ring_stats()
6897 ring->ring_stats->stats, &pkts, in ice_update_vsi_tx_ring_stats()
6899 vsi_stats->tx_packets += pkts; in ice_update_vsi_tx_ring_stats()
6900 vsi_stats->tx_bytes += bytes; in ice_update_vsi_tx_ring_stats()
6901 vsi->tx_restart += ring->ring_stats->tx_stats.restart_q; in ice_update_vsi_tx_ring_stats()
6902 vsi->tx_busy += ring->ring_stats->tx_stats.tx_busy; in ice_update_vsi_tx_ring_stats()
6903 vsi->tx_linearize += ring->ring_stats->tx_stats.tx_linearize; in ice_update_vsi_tx_ring_stats()
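/*
 * ice_fetch_u64_stats_per_ring()'s body is elided in this listing; a
 * typical shape for such a helper is the standard u64_stats retry loop
 * below (a sketch consistent with the callers here, not necessarily the
 * exact implementation; struct ice_q_stats comes from the driver headers).
 */
#include <linux/u64_stats_sync.h>

static void ex_fetch_u64_stats(struct u64_stats_sync *syncp,
			       const struct ice_q_stats *stats,
			       u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(syncp);	/* snapshot begins */
		*pkts = stats->pkts;
		*bytes = stats->bytes;
	} while (u64_stats_fetch_retry(syncp, start));	/* redo if torn */
}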
6908 * ice_update_vsi_ring_stats - Update VSI stats counters
6915 struct ice_pf *pf = vsi->back; in ice_update_vsi_ring_stats()
6923 /* reset non-netdev (extended) stats */ in ice_update_vsi_ring_stats()
6924 vsi->tx_restart = 0; in ice_update_vsi_ring_stats()
6925 vsi->tx_busy = 0; in ice_update_vsi_ring_stats()
6926 vsi->tx_linearize = 0; in ice_update_vsi_ring_stats()
6927 vsi->rx_buf_failed = 0; in ice_update_vsi_ring_stats()
6928 vsi->rx_page_failed = 0; in ice_update_vsi_ring_stats()
6933 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->tx_rings, in ice_update_vsi_ring_stats()
6934 vsi->num_txq); in ice_update_vsi_ring_stats()
6938 struct ice_rx_ring *ring = READ_ONCE(vsi->rx_rings[i]); in ice_update_vsi_ring_stats()
6941 ring_stats = ring->ring_stats; in ice_update_vsi_ring_stats()
6942 ice_fetch_u64_stats_per_ring(&ring_stats->syncp, in ice_update_vsi_ring_stats()
6943 ring_stats->stats, &pkts, in ice_update_vsi_ring_stats()
6945 vsi_stats->rx_packets += pkts; in ice_update_vsi_ring_stats()
6946 vsi_stats->rx_bytes += bytes; in ice_update_vsi_ring_stats()
6947 vsi->rx_buf_failed += ring_stats->rx_stats.alloc_buf_failed; in ice_update_vsi_ring_stats()
6948 vsi->rx_page_failed += ring_stats->rx_stats.alloc_page_failed; in ice_update_vsi_ring_stats()
6953 ice_update_vsi_tx_ring_stats(vsi, vsi_stats, vsi->xdp_rings, in ice_update_vsi_ring_stats()
6954 vsi->num_xdp_txq); in ice_update_vsi_ring_stats()
6958 net_stats = &vsi->net_stats; in ice_update_vsi_ring_stats()
6959 stats_prev = &vsi->net_stats_prev; in ice_update_vsi_ring_stats()
6963 * diff of Prev-Cur, we need to be sure that Prev is valid. If it's not, in ice_update_vsi_ring_stats()
6966 if (likely(pf->stat_prev_loaded)) { in ice_update_vsi_ring_stats()
6967 net_stats->tx_packets += vsi_stats->tx_packets - stats_prev->tx_packets; in ice_update_vsi_ring_stats()
6968 net_stats->tx_bytes += vsi_stats->tx_bytes - stats_prev->tx_bytes; in ice_update_vsi_ring_stats()
6969 net_stats->rx_packets += vsi_stats->rx_packets - stats_prev->rx_packets; in ice_update_vsi_ring_stats()
6970 net_stats->rx_bytes += vsi_stats->rx_bytes - stats_prev->rx_bytes; in ice_update_vsi_ring_stats()
6973 stats_prev->tx_packets = vsi_stats->tx_packets; in ice_update_vsi_ring_stats()
6974 stats_prev->tx_bytes = vsi_stats->tx_bytes; in ice_update_vsi_ring_stats()
6975 stats_prev->rx_packets = vsi_stats->rx_packets; in ice_update_vsi_ring_stats()
6976 stats_prev->rx_bytes = vsi_stats->rx_bytes; in ice_update_vsi_ring_stats()
6982 * ice_update_vsi_stats - Update VSI stats counters
6987 struct rtnl_link_stats64 *cur_ns = &vsi->net_stats; in ice_update_vsi_stats()
6988 struct ice_eth_stats *cur_es = &vsi->eth_stats; in ice_update_vsi_stats()
6989 struct ice_pf *pf = vsi->back; in ice_update_vsi_stats()
6991 if (test_bit(ICE_VSI_DOWN, vsi->state) || in ice_update_vsi_stats()
6992 test_bit(ICE_CFG_BUSY, pf->state)) in ice_update_vsi_stats()
7001 cur_ns->tx_errors = cur_es->tx_errors; in ice_update_vsi_stats()
7002 cur_ns->rx_dropped = cur_es->rx_discards; in ice_update_vsi_stats()
7003 cur_ns->tx_dropped = cur_es->tx_discards; in ice_update_vsi_stats()
7004 cur_ns->multicast = cur_es->rx_multicast; in ice_update_vsi_stats()
7007 if (vsi->type == ICE_VSI_PF) { in ice_update_vsi_stats()
7008 cur_ns->rx_crc_errors = pf->stats.crc_errors; in ice_update_vsi_stats()
7009 cur_ns->rx_errors = pf->stats.crc_errors + in ice_update_vsi_stats()
7010 pf->stats.illegal_bytes + in ice_update_vsi_stats()
7011 pf->stats.rx_undersize + in ice_update_vsi_stats()
7012 pf->hw_csum_rx_error + in ice_update_vsi_stats()
7013 pf->stats.rx_jabber + in ice_update_vsi_stats()
7014 pf->stats.rx_fragments + in ice_update_vsi_stats()
7015 pf->stats.rx_oversize; in ice_update_vsi_stats()
7017 cur_ns->rx_missed_errors = pf->stats.eth.rx_discards; in ice_update_vsi_stats()
7022 * ice_update_pf_stats - Update PF port stats counters
7028 struct ice_hw *hw = &pf->hw; in ice_update_pf_stats()
7032 port = hw->port_info->lport; in ice_update_pf_stats()
7033 prev_ps = &pf->stats_prev; in ice_update_pf_stats()
7034 cur_ps = &pf->stats; in ice_update_pf_stats()
7036 if (ice_is_reset_in_progress(pf->state)) in ice_update_pf_stats()
7037 pf->stat_prev_loaded = false; in ice_update_pf_stats()
7039 ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7040 &prev_ps->eth.rx_bytes, in ice_update_pf_stats()
7041 &cur_ps->eth.rx_bytes); in ice_update_pf_stats()
7043 ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7044 &prev_ps->eth.rx_unicast, in ice_update_pf_stats()
7045 &cur_ps->eth.rx_unicast); in ice_update_pf_stats()
7047 ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7048 &prev_ps->eth.rx_multicast, in ice_update_pf_stats()
7049 &cur_ps->eth.rx_multicast); in ice_update_pf_stats()
7051 ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7052 &prev_ps->eth.rx_broadcast, in ice_update_pf_stats()
7053 &cur_ps->eth.rx_broadcast); in ice_update_pf_stats()
7055 ice_stat_update32(hw, PRTRPB_RDPC, pf->stat_prev_loaded, in ice_update_pf_stats()
7056 &prev_ps->eth.rx_discards, in ice_update_pf_stats()
7057 &cur_ps->eth.rx_discards); in ice_update_pf_stats()
7059 ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7060 &prev_ps->eth.tx_bytes, in ice_update_pf_stats()
7061 &cur_ps->eth.tx_bytes); in ice_update_pf_stats()
7063 ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7064 &prev_ps->eth.tx_unicast, in ice_update_pf_stats()
7065 &cur_ps->eth.tx_unicast); in ice_update_pf_stats()
7067 ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7068 &prev_ps->eth.tx_multicast, in ice_update_pf_stats()
7069 &cur_ps->eth.tx_multicast); in ice_update_pf_stats()
7071 ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7072 &prev_ps->eth.tx_broadcast, in ice_update_pf_stats()
7073 &cur_ps->eth.tx_broadcast); in ice_update_pf_stats()
7075 ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7076 &prev_ps->tx_dropped_link_down, in ice_update_pf_stats()
7077 &cur_ps->tx_dropped_link_down); in ice_update_pf_stats()
7079 ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7080 &prev_ps->rx_size_64, &cur_ps->rx_size_64); in ice_update_pf_stats()
7082 ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7083 &prev_ps->rx_size_127, &cur_ps->rx_size_127); in ice_update_pf_stats()
7085 ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7086 &prev_ps->rx_size_255, &cur_ps->rx_size_255); in ice_update_pf_stats()
7088 ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7089 &prev_ps->rx_size_511, &cur_ps->rx_size_511); in ice_update_pf_stats()
7091 ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7092 &prev_ps->rx_size_1023, &cur_ps->rx_size_1023); in ice_update_pf_stats()
7094 ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7095 &prev_ps->rx_size_1522, &cur_ps->rx_size_1522); in ice_update_pf_stats()
7097 ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7098 &prev_ps->rx_size_big, &cur_ps->rx_size_big); in ice_update_pf_stats()
7100 ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7101 &prev_ps->tx_size_64, &cur_ps->tx_size_64); in ice_update_pf_stats()
7103 ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7104 &prev_ps->tx_size_127, &cur_ps->tx_size_127); in ice_update_pf_stats()
7106 ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7107 &prev_ps->tx_size_255, &cur_ps->tx_size_255); in ice_update_pf_stats()
7109 ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7110 &prev_ps->tx_size_511, &cur_ps->tx_size_511); in ice_update_pf_stats()
7112 ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7113 &prev_ps->tx_size_1023, &cur_ps->tx_size_1023); in ice_update_pf_stats()
7115 ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7116 &prev_ps->tx_size_1522, &cur_ps->tx_size_1522); in ice_update_pf_stats()
7118 ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7119 &prev_ps->tx_size_big, &cur_ps->tx_size_big); in ice_update_pf_stats()
7121 fd_ctr_base = hw->fd_ctr_base; in ice_update_pf_stats()
7125 pf->stat_prev_loaded, &prev_ps->fd_sb_match, in ice_update_pf_stats()
7126 &cur_ps->fd_sb_match); in ice_update_pf_stats()
7127 ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7128 &prev_ps->link_xon_rx, &cur_ps->link_xon_rx); in ice_update_pf_stats()
7130 ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7131 &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx); in ice_update_pf_stats()
7133 ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7134 &prev_ps->link_xon_tx, &cur_ps->link_xon_tx); in ice_update_pf_stats()
7136 ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7137 &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx); in ice_update_pf_stats()
7141 ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7142 &prev_ps->crc_errors, &cur_ps->crc_errors); in ice_update_pf_stats()
7144 ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7145 &prev_ps->illegal_bytes, &cur_ps->illegal_bytes); in ice_update_pf_stats()
7147 ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7148 &prev_ps->mac_local_faults, in ice_update_pf_stats()
7149 &cur_ps->mac_local_faults); in ice_update_pf_stats()
7151 ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7152 &prev_ps->mac_remote_faults, in ice_update_pf_stats()
7153 &cur_ps->mac_remote_faults); in ice_update_pf_stats()
7155 ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7156 &prev_ps->rx_undersize, &cur_ps->rx_undersize); in ice_update_pf_stats()
7158 ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7159 &prev_ps->rx_fragments, &cur_ps->rx_fragments); in ice_update_pf_stats()
7161 ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7162 &prev_ps->rx_oversize, &cur_ps->rx_oversize); in ice_update_pf_stats()
7164 ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded, in ice_update_pf_stats()
7165 &prev_ps->rx_jabber, &cur_ps->rx_jabber); in ice_update_pf_stats()
7167 cur_ps->fd_sb_status = test_bit(ICE_FLAG_FD_ENA, pf->flags) ? 1 : 0; in ice_update_pf_stats()
7169 pf->stat_prev_loaded = true; in ice_update_pf_stats()
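/*
 * Illustrative sketch of the rollover handling behind ice_stat_update40()
 * used throughout this function (an assumption about its internals,
 * consistent with how it is called): the port counters are 40 bits wide
 * and wrap, so each call accumulates the delta since the previous read
 * and only seeds the baseline on the first read after a reset, when
 * stat_prev_loaded is false.
 */
#include <linux/bits.h>

static void ex_stat_update40(u64 reading, bool prev_loaded, u64 *prev, u64 *cur)
{
	if (!prev_loaded)
		*prev = reading;			 /* baseline only */
	else if (reading >= *prev)
		*cur += reading - *prev;		 /* normal delta */
	else
		*cur += reading + BIT_ULL(40) - *prev;	 /* counter wrapped */
	*prev = reading;
}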
7173 * ice_get_stats64 - get statistics for network device structure
7181 struct ice_vsi *vsi = np->vsi; in ice_get_stats64()
7183 vsi_stats = &vsi->net_stats; in ice_get_stats64()
7185 if (!vsi->num_txq || !vsi->num_rxq) in ice_get_stats64()
7193 if (!test_bit(ICE_VSI_DOWN, vsi->state)) in ice_get_stats64()
7195 stats->tx_packets = vsi_stats->tx_packets; in ice_get_stats64()
7196 stats->tx_bytes = vsi_stats->tx_bytes; in ice_get_stats64()
7197 stats->rx_packets = vsi_stats->rx_packets; in ice_get_stats64()
7198 stats->rx_bytes = vsi_stats->rx_bytes; in ice_get_stats64()
7204 stats->multicast = vsi_stats->multicast; in ice_get_stats64()
7205 stats->tx_errors = vsi_stats->tx_errors; in ice_get_stats64()
7206 stats->tx_dropped = vsi_stats->tx_dropped; in ice_get_stats64()
7207 stats->rx_errors = vsi_stats->rx_errors; in ice_get_stats64()
7208 stats->rx_dropped = vsi_stats->rx_dropped; in ice_get_stats64()
7209 stats->rx_crc_errors = vsi_stats->rx_crc_errors; in ice_get_stats64()
7210 stats->rx_length_errors = vsi_stats->rx_length_errors; in ice_get_stats64()
7214 * ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
7221 if (!vsi->netdev) in ice_napi_disable_all()
7225 struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; in ice_napi_disable_all()
7227 if (q_vector->rx.rx_ring || q_vector->tx.tx_ring) in ice_napi_disable_all()
7228 napi_disable(&q_vector->napi); in ice_napi_disable_all()
7230 cancel_work_sync(&q_vector->tx.dim.work); in ice_napi_disable_all()
7231 cancel_work_sync(&q_vector->rx.dim.work); in ice_napi_disable_all()
7236 * ice_vsi_dis_irq - Mask off queue interrupt generation on the VSI
7237 * @vsi: the VSI being un-configured
7241 struct ice_pf *pf = vsi->back; in ice_vsi_dis_irq()
7242 struct ice_hw *hw = &pf->hw; in ice_vsi_dis_irq()
7249 if (vsi->rx_rings) { in ice_vsi_dis_irq()
7251 if (vsi->rx_rings[i]) { in ice_vsi_dis_irq()
7254 reg = vsi->rx_rings[i]->reg_idx; in ice_vsi_dis_irq()
7264 if (!vsi->q_vectors[i]) in ice_vsi_dis_irq()
7266 wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0); in ice_vsi_dis_irq()
7272 if (vsi->type == ICE_VSI_VF) in ice_vsi_dis_irq()
7276 synchronize_irq(vsi->q_vectors[i]->irq.virq); in ice_vsi_dis_irq()
7280 * ice_down - Shutdown the connection
7283 * Caller of this function is expected to set the ICE_VSI_DOWN bit in vsi->state
7289 WARN_ON(!test_bit(ICE_VSI_DOWN, vsi->state)); in ice_down()
7291 if (vsi->netdev) { in ice_down()
7293 ice_ptp_link_change(vsi->back, false); in ice_down()
7294 netif_carrier_off(vsi->netdev); in ice_down()
7295 netif_tx_disable(vsi->netdev); in ice_down()
7302 netdev_err(vsi->netdev, "Failed to stop Tx rings, VSI %d error %d\n", in ice_down()
7303 vsi->vsi_num, tx_err); in ice_down()
7304 if (!tx_err && vsi->xdp_rings) { in ice_down()
7307 netdev_err(vsi->netdev, "Failed to stop XDP rings, VSI %d error %d\n", in ice_down()
7308 vsi->vsi_num, tx_err); in ice_down()
7313 netdev_err(vsi->netdev, "Failed to stop Rx rings, VSI %d error %d\n", in ice_down()
7314 vsi->vsi_num, rx_err); in ice_down()
7319 ice_clean_tx_ring(vsi->tx_rings[i]); in ice_down()
7321 if (vsi->xdp_rings) in ice_down()
7323 ice_clean_tx_ring(vsi->xdp_rings[i]); in ice_down()
7326 ice_clean_rx_ring(vsi->rx_rings[i]); in ice_down()
7329 netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n", in ice_down()
7330 vsi->vsi_num, vsi->vsw->sw_id); in ice_down()
7331 return -EIO; in ice_down()
7338 * ice_down_up - shutdown the VSI connection and bring it up
7346 if (test_and_set_bit(ICE_VSI_DOWN, vsi->state)) in ice_down_up()
7355 …netdev_err(vsi->netdev, "reallocating resources failed during netdev features change, may need to … in ice_down_up()
7363 * ice_vsi_setup_tx_rings - Allocate VSI Tx queue resources
7372 if (!vsi->num_txq) { in ice_vsi_setup_tx_rings()
7373 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n", in ice_vsi_setup_tx_rings()
7374 vsi->vsi_num); in ice_vsi_setup_tx_rings()
7375 return -EINVAL; in ice_vsi_setup_tx_rings()
7379 struct ice_tx_ring *ring = vsi->tx_rings[i]; in ice_vsi_setup_tx_rings()
7382 return -EINVAL; in ice_vsi_setup_tx_rings()
7384 if (vsi->netdev) in ice_vsi_setup_tx_rings()
7385 ring->netdev = vsi->netdev; in ice_vsi_setup_tx_rings()
7395 * ice_vsi_setup_rx_rings - Allocate VSI Rx queue resources
7404 if (!vsi->num_rxq) { in ice_vsi_setup_rx_rings()
7405 dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n", in ice_vsi_setup_rx_rings()
7406 vsi->vsi_num); in ice_vsi_setup_rx_rings()
7407 return -EINVAL; in ice_vsi_setup_rx_rings()
7411 struct ice_rx_ring *ring = vsi->rx_rings[i]; in ice_vsi_setup_rx_rings()
7414 return -EINVAL; in ice_vsi_setup_rx_rings()
7416 if (vsi->netdev) in ice_vsi_setup_rx_rings()
7417 ring->netdev = vsi->netdev; in ice_vsi_setup_rx_rings()
7427 * ice_vsi_open_ctrl - open control VSI for use
7437 struct ice_pf *pf = vsi->back; in ice_vsi_open_ctrl()
7455 snprintf(int_name, sizeof(int_name) - 1, "%s-%s:ctrl", in ice_vsi_open_ctrl()
7467 clear_bit(ICE_VSI_DOWN, vsi->state); in ice_vsi_open_ctrl()
7483 * ice_vsi_open - Called when a network interface is made active
7493 struct ice_pf *pf = vsi->back; in ice_vsi_open()
7509 snprintf(int_name, sizeof(int_name) - 1, "%s-%s", in ice_vsi_open()
7510 dev_driver_string(ice_pf_to_dev(pf)), vsi->netdev->name); in ice_vsi_open()
7515 ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc); in ice_vsi_open()
7517 if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_SF) { in ice_vsi_open()
7519 err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_txq); in ice_vsi_open()
7523 err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_rxq); in ice_vsi_open()
7549 * ice_vsi_release_all - Delete all VSIs
7556 if (!pf->vsi) in ice_vsi_release_all()
7560 if (!pf->vsi[i]) in ice_vsi_release_all()
7563 if (pf->vsi[i]->type == ICE_VSI_CHNL) in ice_vsi_release_all()
7566 err = ice_vsi_release(pf->vsi[i]); in ice_vsi_release_all()
7568 dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n", in ice_vsi_release_all()
7569 i, err, pf->vsi[i]->vsi_num); in ice_vsi_release_all()
7574 * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
7578 * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
7586 struct ice_vsi *vsi = pf->vsi[i]; in ice_vsi_rebuild_by_type()
7588 if (!vsi || vsi->type != type) in ice_vsi_rebuild_by_type()
7595 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7600 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7603 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7607 /* Re-map HW VSI number, using VSI handle that has been in ice_vsi_rebuild_by_type()
7610 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_vsi_rebuild_by_type()
7616 err, vsi->idx, ice_vsi_type_str(type)); in ice_vsi_rebuild_by_type()
7620 dev_info(dev, "VSI rebuilt. VSI index %d, type %s\n", vsi->idx, in ice_vsi_rebuild_by_type()
7628 * ice_update_pf_netdev_link - Update PF netdev link status
7637 struct ice_vsi *vsi = pf->vsi[i]; in ice_update_pf_netdev_link()
7639 if (!vsi || vsi->type != ICE_VSI_PF) in ice_update_pf_netdev_link()
7642 ice_get_link_status(pf->vsi[i]->port_info, &link_up); in ice_update_pf_netdev_link()
7644 netif_carrier_on(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7645 netif_tx_wake_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7647 netif_carrier_off(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7648 netif_tx_stop_all_queues(pf->vsi[i]->netdev); in ice_update_pf_netdev_link()
7654 * ice_rebuild - rebuild after reset
7667 struct ice_hw *hw = &pf->hw; in ice_rebuild()
7671 if (test_bit(ICE_DOWN, pf->state)) in ice_rebuild()
7682 pf->fw_emp_reset_disabled = false; in ice_rebuild()
7731 err = ice_aq_set_port_params(pf->hw.port_info, dvm, NULL); in ice_rebuild()
7735 err = ice_sched_init_port(hw->port_info); in ice_rebuild()
7746 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7751 guar = hw->func_caps.fd_fltr_guar; in ice_rebuild()
7752 b_effort = hw->func_caps.fd_fltr_best_effort; in ice_rebuild()
7761 if (test_bit(ICE_FLAG_DCB_ENA, pf->flags)) in ice_rebuild()
7768 if (test_bit(ICE_FLAG_PTP_SUPPORTED, pf->flags)) in ice_rebuild()
7791 if (test_bit(ICE_FLAG_FD_ENA, pf->flags)) { in ice_rebuild()
7799 if (hw->fdir_prof) in ice_rebuild()
7808 if (vsi && vsi->netdev) in ice_rebuild()
7809 netif_device_attach(vsi->netdev); in ice_rebuild()
7824 clear_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7841 set_bit(ICE_RESET_FAILED, pf->state); in ice_rebuild()
7844 set_bit(ICE_NEEDS_RESTART, pf->state); in ice_rebuild()
7849 * ice_change_mtu - NDO callback to change the MTU
7858 struct ice_vsi *vsi = np->vsi; in ice_change_mtu()
7859 struct ice_pf *pf = vsi->back; in ice_change_mtu()
7864 if (new_mtu == (int)netdev->mtu) { in ice_change_mtu()
7865 netdev_warn(netdev, "MTU is already %u\n", netdev->mtu); in ice_change_mtu()
7869 prog = vsi->xdp_prog; in ice_change_mtu()
7870 if (prog && !prog->aux->xdp_has_frags) { in ice_change_mtu()
7875 frame_size - ICE_ETH_PKT_HDR_PAD); in ice_change_mtu()
7876 return -EINVAL; in ice_change_mtu()
7878 } else if (test_bit(ICE_FLAG_LEGACY_RX, pf->flags)) { in ice_change_mtu()
7880 netdev_err(netdev, "MTU too big for legacy-rx; max is %d\n", in ice_change_mtu()
7881 ICE_MAX_FRAME_LEGACY_RX - ICE_ETH_PKT_HDR_PAD); in ice_change_mtu()
7882 return -EINVAL; in ice_change_mtu()
7888 if (ice_is_reset_in_progress(pf->state)) { in ice_change_mtu()
7899 return -EBUSY; in ice_change_mtu()
7902 WRITE_ONCE(netdev->mtu, (unsigned int)new_mtu); in ice_change_mtu()
7908 set_bit(ICE_FLAG_MTU_CHANGED, pf->flags); in ice_change_mtu()
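/* Illustrative sketch (not driver code): the XDP branch of ice_change_mtu()
 * rejects an MTU that would not fit one buffer when the attached program
 * cannot take multi-buffer frames; the check is just "usable MTU = frame
 * size - L2 overhead". The 26-byte pad (ETH header + FCS + two VLAN tags)
 * and the 3072-byte frame budget below are assumed values for illustration,
 * not authoritative driver constants.
 */
#include <stdio.h>

#define PKT_HDR_PAD	26	/* assumed: 14 + 4 + 2 * 4 */
#define XDP_FRAME_SIZE	3072	/* assumed single-buffer budget */

int main(void)
{
	int new_mtu = 3000;
	int max_mtu = XDP_FRAME_SIZE - PKT_HDR_PAD;

	if (new_mtu > max_mtu)
		printf("MTU %d rejected; max for single-buffer XDP is %d\n",
		       new_mtu, max_mtu);
	else
		printf("MTU %d accepted\n", new_mtu);
	return 0;
}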
7914 * ice_eth_ioctl - Access the hwtstamp interface
7922 struct ice_pf *pf = np->vsi->back; in ice_eth_ioctl()
7930 return -EOPNOTSUPP; in ice_eth_ioctl()
7935 * ice_aq_str - convert AQ err code to a string
7977 * ice_set_rss_lut - Set RSS LUT
7987 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_lut()
7991 return -EINVAL; in ice_set_rss_lut()
7993 params.vsi_handle = vsi->idx; in ice_set_rss_lut()
7995 params.lut_type = vsi->rss_lut_type; in ice_set_rss_lut()
8000 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS lut, err %d aq_err %s\n", in ice_set_rss_lut()
8001 status, ice_aq_str(hw->adminq.sq_last_status)); in ice_set_rss_lut()
8007 * ice_set_rss_key - Set RSS key
8015 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_key()
8019 return -EINVAL; in ice_set_rss_key()
8021 status = ice_aq_set_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); in ice_set_rss_key()
8023 dev_err(ice_pf_to_dev(vsi->back), "Cannot set RSS key, err %d aq_err %s\n", in ice_set_rss_key()
8024 status, ice_aq_str(hw->adminq.sq_last_status)); in ice_set_rss_key()
8030 * ice_get_rss_lut - Get RSS LUT
8040 struct ice_hw *hw = &vsi->back->hw; in ice_get_rss_lut()
8044 return -EINVAL; in ice_get_rss_lut()
8046 params.vsi_handle = vsi->idx; in ice_get_rss_lut()
8048 params.lut_type = vsi->rss_lut_type; in ice_get_rss_lut()
8053 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS lut, err %d aq_err %s\n", in ice_get_rss_lut()
8054 status, ice_aq_str(hw->adminq.sq_last_status)); in ice_get_rss_lut()
8060 * ice_get_rss_key - Get RSS key
8068 struct ice_hw *hw = &vsi->back->hw; in ice_get_rss_key()
8072 return -EINVAL; in ice_get_rss_key()
8074 status = ice_aq_get_rss_key(hw, vsi->idx, (struct ice_aqc_get_set_rss_keys *)seed); in ice_get_rss_key()
8076 dev_err(ice_pf_to_dev(vsi->back), "Cannot get RSS key, err %d aq_err %s\n", in ice_get_rss_key()
8077 status, ice_aq_str(hw->adminq.sq_last_status)); in ice_get_rss_key()
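/* Illustrative sketch (not driver code): the LUT that ice_set_rss_lut() and
 * ice_get_rss_lut() move to and from firmware is simply an array mapping
 * hash-bucket index to queue index; a round-robin spread is the
 * conventional default. Self-contained toy with illustrative sizes
 * (64 buckets over 4 queues):
 */
#include <stdio.h>

int main(void)
{
	unsigned char lut[64];
	unsigned int num_queues = 4;
	unsigned int i;

	for (i = 0; i < sizeof(lut); i++)
		lut[i] = i % num_queues;	/* bucket i -> queue i mod N */

	printf("bucket 0 -> q%u, bucket 5 -> q%u, bucket 63 -> q%u\n",
	       (unsigned int)lut[0], (unsigned int)lut[5],
	       (unsigned int)lut[63]);
	return 0;
}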
8083 * ice_set_rss_hfunc - Set RSS hash function
8091 struct ice_hw *hw = &vsi->back->hw; in ice_set_rss_hfunc()
8096 if (hfunc == vsi->rss_hfunc) in ice_set_rss_hfunc()
8101 return -EOPNOTSUPP; in ice_set_rss_hfunc()
8105 return -ENOMEM; in ice_set_rss_hfunc()
8107 ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_Q_OPT_VALID); in ice_set_rss_hfunc()
8108 ctx->info.q_opt_rss = vsi->info.q_opt_rss; in ice_set_rss_hfunc()
8109 ctx->info.q_opt_rss &= ~ICE_AQ_VSI_Q_OPT_RSS_HASH_M; in ice_set_rss_hfunc()
8110 ctx->info.q_opt_rss |= in ice_set_rss_hfunc()
8112 ctx->info.q_opt_tc = vsi->info.q_opt_tc; in ice_set_rss_hfunc()
8113 ctx->info.q_opt_flags = vsi->info.q_opt_rss; in ice_set_rss_hfunc()
8115 err = ice_update_vsi(hw, vsi->idx, ctx, NULL); in ice_set_rss_hfunc()
8117 dev_err(ice_pf_to_dev(vsi->back), "Failed to configure RSS hash for VSI %d, error %d\n", in ice_set_rss_hfunc()
8118 vsi->vsi_num, err); in ice_set_rss_hfunc()
8120 vsi->info.q_opt_rss = ctx->info.q_opt_rss; in ice_set_rss_hfunc()
8121 vsi->rss_hfunc = hfunc; in ice_set_rss_hfunc()
8122 netdev_info(vsi->netdev, "Hash function set to: %sToeplitz\n", in ice_set_rss_hfunc()
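/* Illustrative sketch (not driver code): ice_set_rss_hfunc() updates one
 * field of the VSI context with a classic read-modify-write on a masked
 * bitfield - copy the current byte, clear the hash-select bits, OR in the
 * new selector, then push the whole context back via ice_update_vsi().
 * The mask and selector values below are made up for illustration; the
 * real ICE_AQ_VSI_Q_OPT_RSS_* encoding lives in the adminq headers.
 */
#include <stdio.h>

#define RSS_HASH_MASK	0xC0u	/* assumed field position */
#define RSS_HASH_SYM	0x40u	/* assumed: symmetric Toeplitz */

int main(void)
{
	unsigned char q_opt_rss = 0x93;			/* current value */

	q_opt_rss &= (unsigned char)~RSS_HASH_MASK;	/* clear old selector */
	q_opt_rss |= RSS_HASH_SYM;			/* set the new one */
	printf("q_opt_rss = 0x%02x\n", q_opt_rss);
	return 0;
}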
8136 * ice_bridge_getlink - Get the hardware bridge mode
8151 struct ice_vsi *vsi = np->vsi; in ice_bridge_getlink()
8152 struct ice_pf *pf = vsi->back; in ice_bridge_getlink()
8155 bmode = pf->first_sw->bridge_mode; in ice_bridge_getlink()
8162 * ice_vsi_update_bridge_mode - Update VSI for switching bridge mode (VEB/VEPA)
8171 struct ice_hw *hw = &vsi->back->hw; in ice_vsi_update_bridge_mode()
8175 vsi_props = &vsi->info; in ice_vsi_update_bridge_mode()
8179 return -ENOMEM; in ice_vsi_update_bridge_mode()
8181 ctxt->info = vsi->info; in ice_vsi_update_bridge_mode()
8185 ctxt->info.sw_flags |= ICE_AQ_VSI_SW_FLAG_ALLOW_LB; in ice_vsi_update_bridge_mode()
8188 ctxt->info.sw_flags &= ~ICE_AQ_VSI_SW_FLAG_ALLOW_LB; in ice_vsi_update_bridge_mode()
8189 ctxt->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_SW_VALID); in ice_vsi_update_bridge_mode()
8191 ret = ice_update_vsi(hw, vsi->idx, ctxt, NULL); in ice_vsi_update_bridge_mode()
8193 …dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %s\… in ice_vsi_update_bridge_mode()
8194 bmode, ret, ice_aq_str(hw->adminq.sq_last_status)); in ice_vsi_update_bridge_mode()
8198 vsi_props->sw_flags = ctxt->info.sw_flags; in ice_vsi_update_bridge_mode()
8206 * ice_bridge_setlink - Set the hardware bridge mode
8223 struct ice_pf *pf = np->vsi->back; in ice_bridge_setlink()
8225 struct ice_hw *hw = &pf->hw; in ice_bridge_setlink()
8229 pf_sw = pf->first_sw; in ice_bridge_setlink()
8233 return -EINVAL; in ice_bridge_setlink()
8239 return -EINVAL; in ice_bridge_setlink()
8241 if (mode == pf_sw->bridge_mode) in ice_bridge_setlink()
8247 if (!pf->vsi[v]) in ice_bridge_setlink()
8249 err = ice_vsi_update_bridge_mode(pf->vsi[v], mode); in ice_bridge_setlink()
8254 hw->evb_veb = (mode == BRIDGE_MODE_VEB); in ice_bridge_setlink()
8262 ice_aq_str(hw->adminq.sq_last_status)); in ice_bridge_setlink()
8263 /* revert hw->evb_veb */ in ice_bridge_setlink()
8264 hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); in ice_bridge_setlink()
8268 pf_sw->bridge_mode = mode; in ice_bridge_setlink()
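/* Illustrative sketch (not driver code): ice_bridge_setlink() applies the
 * new mode to every VSI, flips the cached hw->evb_veb, and - if the
 * switch-config AQ call then fails - reverts the cached value so software
 * state keeps matching hardware. A toy of that commit-then-roll-back
 * shape; update_switch() is a made-up stand-in for the AQ call.
 */
#include <stdbool.h>
#include <stdio.h>

static int update_switch(bool veb)
{
	(void)veb;
	return -1;			/* pretend the AQ call failed */
}

int main(void)
{
	bool old_veb = true, want_veb = false;
	bool evb_veb = want_veb;	/* commit the cached state first */

	if (update_switch(evb_veb)) {
		evb_veb = old_veb;	/* roll back on failure */
		puts("switch update failed; cached mode reverted");
		return 1;
	}
	puts("switch mode committed");
	return 0;
}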
8275 * ice_tx_timeout - Respond to a Tx Hang
8283 struct ice_vsi *vsi = np->vsi; in ice_tx_timeout()
8284 struct ice_pf *pf = vsi->back; in ice_tx_timeout()
8287 pf->tx_timeout_count++; in ice_tx_timeout()
8301 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) in ice_tx_timeout()
8302 if (txqueue == vsi->tx_rings[i]->q_index) { in ice_tx_timeout()
8303 tx_ring = vsi->tx_rings[i]; in ice_tx_timeout()
8310 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ * 20))) in ice_tx_timeout()
8311 pf->tx_timeout_recovery_level = 1; in ice_tx_timeout()
8312 else if (time_before(jiffies, (pf->tx_timeout_last_recovery + in ice_tx_timeout()
8313 netdev->watchdog_timeo))) in ice_tx_timeout()
8317 struct ice_hw *hw = &pf->hw; in ice_tx_timeout()
8321 rd32(hw, QTX_COMM_HEAD(vsi->txq_map[txqueue]))); in ice_tx_timeout()
8323 intr = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->reg_idx)); in ice_tx_timeout()
8326 vsi->vsi_num, txqueue, tx_ring->next_to_clean, in ice_tx_timeout()
8327 head, tx_ring->next_to_use, intr); in ice_tx_timeout()
8329 ice_prep_tx_hang_report(pf, tx_ring, vsi->vsi_num, head, intr); in ice_tx_timeout()
8332 pf->tx_timeout_last_recovery = jiffies; in ice_tx_timeout()
8334 pf->tx_timeout_recovery_level, txqueue); in ice_tx_timeout()
8336 switch (pf->tx_timeout_recovery_level) { in ice_tx_timeout()
8338 set_bit(ICE_PFR_REQ, pf->state); in ice_tx_timeout()
8341 set_bit(ICE_CORER_REQ, pf->state); in ice_tx_timeout()
8344 set_bit(ICE_GLOBR_REQ, pf->state); in ice_tx_timeout()
8348 set_bit(ICE_DOWN, pf->state); in ice_tx_timeout()
8349 set_bit(ICE_VSI_NEEDS_RESTART, vsi->state); in ice_tx_timeout()
8350 set_bit(ICE_SERVICE_DIS, pf->state); in ice_tx_timeout()
8355 pf->tx_timeout_recovery_level++; in ice_tx_timeout()
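/* Illustrative sketch (not driver code): the recovery switch above is a
 * three-rung escalation ladder - PF reset, then CORE reset, then GLOBAL
 * reset - and once those are exhausted the port is simply taken down and
 * the service task disabled. Compact model of the ladder:
 */
#include <stdio.h>

int main(void)
{
	int level;

	for (level = 1; level <= 4; level++) {
		switch (level) {
		case 1:
			puts("level 1: request PF reset");
			break;
		case 2:
			puts("level 2: request CORE reset");
			break;
		case 3:
			puts("level 3: request GLOBAL reset");
			break;
		default:
			puts("out of options: mark DOWN, disable service task");
		}
	}
	return 0;
}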
8359 * ice_setup_tc_cls_flower - flower classifier offloads
8369 struct ice_vsi *vsi = np->vsi; in ice_setup_tc_cls_flower()
8371 if (cls_flower->common.chain_index) in ice_setup_tc_cls_flower()
8372 return -EOPNOTSUPP; in ice_setup_tc_cls_flower()
8374 switch (cls_flower->command) { in ice_setup_tc_cls_flower()
8380 return -EINVAL; in ice_setup_tc_cls_flower()
8385 * ice_setup_tc_block_cb - callback handler registered for TC block
8397 return ice_setup_tc_cls_flower(np, np->vsi->netdev, in ice_setup_tc_block_cb()
8400 return -EOPNOTSUPP; in ice_setup_tc_block_cb()
8405 * ice_validate_mqprio_qopt - Validate TC mqprio input parameters
8418 struct ice_pf *pf = vsi->back; in ice_validate_mqprio_qopt()
8425 if (vsi->type != ICE_VSI_PF) in ice_validate_mqprio_qopt()
8426 return -EINVAL; in ice_validate_mqprio_qopt()
8428 if (mqprio_qopt->qopt.offset[0] != 0 || in ice_validate_mqprio_qopt()
8429 mqprio_qopt->qopt.num_tc < 1 || in ice_validate_mqprio_qopt()
8430 mqprio_qopt->qopt.num_tc > ICE_CHNL_MAX_TC) in ice_validate_mqprio_qopt()
8431 return -EINVAL; in ice_validate_mqprio_qopt()
8434 vsi->ch_rss_size = 0; in ice_validate_mqprio_qopt()
8435 num_tc = mqprio_qopt->qopt.num_tc; in ice_validate_mqprio_qopt()
8439 int qcount = mqprio_qopt->qopt.count[i]; in ice_validate_mqprio_qopt()
8443 return -EINVAL; in ice_validate_mqprio_qopt()
8450 return -EINVAL; in ice_validate_mqprio_qopt()
8459 return -EINVAL; in ice_validate_mqprio_qopt()
8464 return -EINVAL; in ice_validate_mqprio_qopt()
8475 max_rate = mqprio_qopt->max_rate[i]; in ice_validate_mqprio_qopt()
8479 min_rate = mqprio_qopt->min_rate[i]; in ice_validate_mqprio_qopt()
8486 return -EINVAL; in ice_validate_mqprio_qopt()
8492 return -EINVAL; in ice_validate_mqprio_qopt()
8499 return -EINVAL; in ice_validate_mqprio_qopt()
8506 return -EINVAL; in ice_validate_mqprio_qopt()
8516 return -EINVAL; in ice_validate_mqprio_qopt()
8519 if (i >= mqprio_qopt->qopt.num_tc - 1) in ice_validate_mqprio_qopt()
8521 if (mqprio_qopt->qopt.offset[i + 1] != in ice_validate_mqprio_qopt()
8522 (mqprio_qopt->qopt.offset[i] + qcount)) in ice_validate_mqprio_qopt()
8523 return -EINVAL; in ice_validate_mqprio_qopt()
8525 if (vsi->num_rxq < in ice_validate_mqprio_qopt()
8526 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) in ice_validate_mqprio_qopt()
8527 return -EINVAL; in ice_validate_mqprio_qopt()
8528 if (vsi->num_txq < in ice_validate_mqprio_qopt()
8529 (mqprio_qopt->qopt.offset[i] + mqprio_qopt->qopt.count[i])) in ice_validate_mqprio_qopt()
8530 return -EINVAL; in ice_validate_mqprio_qopt()
8535 return -EINVAL; in ice_validate_mqprio_qopt()
8538 /* make sure vsi->ch_rss_size is set correctly based on TC's qcount */ in ice_validate_mqprio_qopt()
8539 vsi->ch_rss_size = max_rss_q_cnt; in ice_validate_mqprio_qopt()
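/* Illustrative sketch (not driver code): two of the structural rules
 * enforced above are easy to state on their own - TC i+1 must begin
 * exactly where TC i ends (offset[i + 1] == offset[i] + count[i]), and no
 * TC may reach past the VSI's Rx/Tx queue count. Standalone check of just
 * those rules over an example layout:
 */
#include <stdio.h>

int main(void)
{
	int offset[] = { 0, 4, 12 };
	int count[]  = { 4, 8, 4 };
	int num_tc = 3, num_q = 16;
	int i;

	for (i = 0; i < num_tc; i++) {
		if (offset[i] + count[i] > num_q) {
			printf("TC%d exceeds the %d available queues\n",
			       i, num_q);
			return 1;
		}
		if (i + 1 < num_tc &&
		    offset[i + 1] != offset[i] + count[i]) {
			printf("TC%d..%d queue ranges are not contiguous\n",
			       i, i + 1);
			return 1;
		}
	}
	puts("mqprio layout OK");
	return 0;
}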
8545 * ice_add_vsi_to_fdir - add a VSI to the flow director group for PF
8556 if (!(vsi->num_gfltr || vsi->num_bfltr)) in ice_add_vsi_to_fdir()
8557 return -EINVAL; in ice_add_vsi_to_fdir()
8559 hw = &pf->hw; in ice_add_vsi_to_fdir()
8565 if (!(hw->fdir_prof && hw->fdir_prof[flow] && in ice_add_vsi_to_fdir()
8566 hw->fdir_prof[flow]->cnt)) in ice_add_vsi_to_fdir()
8574 prof = hw->fdir_prof[flow]; in ice_add_vsi_to_fdir()
8576 prof->prof_id[tun], in ice_add_vsi_to_fdir()
8577 prof->vsi_h[0], vsi->idx, in ice_add_vsi_to_fdir()
8578 prio, prof->fdir_seg[tun], in ice_add_vsi_to_fdir()
8582 vsi->idx, flow); in ice_add_vsi_to_fdir()
8586 prof->entry_h[prof->cnt][tun] = entry_h; in ice_add_vsi_to_fdir()
8590 prof->vsi_h[prof->cnt] = vsi->idx; in ice_add_vsi_to_fdir()
8591 prof->cnt++; in ice_add_vsi_to_fdir()
8594 dev_dbg(dev, "VSI idx %d added to fdir group %d\n", vsi->idx, in ice_add_vsi_to_fdir()
8599 dev_dbg(dev, "VSI idx %d not added to fdir groups\n", vsi->idx); in ice_add_vsi_to_fdir()
8605 * ice_add_channel - add a channel by adding a VSI
8617 if (ch->type != ICE_VSI_CHNL) { in ice_add_channel()
8618 dev_err(dev, "add new VSI failed, ch->type %d\n", ch->type); in ice_add_channel()
8619 return -EINVAL; in ice_add_channel()
8622 vsi = ice_chnl_vsi_setup(pf, pf->hw.port_info, ch); in ice_add_channel()
8623 if (!vsi || vsi->type != ICE_VSI_CHNL) { in ice_add_channel()
8625 return -EINVAL; in ice_add_channel()
8630 ch->sw_id = sw_id; in ice_add_channel()
8631 ch->vsi_num = vsi->vsi_num; in ice_add_channel()
8632 ch->info.mapping_flags = vsi->info.mapping_flags; in ice_add_channel()
8633 ch->ch_vsi = vsi; in ice_add_channel()
8635 vsi->ch = ch; in ice_add_channel()
8637 memcpy(&ch->info.q_mapping, &vsi->info.q_mapping, in ice_add_channel()
8638 sizeof(vsi->info.q_mapping)); in ice_add_channel()
8639 memcpy(&ch->info.tc_mapping, vsi->info.tc_mapping, in ice_add_channel()
8640 sizeof(vsi->info.tc_mapping)); in ice_add_channel()
8656 for (i = 0; i < ch->num_txq; i++) { in ice_chnl_cfg_res()
8662 tx_ring = vsi->tx_rings[ch->base_q + i]; in ice_chnl_cfg_res()
8663 rx_ring = vsi->rx_rings[ch->base_q + i]; in ice_chnl_cfg_res()
8668 tx_ring->ch = ch; in ice_chnl_cfg_res()
8669 rx_ring->ch = ch; in ice_chnl_cfg_res()
8672 tx_q_vector = tx_ring->q_vector; in ice_chnl_cfg_res()
8673 rx_q_vector = rx_ring->q_vector; in ice_chnl_cfg_res()
8678 tx_q_vector->ch = ch; in ice_chnl_cfg_res()
8680 rc = &tx_q_vector->tx; in ice_chnl_cfg_res()
8682 ice_write_itr(rc, rc->itr_setting); in ice_chnl_cfg_res()
8685 rx_q_vector->ch = ch; in ice_chnl_cfg_res()
8687 rc = &rx_q_vector->rx; in ice_chnl_cfg_res()
8689 ice_write_itr(rc, rc->itr_setting); in ice_chnl_cfg_res()
8693 /* it is safe to assume that, if the channel has non-zero num_t[r]xq, then in ice_chnl_cfg_res()
8694 * the GLINT_ITR register would have been written to perform an in-context in ice_chnl_cfg_res()
8697 if (ch->num_txq || ch->num_rxq) in ice_chnl_cfg_res()
8698 ice_flush(&vsi->back->hw); in ice_chnl_cfg_res()
8702 * ice_cfg_chnl_all_res - configure channel resources
8706 * This function configures channel-specific resources such as flow-director
8719 * ice_setup_hw_channel - setup new channel
8736 ch->base_q = vsi->next_base_q; in ice_setup_hw_channel()
8737 ch->type = type; in ice_setup_hw_channel()
8751 vsi->next_base_q = vsi->next_base_q + ch->num_rxq; in ice_setup_hw_channel()
8752 dev_dbg(dev, "added channel: vsi_num %u, num_rxq %u\n", ch->vsi_num, in ice_setup_hw_channel()
8753 ch->num_rxq); in ice_setup_hw_channel()
8759 * ice_setup_channel - setup new channel using uplink element
8775 if (vsi->type != ICE_VSI_PF) { in ice_setup_channel()
8776 dev_err(dev, "unsupported parent VSI type(%d)\n", vsi->type); in ice_setup_channel()
8780 sw_id = pf->first_sw->sw_id; in ice_setup_channel()
8790 return ch->ch_vsi ? true : false; in ice_setup_channel()
8794 * ice_set_bw_limit - setup BW limit for Tx traffic based on max_tx_rate
8812 * ice_create_q_channel - function to create channel
8821 struct ice_pf *pf = vsi->back; in ice_create_q_channel()
8825 return -EINVAL; in ice_create_q_channel()
8828 if (!ch->num_txq || !ch->num_rxq) { in ice_create_q_channel()
8829 dev_err(dev, "Invalid num_queues requested: %d\n", ch->num_rxq); in ice_create_q_channel()
8830 return -EINVAL; in ice_create_q_channel()
8833 if (!vsi->cnt_q_avail || vsi->cnt_q_avail < ch->num_txq) { in ice_create_q_channel()
8835 vsi->cnt_q_avail, ch->num_txq); in ice_create_q_channel()
8836 return -EINVAL; in ice_create_q_channel()
8841 return -EINVAL; in ice_create_q_channel()
8844 if (ch->ch_vsi && (ch->max_tx_rate || ch->min_tx_rate)) { in ice_create_q_channel()
8847 ret = ice_set_bw_limit(ch->ch_vsi, ch->max_tx_rate, in ice_create_q_channel()
8848 ch->min_tx_rate); in ice_create_q_channel()
8851 ch->max_tx_rate, ch->ch_vsi->vsi_num); in ice_create_q_channel()
8854 ch->max_tx_rate, ch->ch_vsi->vsi_num); in ice_create_q_channel()
8857 vsi->cnt_q_avail -= ch->num_txq; in ice_create_q_channel()
8863 * ice_rem_all_chnl_fltrs - removes all channel filters
8864 * @pf: ptr to PF; TC-flower based filters are tracked at the PF level
8867 * tc-flower based filter
8876 &pf->tc_flower_fltr_list, in ice_rem_all_chnl_fltrs()
8885 rule.rid = fltr->rid; in ice_rem_all_chnl_fltrs()
8886 rule.rule_id = fltr->rule_id; in ice_rem_all_chnl_fltrs()
8887 rule.vsi_handle = fltr->dest_vsi_handle; in ice_rem_all_chnl_fltrs()
8888 status = ice_rem_adv_rule_by_id(&pf->hw, &rule); in ice_rem_all_chnl_fltrs()
8890 if (status == -ENOENT) in ice_rem_all_chnl_fltrs()
8896 } else if (fltr->dest_vsi) { in ice_rem_all_chnl_fltrs()
8898 if (fltr->dest_vsi->type == ICE_VSI_CHNL) { in ice_rem_all_chnl_fltrs()
8899 u32 flags = fltr->flags; in ice_rem_all_chnl_fltrs()
8901 fltr->dest_vsi->num_chnl_fltr--; in ice_rem_all_chnl_fltrs()
8904 pf->num_dmac_chnl_fltrs--; in ice_rem_all_chnl_fltrs()
8908 hlist_del(&fltr->tc_flower_node); in ice_rem_all_chnl_fltrs()
8914 * ice_remove_q_channels - Remove queue channels for the TCs
8923 struct ice_pf *pf = vsi->back; in ice_remove_q_channels()
8926 /* remove all tc-flower based filters if they are channel filters only */ in ice_remove_q_channels()
8931 if (vsi->netdev->features & NETIF_F_NTUPLE) { in ice_remove_q_channels()
8932 struct ice_hw *hw = &pf->hw; in ice_remove_q_channels()
8934 mutex_lock(&hw->fdir_fltr_lock); in ice_remove_q_channels()
8936 mutex_unlock(&hw->fdir_fltr_lock); in ice_remove_q_channels()
8940 list_for_each_entry_safe(ch, ch_tmp, &vsi->ch_list, list) { in ice_remove_q_channels()
8943 list_del(&ch->list); in ice_remove_q_channels()
8944 ch_vsi = ch->ch_vsi; in ice_remove_q_channels()
8951 for (i = 0; i < ch->num_rxq; i++) { in ice_remove_q_channels()
8955 tx_ring = vsi->tx_rings[ch->base_q + i]; in ice_remove_q_channels()
8956 rx_ring = vsi->rx_rings[ch->base_q + i]; in ice_remove_q_channels()
8958 tx_ring->ch = NULL; in ice_remove_q_channels()
8959 if (tx_ring->q_vector) in ice_remove_q_channels()
8960 tx_ring->q_vector->ch = NULL; in ice_remove_q_channels()
8963 rx_ring->ch = NULL; in ice_remove_q_channels()
8964 if (rx_ring->q_vector) in ice_remove_q_channels()
8965 rx_ring->q_vector->ch = NULL; in ice_remove_q_channels()
8970 ice_fdir_rem_adq_chnl(&pf->hw, ch->ch_vsi->idx); in ice_remove_q_channels()
8973 ice_rm_vsi_lan_cfg(ch->ch_vsi->port_info, ch->ch_vsi->idx); in ice_remove_q_channels()
8976 ice_vsi_delete(ch->ch_vsi); in ice_remove_q_channels()
8984 vsi->tc_map_vsi[i] = NULL; in ice_remove_q_channels()
8987 vsi->all_enatc = 0; in ice_remove_q_channels()
8988 vsi->all_numtc = 0; in ice_remove_q_channels()
8992 * ice_rebuild_channels - rebuild channels
9011 if (!test_bit(ICE_FLAG_TC_MQPRIO, pf->flags) || in ice_rebuild_channels()
9012 main_vsi->old_numtc == 1) in ice_rebuild_channels()
9018 err = ice_vsi_cfg_tc(main_vsi, main_vsi->old_ena_tc); in ice_rebuild_channels()
9021 main_vsi->old_ena_tc, main_vsi->vsi_num); in ice_rebuild_channels()
9029 vsi = pf->vsi[i]; in ice_rebuild_channels()
9030 if (!vsi || vsi->type != ICE_VSI_CHNL) in ice_rebuild_channels()
9033 type = vsi->type; in ice_rebuild_channels()
9039 ice_vsi_type_str(type), vsi->idx, err); in ice_rebuild_channels()
9043 /* Re-map HW VSI number, using VSI handle that has been in ice_rebuild_channels()
9046 vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx); in ice_rebuild_channels()
9049 err = ice_replay_vsi(&pf->hw, vsi->idx); in ice_rebuild_channels()
9052 ice_vsi_type_str(type), err, vsi->idx); in ice_rebuild_channels()
9057 ice_vsi_type_str(type), vsi->idx); in ice_rebuild_channels()
9062 main_vsi->tc_map_vsi[tc_idx++] = vsi; in ice_rebuild_channels()
9068 list_for_each_entry(ch, &main_vsi->ch_list, list) { in ice_rebuild_channels()
9071 ch_vsi = ch->ch_vsi; in ice_rebuild_channels()
9078 /* replay BW rate limit if it is non-zero */ in ice_rebuild_channels()
9079 if (!ch->max_tx_rate && !ch->min_tx_rate) in ice_rebuild_channels()
9082 err = ice_set_bw_limit(ch_vsi, ch->max_tx_rate, in ice_rebuild_channels()
9083 ch->min_tx_rate); in ice_rebuild_channels()
9086 err, ch->max_tx_rate, ch->min_tx_rate, in ice_rebuild_channels()
9087 ch_vsi->vsi_num); in ice_rebuild_channels()
9090 ch->max_tx_rate, ch->min_tx_rate, in ice_rebuild_channels()
9091 ch_vsi->vsi_num); in ice_rebuild_channels()
9095 if (main_vsi->ch_rss_size) in ice_rebuild_channels()
9106 * ice_create_q_channels - Add queue channels for the given TCs
9113 struct ice_pf *pf = vsi->back; in ice_create_q_channels()
9118 if (!(vsi->all_enatc & BIT(i))) in ice_create_q_channels()
9123 ret = -ENOMEM; in ice_create_q_channels()
9126 INIT_LIST_HEAD(&ch->list); in ice_create_q_channels()
9127 ch->num_rxq = vsi->mqprio_qopt.qopt.count[i]; in ice_create_q_channels()
9128 ch->num_txq = vsi->mqprio_qopt.qopt.count[i]; in ice_create_q_channels()
9129 ch->base_q = vsi->mqprio_qopt.qopt.offset[i]; in ice_create_q_channels()
9130 ch->max_tx_rate = vsi->mqprio_qopt.max_rate[i]; in ice_create_q_channels()
9131 ch->min_tx_rate = vsi->mqprio_qopt.min_rate[i]; in ice_create_q_channels()
9134 if (ch->max_tx_rate) in ice_create_q_channels()
9135 ch->max_tx_rate = div_u64(ch->max_tx_rate, in ice_create_q_channels()
9137 if (ch->min_tx_rate) in ice_create_q_channels()
9138 ch->min_tx_rate = div_u64(ch->min_tx_rate, in ice_create_q_channels()
9148 list_add_tail(&ch->list, &vsi->ch_list); in ice_create_q_channels()
9149 vsi->tc_map_vsi[i] = ch->ch_vsi; in ice_create_q_channels()
9151 "successfully created channel: VSI %pK\n", ch->ch_vsi); in ice_create_q_channels()
9162 * ice_setup_tc_mqprio_qdisc - configure multiple traffic classes
9170 struct ice_vsi *vsi = np->vsi; in ice_setup_tc_mqprio_qdisc()
9171 struct ice_pf *pf = vsi->back; in ice_setup_tc_mqprio_qdisc()
9179 num_tcf = mqprio_qopt->qopt.num_tc; in ice_setup_tc_mqprio_qdisc()
9180 hw = mqprio_qopt->qopt.hw; in ice_setup_tc_mqprio_qdisc()
9181 mode = mqprio_qopt->mode; in ice_setup_tc_mqprio_qdisc()
9183 clear_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9184 vsi->ch_rss_size = 0; in ice_setup_tc_mqprio_qdisc()
9185 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in ice_setup_tc_mqprio_qdisc()
9196 if (pf->hw.port_info->is_custom_tx_enabled) { in ice_setup_tc_mqprio_qdisc()
9198 return -EBUSY; in ice_setup_tc_mqprio_qdisc()
9208 memcpy(&vsi->mqprio_qopt, mqprio_qopt, sizeof(*mqprio_qopt)); in ice_setup_tc_mqprio_qdisc()
9209 set_bit(ICE_FLAG_TC_MQPRIO, pf->flags); in ice_setup_tc_mqprio_qdisc()
9214 if (vsi->netdev->features & NETIF_F_HW_TC) in ice_setup_tc_mqprio_qdisc()
9215 set_bit(ICE_FLAG_CLS_FLOWER, pf->flags); in ice_setup_tc_mqprio_qdisc()
9218 return -EINVAL; in ice_setup_tc_mqprio_qdisc()
9224 if (ena_tc_qdisc == vsi->tc_cfg.ena_tc && in ice_setup_tc_mqprio_qdisc()
9231 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) in ice_setup_tc_mqprio_qdisc()
9234 if (!hw && !test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9235 vsi->req_txq = min_t(int, ice_get_avail_txq_count(pf), in ice_setup_tc_mqprio_qdisc()
9237 vsi->req_rxq = min_t(int, ice_get_avail_rxq_count(pf), in ice_setup_tc_mqprio_qdisc()
9240 /* logic to rebuild the VSI, same as with ethtool -L */ in ice_setup_tc_mqprio_qdisc()
9247 offset = vsi->mqprio_qopt.qopt.offset[i]; in ice_setup_tc_mqprio_qdisc()
9248 qcount_rx = vsi->mqprio_qopt.qopt.count[i]; in ice_setup_tc_mqprio_qdisc()
9249 qcount_tx = vsi->mqprio_qopt.qopt.count[i]; in ice_setup_tc_mqprio_qdisc()
9251 vsi->req_txq = offset + qcount_tx; in ice_setup_tc_mqprio_qdisc()
9252 vsi->req_rxq = offset + qcount_rx; in ice_setup_tc_mqprio_qdisc()
9255 * from ice_vsi_rebuild during the tc-qdisc delete stage - to in ice_setup_tc_mqprio_qdisc()
9258 vsi->orig_rss_size = vsi->rss_size; in ice_setup_tc_mqprio_qdisc()
9264 cur_txq = vsi->num_txq; in ice_setup_tc_mqprio_qdisc()
9265 cur_rxq = vsi->num_rxq; in ice_setup_tc_mqprio_qdisc()
9272 vsi->req_txq = cur_txq; in ice_setup_tc_mqprio_qdisc()
9273 vsi->req_rxq = cur_rxq; in ice_setup_tc_mqprio_qdisc()
9274 clear_bit(ICE_RESET_FAILED, pf->state); in ice_setup_tc_mqprio_qdisc()
9281 vsi->all_numtc = num_tcf; in ice_setup_tc_mqprio_qdisc()
9282 vsi->all_enatc = ena_tc_qdisc; in ice_setup_tc_mqprio_qdisc()
9286 vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9290 if (test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) { in ice_setup_tc_mqprio_qdisc()
9291 u64 max_tx_rate = vsi->mqprio_qopt.max_rate[0]; in ice_setup_tc_mqprio_qdisc()
9292 u64 min_tx_rate = vsi->mqprio_qopt.min_rate[0]; in ice_setup_tc_mqprio_qdisc()
9305 max_tx_rate, min_tx_rate, vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9308 max_tx_rate, min_tx_rate, vsi->vsi_num); in ice_setup_tc_mqprio_qdisc()
9321 if (vsi->ch_rss_size) in ice_setup_tc_mqprio_qdisc()
9327 vsi->all_numtc = 0; in ice_setup_tc_mqprio_qdisc()
9328 vsi->all_enatc = 0; in ice_setup_tc_mqprio_qdisc()
9343 struct ice_pf *pf = np->vsi->back; in ice_setup_tc()
9356 return -EOPNOTSUPP; in ice_setup_tc()
9359 if (pf->adev) { in ice_setup_tc()
9360 mutex_lock(&pf->adev_mutex); in ice_setup_tc()
9361 device_lock(&pf->adev->dev); in ice_setup_tc()
9363 if (pf->adev->dev.driver) { in ice_setup_tc()
9365 err = -EBUSY; in ice_setup_tc()
9371 mutex_lock(&pf->tc_mutex); in ice_setup_tc()
9373 mutex_unlock(&pf->tc_mutex); in ice_setup_tc()
9377 device_unlock(&pf->adev->dev); in ice_setup_tc()
9378 mutex_unlock(&pf->adev_mutex); in ice_setup_tc()
9382 return -EOPNOTSUPP; in ice_setup_tc()
9384 return -EOPNOTSUPP; in ice_setup_tc()
9393 list_for_each_entry(cb_priv, &np->tc_indr_block_priv_list, list) { in ice_indr_block_priv_lookup()
9394 if (!cb_priv->netdev) in ice_indr_block_priv_lookup()
9396 if (cb_priv->netdev == netdev) in ice_indr_block_priv_lookup()
9407 struct ice_netdev_priv *np = priv->np; in ice_indr_setup_block_cb()
9411 return ice_setup_tc_cls_flower(np, priv->netdev, in ice_indr_setup_block_cb()
9415 return -EOPNOTSUPP; in ice_indr_setup_block_cb()
9430 vlan_dev_real_dev(netdev) == np->vsi->netdev)) in ice_indr_setup_tc_block()
9431 return -EOPNOTSUPP; in ice_indr_setup_tc_block()
9433 if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) in ice_indr_setup_tc_block()
9434 return -EOPNOTSUPP; in ice_indr_setup_tc_block()
9436 switch (f->command) { in ice_indr_setup_tc_block()
9440 return -EEXIST; in ice_indr_setup_tc_block()
9444 return -ENOMEM; in ice_indr_setup_tc_block()
9446 indr_priv->netdev = netdev; in ice_indr_setup_tc_block()
9447 indr_priv->np = np; in ice_indr_setup_tc_block()
9448 list_add(&indr_priv->list, &np->tc_indr_block_priv_list); in ice_indr_setup_tc_block()
9458 list_del(&indr_priv->list); in ice_indr_setup_tc_block()
9463 list_add_tail(&block_cb->driver_list, &ice_block_cb_list); in ice_indr_setup_tc_block()
9468 return -ENOENT; in ice_indr_setup_tc_block()
9470 block_cb = flow_block_cb_lookup(f->block, in ice_indr_setup_tc_block()
9474 return -ENOENT; in ice_indr_setup_tc_block()
9478 list_del(&block_cb->driver_list); in ice_indr_setup_tc_block()
9481 return -EOPNOTSUPP; in ice_indr_setup_tc_block()
9498 return -EOPNOTSUPP; in ice_indr_setup_tc_cb()
9503 * ice_open - Called when a network interface becomes active
9517 struct ice_pf *pf = np->vsi->back; in ice_open()
9519 if (ice_is_reset_in_progress(pf->state)) { in ice_open()
9521 return -EBUSY; in ice_open()
9528 * ice_open_internal - Called when a network interface becomes active
9539 struct ice_vsi *vsi = np->vsi; in ice_open_internal()
9540 struct ice_pf *pf = vsi->back; in ice_open_internal()
9544 if (test_bit(ICE_NEEDS_RESTART, pf->state)) { in ice_open_internal()
9546 return -EIO; in ice_open_internal()
9551 pi = vsi->port_info; in ice_open_internal()
9558 ice_check_link_cfg_err(pf, pi->phy.link_info.link_cfg_err); in ice_open_internal()
9561 if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) { in ice_open_internal()
9562 clear_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9563 if (!test_bit(ICE_PHY_INIT_COMPLETE, pf->state)) { in ice_open_internal()
9579 set_bit(ICE_FLAG_NO_MEDIA, pf->flags); in ice_open_internal()
9586 vsi->vsi_num, vsi->vsw->sw_id); in ice_open_internal()
9595 * ice_stop - Disables a network interface
9598 * The stop entry point is called when an interface is deactivated by the OS,
9602 * Returns success only - not allowed to fail
9607 struct ice_vsi *vsi = np->vsi; in ice_stop()
9608 struct ice_pf *pf = vsi->back; in ice_stop()
9610 if (ice_is_reset_in_progress(pf->state)) { in ice_stop()
9612 return -EBUSY; in ice_stop()
9615 if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) { in ice_stop()
9619 if (link_err == -ENOMEDIUM) in ice_stop()
9620 netdev_info(vsi->netdev, "Skipping link reconfig - no media attached, VSI %d\n", in ice_stop()
9621 vsi->vsi_num); in ice_stop()
9623 netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n", in ice_stop()
9624 vsi->vsi_num, link_err); in ice_stop()
9627 return -EIO; in ice_stop()
9637 * ice_features_check - Validate encapsulated packet conforms to limits
9654 if (skb->ip_summed != CHECKSUM_PARTIAL) in ice_features_check()
9657 /* We cannot support GSO if the MSS is going to be less than in ice_features_check()
9660 if (gso && (skb_shinfo(skb)->gso_size < ICE_TXD_CTX_MIN_MSS)) in ice_features_check()
9671 if (skb->encapsulation) { in ice_features_check()
9677 if (gso && (skb_shinfo(skb)->gso_type & in ice_features_check()
9679 len = skb_inner_network_header(skb) - in ice_features_check()
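/* Illustrative sketch (not driver code; the listing ends mid-function):
 * the MSS test above guards the hardware's minimum TSO segment size - if a
 * GSO skb's gso_size is below the floor, the GSO feature bits are cleared
 * for this packet and the stack falls back to software segmentation. Toy
 * below with an assumed 64-byte floor and a made-up feature mask:
 */
#include <stdio.h>

#define TXD_CTX_MIN_MSS	64u	/* assumed hardware MSS floor */
#define GSO_FEATURES	0x00f0ul	/* made-up GSO feature bits */

int main(void)
{
	unsigned int gso_size = 48;
	unsigned long features = 0xfffful;	/* all features offered */

	if (gso_size < TXD_CTX_MIN_MSS)
		features &= ~GSO_FEATURES;	/* force software GSO */
	printf("features = 0x%04lx\n", features);
	return 0;
}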