Lines Matching +full:75 +full:- +full:ec
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 2013 - 2018 Intel Corporation. */
14 * struct i40e_stats - definition for an ethtool statistic
15 * @stat_string: statistic name to display in ethtool -S output
63 I40E_QUEUE_STAT("%s-%u.packets", stats.packets),
64 I40E_QUEUE_STAT("%s-%u.bytes", stats.bytes),
68 * i40e_add_one_ethtool_stat - copy the stat into the supplied buffer
91 p = (char *)pointer + stat->stat_offset; in i40e_add_one_ethtool_stat()
92 switch (stat->sizeof_stat) { in i40e_add_one_ethtool_stat()
107 stat->stat_string); in i40e_add_one_ethtool_stat()
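
The two fragments above (the stat descriptor fields stat_string/sizeof_stat/stat_offset and the offset-based copy) follow a common table-driven pattern. Below is a minimal, self-contained sketch of that pattern; all demo_* names are illustrative assumptions, not the driver's definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_stats { uint64_t packets; uint64_t bytes; };

/* One table entry: display name, field size, field offset. */
struct demo_stat_desc {
        const char *name;
        int size;
        int offset;
};

#define DEMO_STAT(_name, _field) {                              \
        _name,                                                  \
        sizeof(((struct demo_stats *)0)->_field),               \
        offsetof(struct demo_stats, _field)                     \
}

static const struct demo_stat_desc demo_descs[] = {
        DEMO_STAT("packets", packets),
        DEMO_STAT("bytes", bytes),
};

/* Copy one statistic through its recorded offset and size. */
static void demo_copy_one(uint64_t *out, const void *base,
                          const struct demo_stat_desc *d)
{
        const char *p = (const char *)base + d->offset;

        if (d->size == sizeof(uint64_t))
                memcpy(out, p, sizeof(uint64_t));
        else
                *out = 0;       /* unexpected size: report zero */
}

int main(void)
{
        struct demo_stats s = { .packets = 7, .bytes = 512 };
        uint64_t buf[2];
        size_t i;

        for (i = 0; i < sizeof(demo_descs) / sizeof(demo_descs[0]); i++)
                demo_copy_one(&buf[i], &s, &demo_descs[i]);
        printf("%s=%llu %s=%llu\n",
               demo_descs[0].name, (unsigned long long)buf[0],
               demo_descs[1].name, (unsigned long long)buf[1]);
        return 0;
}
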
113 * __i40e_add_ethtool_stats - copy stats into the ethtool supplied buffer
137 * i40e_add_ethtool_stats - copy stats into ethtool supplied buffer
153 * i40e_add_queue_stats - copy queue statistics into supplied buffer
177 * non-null before attempting to access its syncp. in i40e_add_queue_stats()
180 start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp); in i40e_add_queue_stats()
185 } while (ring && u64_stats_fetch_retry(&ring->syncp, start)); in i40e_add_queue_stats()
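
The loop above is the standard u64_stats_sync read pattern, with an extra guard so a NULL ring simply reads as zero. A hedged kernel-style sketch of the same idea (the demo_* names are assumptions, not the driver's types):

#include <linux/u64_stats_sync.h>

struct demo_ring {
        struct u64_stats_sync syncp;
        u64 packets;
        u64 bytes;
};

static void demo_read_ring(const struct demo_ring *ring, u64 *pkts, u64 *bytes)
{
        unsigned int start;

        do {
                /* Only touch syncp when the ring actually exists. */
                start = !ring ? 0 : u64_stats_fetch_begin(&ring->syncp);
                *pkts = ring ? ring->packets : 0;
                *bytes = ring ? ring->bytes : 0;
        } while (ring && u64_stats_fetch_retry(&ring->syncp, start));
}
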
192 * __i40e_add_stat_strings - copy stat strings into ethtool buffer
216 * i40e_add_stat_strings - copy stat strings into ethtool buffer
313 * The PF_STATs are appended to the netdev stats only when ethtool -S
446 I40E_PRIV_FLAG("total-port-shutdown",
449 I40E_PRIV_FLAG("flow-director-atr", I40E_FLAG_FD_ATR_ENA, 0),
450 I40E_PRIV_FLAG("veb-stats", I40E_FLAG_VEB_STATS_ENA, 0),
451 I40E_PRIV_FLAG("hw-atr-eviction", I40E_FLAG_HW_ATR_EVICT_ENA, 0),
452 I40E_PRIV_FLAG("link-down-on-close",
454 I40E_PRIV_FLAG("legacy-rx", I40E_FLAG_LEGACY_RX_ENA, 0),
455 I40E_PRIV_FLAG("disable-source-pruning",
457 I40E_PRIV_FLAG("disable-fw-lldp", I40E_FLAG_FW_LLDP_DIS, 0),
458 I40E_PRIV_FLAG("rs-fec", I40E_FLAG_RS_FEC, 0),
459 I40E_PRIV_FLAG("base-r-fec", I40E_FLAG_BASE_R_FEC, 0),
460 I40E_PRIV_FLAG("vf-vlan-pruning",
462 I40E_PRIV_FLAG("mdd-auto-reset-vf",
470 I40E_PRIV_FLAG("vf-true-promisc-support",
477 * i40e_partition_setting_complaint - generic complaint for MFP restriction
482 dev_info(&pf->pdev->dev, in i40e_partition_setting_complaint()
487 * i40e_phy_type_to_ethtool - convert the phy_types to ethtool link modes
495 struct i40e_link_status *hw_link_info = &pf->hw.phy.link_info; in i40e_phy_type_to_ethtool()
496 u64 phy_types = pf->hw.phy.phy_types; in i40e_phy_type_to_ethtool()
504 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_phy_type_to_ethtool()
507 if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) { in i40e_phy_type_to_ethtool()
521 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
528 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
535 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB) in i40e_phy_type_to_ethtool()
542 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB) in i40e_phy_type_to_ethtool()
555 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_40GB) in i40e_phy_type_to_ethtool()
562 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) in i40e_phy_type_to_ethtool()
569 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_phy_type_to_ethtool()
594 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_20GB) in i40e_phy_type_to_ethtool()
601 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
606 !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) { in i40e_phy_type_to_ethtool()
609 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
614 !test_bit(I40E_HW_CAP_CRT_RETIMER, pf->hw.caps)) { in i40e_phy_type_to_ethtool()
617 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_phy_type_to_ethtool()
625 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) in i40e_phy_type_to_ethtool()
632 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) in i40e_phy_type_to_ethtool()
640 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) in i40e_phy_type_to_ethtool()
648 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) in i40e_phy_type_to_ethtool()
661 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_25GB) { in i40e_phy_type_to_ethtool()
675 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
682 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
689 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_phy_type_to_ethtool()
698 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_phy_type_to_ethtool()
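
The repeated pattern in i40e_phy_type_to_ethtool() is: if the hardware reports a PHY type, add the matching mode to the supported mask, and also to the advertising mask when that speed is present in requested_speeds. A short kernel-style sketch of one such case, using the standard ethtool_link_ksettings helper (the surrounding names are assumed):

static void demo_add_10gbaset(struct ethtool_link_ksettings *ks,
                              bool speed_requested)
{
        ethtool_link_ksettings_add_link_mode(ks, supported, 10000baseT_Full);
        if (speed_requested)
                ethtool_link_ksettings_add_link_mode(ks, advertising,
                                                     10000baseT_Full);
}
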
735 * i40e_get_settings_link_up_fec - Get the FEC mode encoding from mask
765 * i40e_get_settings_link_up - Get the Link settings for when link is up
776 struct i40e_link_status *hw_link_info = &hw->phy.link_info; in i40e_get_settings_link_up()
778 u32 link_speed = hw_link_info->link_speed; in i40e_get_settings_link_up()
781 switch (hw_link_info->phy_type) { in i40e_get_settings_link_up()
823 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
838 if (hw_link_info->module_type[2] & in i40e_get_settings_link_up()
840 hw_link_info->module_type[2] & in i40e_get_settings_link_up()
844 if (hw_link_info->requested_speeds & in i40e_get_settings_link_up()
849 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_get_settings_link_up()
870 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_get_settings_link_up()
873 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_5GB) in i40e_get_settings_link_up()
876 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_2_5GB) in i40e_get_settings_link_up()
879 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_get_settings_link_up()
882 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_100MB) in i40e_get_settings_link_up()
910 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_10GB) in i40e_get_settings_link_up()
913 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
919 if (hw_link_info->requested_speeds & I40E_LINK_SPEED_1GB) in i40e_get_settings_link_up()
922 if (test_bit(I40E_HW_CAP_100M_SGMII, pf->hw.caps)) { in i40e_get_settings_link_up()
925 if (hw_link_info->requested_speeds & in i40e_get_settings_link_up()
954 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
972 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
983 i40e_get_settings_link_up_fec(hw_link_info->req_fec_info, ks); in i40e_get_settings_link_up()
994 hw_link_info->phy_type); in i40e_get_settings_link_up()
1008 ks->base.speed = SPEED_40000; in i40e_get_settings_link_up()
1011 ks->base.speed = SPEED_25000; in i40e_get_settings_link_up()
1014 ks->base.speed = SPEED_20000; in i40e_get_settings_link_up()
1017 ks->base.speed = SPEED_10000; in i40e_get_settings_link_up()
1020 ks->base.speed = SPEED_5000; in i40e_get_settings_link_up()
1023 ks->base.speed = SPEED_2500; in i40e_get_settings_link_up()
1026 ks->base.speed = SPEED_1000; in i40e_get_settings_link_up()
1029 ks->base.speed = SPEED_100; in i40e_get_settings_link_up()
1032 ks->base.speed = SPEED_UNKNOWN; in i40e_get_settings_link_up()
1035 ks->base.duplex = DUPLEX_FULL; in i40e_get_settings_link_up()
1039 * i40e_get_settings_link_down - Get the Link settings for when link is down
1056 ks->base.speed = SPEED_UNKNOWN; in i40e_get_settings_link_down()
1057 ks->base.duplex = DUPLEX_UNKNOWN; in i40e_get_settings_link_down()
1061 * i40e_get_link_ksettings - Get Link Speed and Duplex settings
1071 struct i40e_pf *pf = np->vsi->back; in i40e_get_link_ksettings()
1072 struct i40e_hw *hw = &pf->hw; in i40e_get_link_ksettings()
1073 struct i40e_link_status *hw_link_info = &hw->phy.link_info; in i40e_get_link_ksettings()
1074 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; in i40e_get_link_ksettings()
1086 ks->base.autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? in i40e_get_link_ksettings()
1090 switch (hw->phy.media_type) { in i40e_get_link_ksettings()
1097 ks->base.port = PORT_NONE; in i40e_get_link_ksettings()
1102 ks->base.port = PORT_TP; in i40e_get_link_ksettings()
1108 ks->base.port = PORT_DA; in i40e_get_link_ksettings()
1113 ks->base.port = PORT_FIBRE; in i40e_get_link_ksettings()
1117 ks->base.port = PORT_OTHER; in i40e_get_link_ksettings()
1125 switch (hw->fc.requested_mode) { in i40e_get_link_ksettings()
1150 * i40e_speed_to_link_speed - Translate decimal speed to i40e_aq_link_speed
1195 ks->link_modes.supported)) { in i40e_speed_to_link_speed()
1214 * i40e_set_link_ksettings - Set Speed and Duplex
1228 struct i40e_pf *pf = np->vsi->back; in i40e_set_link_ksettings()
1230 struct i40e_vsi *vsi = np->vsi; in i40e_set_link_ksettings()
1231 struct i40e_hw *hw = &pf->hw; in i40e_set_link_ksettings()
1242 if (hw->partition_id != 1) { in i40e_set_link_ksettings()
1244 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1246 if (vsi->type != I40E_VSI_MAIN) in i40e_set_link_ksettings()
1247 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1248 if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && in i40e_set_link_ksettings()
1249 hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && in i40e_set_link_ksettings()
1250 hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE && in i40e_set_link_ksettings()
1251 hw->phy.media_type != I40E_MEDIA_TYPE_DA && in i40e_set_link_ksettings()
1252 hw->phy.link_info.link_info & I40E_AQ_LINK_UP) in i40e_set_link_ksettings()
1253 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1254 if (hw->device_id == I40E_DEV_ID_KX_B || in i40e_set_link_ksettings()
1255 hw->device_id == I40E_DEV_ID_KX_C || in i40e_set_link_ksettings()
1256 hw->device_id == I40E_DEV_ID_20G_KR2 || in i40e_set_link_ksettings()
1257 hw->device_id == I40E_DEV_ID_20G_KR2_A || in i40e_set_link_ksettings()
1258 hw->device_id == I40E_DEV_ID_25G_B || in i40e_set_link_ksettings()
1259 hw->device_id == I40E_DEV_ID_KX_X722) { in i40e_set_link_ksettings()
1261 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1284 return -EINVAL; in i40e_set_link_ksettings()
1296 return -EOPNOTSUPP; in i40e_set_link_ksettings()
1299 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_set_link_ksettings()
1300 timeout--; in i40e_set_link_ksettings()
1302 return -EBUSY; in i40e_set_link_ksettings()
1310 err = -EAGAIN; in i40e_set_link_ksettings()
1323 if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) { in i40e_set_link_ksettings()
1329 err = -EINVAL; in i40e_set_link_ksettings()
1339 if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) { in i40e_set_link_ksettings()
1346 hw->phy.media_type != I40E_MEDIA_TYPE_BASET) { in i40e_set_link_ksettings()
1348 err = -EINVAL; in i40e_set_link_ksettings()
1414 err = -EOPNOTSUPP; in i40e_set_link_ksettings()
1423 err = -EOPNOTSUPP; in i40e_set_link_ksettings()
1445 hw->phy.link_info.requested_speeds = config.link_speed; in i40e_set_link_ksettings()
1449 if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP) { in i40e_set_link_ksettings()
1464 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_link_ksettings()
1465 err = -EAGAIN; in i40e_set_link_ksettings()
1474 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_link_ksettings()
1481 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_set_link_ksettings()
1490 struct i40e_pf *pf = np->vsi->back; in i40e_set_fec_cfg()
1491 struct i40e_hw *hw = &pf->hw; in i40e_set_fec_cfg()
1500 err = -EAGAIN; in i40e_set_fec_cfg()
1522 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_fec_cfg()
1523 err = -EAGAIN; in i40e_set_fec_cfg()
1526 i40e_set_fec_in_flags(fec_cfg, pf->flags); in i40e_set_fec_cfg()
1536 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_fec_cfg()
1548 struct i40e_pf *pf = np->vsi->back; in i40e_get_fec_param()
1549 struct i40e_hw *hw = &pf->hw; in i40e_get_fec_param()
1559 err = -EAGAIN; in i40e_get_fec_param()
1563 fecparam->fec = 0; in i40e_get_fec_param()
1566 fecparam->fec |= ETHTOOL_FEC_AUTO; in i40e_get_fec_param()
1569 fecparam->fec |= ETHTOOL_FEC_RS; in i40e_get_fec_param()
1572 fecparam->fec |= ETHTOOL_FEC_BASER; in i40e_get_fec_param()
1574 fecparam->fec |= ETHTOOL_FEC_OFF; in i40e_get_fec_param()
1576 if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_KR_ENA) in i40e_get_fec_param()
1577 fecparam->active_fec = ETHTOOL_FEC_BASER; in i40e_get_fec_param()
1578 else if (hw->phy.link_info.fec_info & I40E_AQ_CONFIG_FEC_RS_ENA) in i40e_get_fec_param()
1579 fecparam->active_fec = ETHTOOL_FEC_RS; in i40e_get_fec_param()
1581 fecparam->active_fec = ETHTOOL_FEC_OFF; in i40e_get_fec_param()
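
i40e_get_fec_param() above fills struct ethtool_fecparam with both the configured modes (fec) and the mode the link is actually running with (active_fec). A reduced sketch of that split, with the hardware state abstracted into two booleans purely for illustration:

static void demo_get_fecparam(struct ethtool_fecparam *fecparam,
                              bool rs_active, bool baser_active)
{
        /* Modes the link may be configured to use. */
        fecparam->fec = ETHTOOL_FEC_OFF | ETHTOOL_FEC_RS | ETHTOOL_FEC_BASER;

        /* Mode currently in effect on the link. */
        if (rs_active)
                fecparam->active_fec = ETHTOOL_FEC_RS;
        else if (baser_active)
                fecparam->active_fec = ETHTOOL_FEC_BASER;
        else
                fecparam->active_fec = ETHTOOL_FEC_OFF;
}
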
1590 struct i40e_pf *pf = np->vsi->back; in i40e_set_fec_param()
1591 struct i40e_hw *hw = &pf->hw; in i40e_set_fec_param()
1594 if (hw->device_id != I40E_DEV_ID_25G_SFP28 && in i40e_set_fec_param()
1595 hw->device_id != I40E_DEV_ID_25G_B && in i40e_set_fec_param()
1596 hw->device_id != I40E_DEV_ID_KX_X722) in i40e_set_fec_param()
1597 return -EPERM; in i40e_set_fec_param()
1599 if (hw->mac.type == I40E_MAC_X722 && in i40e_set_fec_param()
1600 !test_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps)) { in i40e_set_fec_param()
1602 return -EOPNOTSUPP; in i40e_set_fec_param()
1605 switch (fecparam->fec) { in i40e_set_fec_param()
1622 dev_warn(&pf->pdev->dev, "Unsupported FEC mode: %d", in i40e_set_fec_param()
1623 fecparam->fec); in i40e_set_fec_param()
1624 return -EINVAL; in i40e_set_fec_param()
1634 struct i40e_pf *pf = np->vsi->back; in i40e_nway_reset()
1635 struct i40e_hw *hw = &pf->hw; in i40e_nway_reset()
1636 bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP; in i40e_nway_reset()
1643 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_nway_reset()
1644 return -EIO; in i40e_nway_reset()
1651 * i40e_get_pauseparam - Get Flow Control status
1655 * Return tx/rx-pause status
1661 struct i40e_pf *pf = np->vsi->back; in i40e_get_pauseparam()
1662 struct i40e_hw *hw = &pf->hw; in i40e_get_pauseparam()
1663 struct i40e_link_status *hw_link_info = &hw->phy.link_info; in i40e_get_pauseparam()
1664 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; in i40e_get_pauseparam()
1666 pause->autoneg = in i40e_get_pauseparam()
1667 ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? in i40e_get_pauseparam()
1671 if (dcbx_cfg->pfc.pfcenable) { in i40e_get_pauseparam()
1672 pause->rx_pause = 0; in i40e_get_pauseparam()
1673 pause->tx_pause = 0; in i40e_get_pauseparam()
1677 if (hw->fc.current_mode == I40E_FC_RX_PAUSE) { in i40e_get_pauseparam()
1678 pause->rx_pause = 1; in i40e_get_pauseparam()
1679 } else if (hw->fc.current_mode == I40E_FC_TX_PAUSE) { in i40e_get_pauseparam()
1680 pause->tx_pause = 1; in i40e_get_pauseparam()
1681 } else if (hw->fc.current_mode == I40E_FC_FULL) { in i40e_get_pauseparam()
1682 pause->rx_pause = 1; in i40e_get_pauseparam()
1683 pause->tx_pause = 1; in i40e_get_pauseparam()
1688 * i40e_set_pauseparam - Set Flow Control parameter
1696 struct i40e_pf *pf = np->vsi->back; in i40e_set_pauseparam()
1697 struct i40e_vsi *vsi = np->vsi; in i40e_set_pauseparam()
1698 struct i40e_hw *hw = &pf->hw; in i40e_set_pauseparam()
1699 struct i40e_link_status *hw_link_info = &hw->phy.link_info; in i40e_set_pauseparam()
1700 struct i40e_dcbx_config *dcbx_cfg = &hw->local_dcbx_config; in i40e_set_pauseparam()
1701 bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP; in i40e_set_pauseparam()
1710 if (hw->partition_id != 1) { in i40e_set_pauseparam()
1712 return -EOPNOTSUPP; in i40e_set_pauseparam()
1715 if (vsi->type != I40E_VSI_MAIN) in i40e_set_pauseparam()
1716 return -EOPNOTSUPP; in i40e_set_pauseparam()
1718 is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED; in i40e_set_pauseparam()
1719 if (pause->autoneg != is_an) { in i40e_set_pauseparam()
1720 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); in i40e_set_pauseparam()
1721 return -EOPNOTSUPP; in i40e_set_pauseparam()
1725 if (!test_bit(__I40E_DOWN, pf->state) && !is_an) { in i40e_set_pauseparam()
1730 if (dcbx_cfg->pfc.pfcenable) { in i40e_set_pauseparam()
1733 return -EOPNOTSUPP; in i40e_set_pauseparam()
1736 if (pause->rx_pause && pause->tx_pause) in i40e_set_pauseparam()
1737 hw->fc.requested_mode = I40E_FC_FULL; in i40e_set_pauseparam()
1738 else if (pause->rx_pause && !pause->tx_pause) in i40e_set_pauseparam()
1739 hw->fc.requested_mode = I40E_FC_RX_PAUSE; in i40e_set_pauseparam()
1740 else if (!pause->rx_pause && pause->tx_pause) in i40e_set_pauseparam()
1741 hw->fc.requested_mode = I40E_FC_TX_PAUSE; in i40e_set_pauseparam()
1742 else if (!pause->rx_pause && !pause->tx_pause) in i40e_set_pauseparam()
1743 hw->fc.requested_mode = I40E_FC_NONE; in i40e_set_pauseparam()
1745 return -EINVAL; in i40e_set_pauseparam()
1760 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_pauseparam()
1761 err = -EAGAIN; in i40e_set_pauseparam()
1766 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_pauseparam()
1767 err = -EAGAIN; in i40e_set_pauseparam()
1772 i40e_aq_str(hw, hw->aq.asq_last_status)); in i40e_set_pauseparam()
1773 err = -EAGAIN; in i40e_set_pauseparam()
1776 if (!test_bit(__I40E_DOWN, pf->state) && is_an) { in i40e_set_pauseparam()
1778 msleep(75); in i40e_set_pauseparam()
1779 if (!test_bit(__I40E_DOWN, pf->state)) in i40e_set_pauseparam()
1789 struct i40e_pf *pf = np->vsi->back; in i40e_get_msglevel()
1790 u32 debug_mask = pf->hw.debug_mask; in i40e_get_msglevel()
1795 return pf->msg_enable; in i40e_get_msglevel()
1801 struct i40e_pf *pf = np->vsi->back; in i40e_set_msglevel()
1804 pf->hw.debug_mask = data; in i40e_set_msglevel()
1806 pf->msg_enable = data; in i40e_set_msglevel()
1824 struct i40e_pf *pf = np->vsi->back; in i40e_get_regs()
1825 struct i40e_hw *hw = &pf->hw; in i40e_get_regs()
1830 /* Tell ethtool which driver-version-specific regs output we have. in i40e_get_regs()
1837 regs->version = 1; in i40e_get_regs()
1855 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_get_eeprom()
1856 struct i40e_pf *pf = np->vsi->back; in i40e_get_eeprom()
1864 if (eeprom->len == 0) in i40e_get_eeprom()
1865 return -EINVAL; in i40e_get_eeprom()
1868 magic = hw->vendor_id | (hw->device_id << 16); in i40e_get_eeprom()
1869 if (eeprom->magic && eeprom->magic != magic) { in i40e_get_eeprom()
1874 if ((eeprom->magic >> 16) != hw->device_id) in i40e_get_eeprom()
1875 errno = -EINVAL; in i40e_get_eeprom()
1876 else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_get_eeprom()
1877 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_get_eeprom()
1878 errno = -EBUSY; in i40e_get_eeprom()
1882 if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) in i40e_get_eeprom()
1883 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1885 ret_val, hw->aq.asq_last_status, errno, in i40e_get_eeprom()
1886 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), in i40e_get_eeprom()
1887 cmd->offset, cmd->data_size); in i40e_get_eeprom()
1893 eeprom->magic = hw->vendor_id | (hw->device_id << 16); in i40e_get_eeprom()
1895 eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL); in i40e_get_eeprom()
1897 return -ENOMEM; in i40e_get_eeprom()
1901 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1903 ret_val, hw->aq.asq_last_status); in i40e_get_eeprom()
1907 sectors = eeprom->len / I40E_NVM_SECTOR_SIZE; in i40e_get_eeprom()
1908 sectors += (eeprom->len % I40E_NVM_SECTOR_SIZE) ? 1 : 0; in i40e_get_eeprom()
1912 if (i == (sectors - 1)) { in i40e_get_eeprom()
1913 len = eeprom->len - (I40E_NVM_SECTOR_SIZE * i); in i40e_get_eeprom()
1916 offset = eeprom->offset + (I40E_NVM_SECTOR_SIZE * i); in i40e_get_eeprom()
1920 if (ret_val && hw->aq.asq_last_status == I40E_AQ_RC_EPERM) { in i40e_get_eeprom()
1921 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1926 hw->aq.asq_last_status == I40E_AQ_RC_EACCES) { in i40e_get_eeprom()
1927 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1932 dev_info(&pf->pdev->dev, in i40e_get_eeprom()
1934 offset, ret_val, hw->aq.asq_last_status); in i40e_get_eeprom()
1940 memcpy(bytes, (u8 *)eeprom_buff, eeprom->len); in i40e_get_eeprom()
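
The read path above fetches the NVM in fixed-size sectors: a ceiling division gives the sector count, and the final sector carries only the remaining bytes. The arithmetic in isolation (demo_* names and the read callback are placeholders, not driver code):

static void demo_read_chunks(u32 base, u32 len, u32 sector,
                             void (*read)(u32 offset, u32 bytes))
{
        u32 sectors = len / sector + (len % sector ? 1 : 0); /* ceiling divide */
        u32 i;

        for (i = 0; i < sectors; i++) {
                u32 bytes = (i == sectors - 1) ? len - sector * i : sector;

                read(base + sector * i, bytes);
        }
}
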
1949 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_get_eeprom_len()
1953 if (hw->mac.type == I40E_MAC_X722) { in i40e_get_eeprom_len()
1968 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_set_eeprom()
1969 struct i40e_pf *pf = np->vsi->back; in i40e_set_eeprom()
1976 magic = hw->vendor_id | (hw->device_id << 16); in i40e_set_eeprom()
1977 if (eeprom->magic == magic) in i40e_set_eeprom()
1978 errno = -EOPNOTSUPP; in i40e_set_eeprom()
1980 else if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id) in i40e_set_eeprom()
1981 errno = -EINVAL; in i40e_set_eeprom()
1982 else if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_set_eeprom()
1983 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_set_eeprom()
1984 errno = -EBUSY; in i40e_set_eeprom()
1988 if ((errno || ret_val) && (hw->debug_mask & I40E_DEBUG_NVM)) in i40e_set_eeprom()
1989 dev_info(&pf->pdev->dev, in i40e_set_eeprom()
1991 ret_val, hw->aq.asq_last_status, errno, in i40e_set_eeprom()
1992 (u8)(cmd->config & I40E_NVM_MOD_PNT_MASK), in i40e_set_eeprom()
1993 cmd->offset, cmd->data_size); in i40e_set_eeprom()
2002 struct i40e_vsi *vsi = np->vsi; in i40e_get_drvinfo()
2003 struct i40e_pf *pf = vsi->back; in i40e_get_drvinfo()
2005 strscpy(drvinfo->driver, i40e_driver_name, sizeof(drvinfo->driver)); in i40e_get_drvinfo()
2006 i40e_nvm_version_str(&pf->hw, drvinfo->fw_version, in i40e_get_drvinfo()
2007 sizeof(drvinfo->fw_version)); in i40e_get_drvinfo()
2008 strscpy(drvinfo->bus_info, pci_name(pf->pdev), in i40e_get_drvinfo()
2009 sizeof(drvinfo->bus_info)); in i40e_get_drvinfo()
2010 drvinfo->n_priv_flags = I40E_PRIV_FLAGS_STR_LEN; in i40e_get_drvinfo()
2011 if (pf->hw.pf_id == 0) in i40e_get_drvinfo()
2012 drvinfo->n_priv_flags += I40E_GL_PRIV_FLAGS_STR_LEN; in i40e_get_drvinfo()
2017 struct i40e_hw *hw = &pf->hw; in i40e_get_max_num_descriptors()
2019 switch (hw->mac.type) { in i40e_get_max_num_descriptors()
2033 struct i40e_pf *pf = np->vsi->back; in i40e_get_ringparam()
2036 ring->rx_max_pending = i40e_get_max_num_descriptors(pf); in i40e_get_ringparam()
2037 ring->tx_max_pending = i40e_get_max_num_descriptors(pf); in i40e_get_ringparam()
2038 ring->rx_mini_max_pending = 0; in i40e_get_ringparam()
2039 ring->rx_jumbo_max_pending = 0; in i40e_get_ringparam()
2040 ring->rx_pending = vsi->rx_rings[0]->count; in i40e_get_ringparam()
2041 ring->tx_pending = vsi->tx_rings[0]->count; in i40e_get_ringparam()
2042 ring->rx_mini_pending = 0; in i40e_get_ringparam()
2043 ring->rx_jumbo_pending = 0; in i40e_get_ringparam()
2049 return index < vsi->num_queue_pairs || in i40e_active_tx_ring_index()
2050 (index >= vsi->alloc_queue_pairs && in i40e_active_tx_ring_index()
2051 index < vsi->alloc_queue_pairs + vsi->num_queue_pairs); in i40e_active_tx_ring_index()
2054 return index < vsi->num_queue_pairs; in i40e_active_tx_ring_index()
2065 struct i40e_hw *hw = &np->vsi->back->hw; in i40e_set_ringparam()
2066 struct i40e_vsi *vsi = np->vsi; in i40e_set_ringparam()
2067 struct i40e_pf *pf = vsi->back; in i40e_set_ringparam()
2072 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) in i40e_set_ringparam()
2073 return -EINVAL; in i40e_set_ringparam()
2076 if (ring->tx_pending > max_num_descriptors || in i40e_set_ringparam()
2077 ring->tx_pending < I40E_MIN_NUM_DESCRIPTORS || in i40e_set_ringparam()
2078 ring->rx_pending > max_num_descriptors || in i40e_set_ringparam()
2079 ring->rx_pending < I40E_MIN_NUM_DESCRIPTORS) { in i40e_set_ringparam()
2081 "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d]\n", in i40e_set_ringparam()
2082 ring->tx_pending, ring->rx_pending, in i40e_set_ringparam()
2084 return -EINVAL; in i40e_set_ringparam()
2087 new_tx_count = ALIGN(ring->tx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); in i40e_set_ringparam()
2088 new_rx_count = ALIGN(ring->rx_pending, I40E_REQ_DESCRIPTOR_MULTIPLE); in i40e_set_ringparam()
2091 if ((new_tx_count == vsi->tx_rings[0]->count) && in i40e_set_ringparam()
2092 (new_rx_count == vsi->rx_rings[0]->count)) in i40e_set_ringparam()
2096 * disallow changing the number of descriptors -- regardless in i40e_set_ringparam()
2100 return -EBUSY; in i40e_set_ringparam()
2102 while (test_and_set_bit(__I40E_CONFIG_BUSY, pf->state)) { in i40e_set_ringparam()
2103 timeout--; in i40e_set_ringparam()
2105 return -EBUSY; in i40e_set_ringparam()
2109 if (!netif_running(vsi->netdev)) { in i40e_set_ringparam()
2110 /* simple case - set for the next time the netdev is started */ in i40e_set_ringparam()
2111 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_set_ringparam()
2112 vsi->tx_rings[i]->count = new_tx_count; in i40e_set_ringparam()
2113 vsi->rx_rings[i]->count = new_rx_count; in i40e_set_ringparam()
2115 vsi->xdp_rings[i]->count = new_tx_count; in i40e_set_ringparam()
2117 vsi->num_tx_desc = new_tx_count; in i40e_set_ringparam()
2118 vsi->num_rx_desc = new_rx_count; in i40e_set_ringparam()
2123 * because the ISRs in MSI-X mode get passed pointers in i40e_set_ringparam()
2128 tx_alloc_queue_pairs = vsi->alloc_queue_pairs * in i40e_set_ringparam()
2130 if (new_tx_count != vsi->tx_rings[0]->count) { in i40e_set_ringparam()
2133 vsi->tx_rings[0]->count, new_tx_count); in i40e_set_ringparam()
2137 err = -ENOMEM; in i40e_set_ringparam()
2145 tx_rings[i] = *vsi->tx_rings[i]; in i40e_set_ringparam()
2155 i--; in i40e_set_ringparam()
2169 if (new_rx_count != vsi->rx_rings[0]->count) { in i40e_set_ringparam()
2172 vsi->rx_rings[0]->count, new_rx_count); in i40e_set_ringparam()
2173 rx_rings = kcalloc(vsi->alloc_queue_pairs, in i40e_set_ringparam()
2176 err = -ENOMEM; in i40e_set_ringparam()
2180 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_set_ringparam()
2184 rx_rings[i] = *vsi->rx_rings[i]; in i40e_set_ringparam()
2191 /* Clear cloned XDP RX-queue info before setup call */ in i40e_set_ringparam()
2196 rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS; in i40e_set_ringparam()
2210 } while (i--); in i40e_set_ringparam()
2227 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_set_ringparam()
2228 *vsi->tx_rings[i] = tx_rings[i]; in i40e_set_ringparam()
2236 for (i = 0; i < vsi->num_queue_pairs; i++) { in i40e_set_ringparam()
2237 i40e_free_rx_resources(vsi->rx_rings[i]); in i40e_set_ringparam()
2239 rx_rings[i].tail = vsi->rx_rings[i]->tail; in i40e_set_ringparam()
2242 * but the recycling logic will let us re-use in i40e_set_ringparam()
2249 *vsi->rx_rings[i] = rx_rings[i]; in i40e_set_ringparam()
2255 vsi->num_tx_desc = new_tx_count; in i40e_set_ringparam()
2256 vsi->num_rx_desc = new_rx_count; in i40e_set_ringparam()
2264 i40e_free_tx_resources(vsi->tx_rings[i]); in i40e_set_ringparam()
2271 clear_bit(__I40E_CONFIG_BUSY, pf->state); in i40e_set_ringparam()
2277 * i40e_get_stats_count - return the stats count for a device
2284 * obtaining stats is *not* safe against changes based on non-static
2293 struct i40e_vsi *vsi = np->vsi; in i40e_get_stats_count()
2294 struct i40e_pf *pf = vsi->back; in i40e_get_stats_count()
2297 if (vsi->type == I40E_VSI_MAIN && pf->hw.partition_id == 1) in i40e_get_stats_count()
2312 * queues in pairs, we'll just use netdev->num_tx_queues * 2. This in i40e_get_stats_count()
2316 stats_len += I40E_QUEUE_STATS_LEN * 2 * netdev->num_tx_queues; in i40e_get_stats_count()
2324 struct i40e_vsi *vsi = np->vsi; in i40e_get_sset_count()
2325 struct i40e_pf *pf = vsi->back; in i40e_get_sset_count()
2334 (pf->hw.pf_id == 0 ? I40E_GL_PRIV_FLAGS_STR_LEN : 0); in i40e_get_sset_count()
2336 return -EOPNOTSUPP; in i40e_get_sset_count()
2341 * i40e_get_veb_tc_stats - copy VEB TC statistics to formatted structure
2342 * @tc: the TC statistics in VEB structure (veb->tc_stats)
2343 * @i: the index of traffic class in (veb->tc_stats) structure to copy
2345 * Copy VEB TC statistics from structure of arrays (veb->tc_stats) to
2354 .tc_rx_packets = tc->tc_rx_packets[i], in i40e_get_veb_tc_stats()
2355 .tc_rx_bytes = tc->tc_rx_bytes[i], in i40e_get_veb_tc_stats()
2356 .tc_tx_packets = tc->tc_tx_packets[i], in i40e_get_veb_tc_stats()
2357 .tc_tx_bytes = tc->tc_tx_bytes[i], in i40e_get_veb_tc_stats()
2364 * i40e_get_pfc_stats - copy HW PFC statistics to formatted structure
2368 * The PFC stats are found as arrays in pf->stats, which is not easy to pass
2376 .stat = pf->stats.stat[priority] in i40e_get_pfc_stats()
2389 * i40e_get_ethtool_stats - copy stat values into supplied buffer
2395 * pre-allocated to the size returned by i40e_get_stats_count. Note that all
2406 struct i40e_vsi *vsi = np->vsi; in i40e_get_ethtool_stats()
2407 struct i40e_pf *pf = vsi->back; in i40e_get_ethtool_stats()
2421 for (i = 0; i < netdev->num_tx_queues; i++) { in i40e_get_ethtool_stats()
2422 i40e_add_queue_stats(&data, READ_ONCE(vsi->tx_rings[i])); in i40e_get_ethtool_stats()
2423 i40e_add_queue_stats(&data, READ_ONCE(vsi->rx_rings[i])); in i40e_get_ethtool_stats()
2427 if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1) in i40e_get_ethtool_stats()
2431 veb_stats = veb && test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); in i40e_get_ethtool_stats()
2446 i40e_get_veb_tc_stats(&veb->tc_stats, i); in i40e_get_ethtool_stats()
2464 WARN_ONCE(data - p != i40e_get_stats_count(netdev), in i40e_get_ethtool_stats()
2469 * i40e_get_stat_strings - copy stat strings into supplied buffer
2474 * pre-allocated with the size reported by i40e_get_stats_count. Note that the
2481 struct i40e_vsi *vsi = np->vsi; in i40e_get_stat_strings()
2482 struct i40e_pf *pf = vsi->back; in i40e_get_stat_strings()
2490 for (i = 0; i < netdev->num_tx_queues; i++) { in i40e_get_stat_strings()
2497 if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1) in i40e_get_stat_strings()
2511 WARN_ONCE(data - p != i40e_get_stats_count(netdev) * ETH_GSTRING_LEN, in i40e_get_stat_strings()
2518 struct i40e_vsi *vsi = np->vsi; in i40e_get_priv_flag_strings()
2519 struct i40e_pf *pf = vsi->back; in i40e_get_priv_flag_strings()
2525 if (pf->hw.pf_id != 0) in i40e_get_priv_flag_strings()
2556 if (!test_bit(I40E_FLAG_PTP_ENA, pf->flags)) in i40e_get_ts_info()
2559 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | in i40e_get_ts_info()
2564 if (pf->ptp_clock) in i40e_get_ts_info()
2565 info->phc_index = ptp_clock_index(pf->ptp_clock); in i40e_get_ts_info()
2567 info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); in i40e_get_ts_info()
2569 info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | in i40e_get_ts_info()
2574 if (test_bit(I40E_HW_CAP_PTP_L4, pf->hw.caps)) in i40e_get_ts_info()
2575 info->rx_filters |= BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | in i40e_get_ts_info()
2590 struct i40e_pf *pf = np->vsi->back; in i40e_link_test()
2595 status = i40e_get_link_status(&pf->hw, &link_up); in i40e_link_test()
2613 struct i40e_pf *pf = np->vsi->back; in i40e_reg_test()
2616 *data = i40e_diag_reg_test(&pf->hw); in i40e_reg_test()
2624 struct i40e_pf *pf = np->vsi->back; in i40e_eeprom_test()
2627 *data = i40e_diag_eeprom_test(&pf->hw); in i40e_eeprom_test()
2630 pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; in i40e_eeprom_test()
2638 struct i40e_pf *pf = np->vsi->back; in i40e_intr_test()
2639 u16 swc_old = pf->sw_int_count; in i40e_intr_test()
2642 wr32(&pf->hw, I40E_PFINT_DYN_CTL0, in i40e_intr_test()
2649 *data = (swc_old == pf->sw_int_count); in i40e_intr_test()
2656 struct i40e_vf *vfs = pf->vf; in i40e_active_vfs()
2659 for (i = 0; i < pf->num_alloc_vfs; i++) in i40e_active_vfs()
2675 struct i40e_pf *pf = np->vsi->back; in i40e_diag_test()
2677 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { in i40e_diag_test()
2681 set_bit(__I40E_TESTING, pf->state); in i40e_diag_test()
2683 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_diag_test()
2684 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { in i40e_diag_test()
2685 dev_warn(&pf->pdev->dev, in i40e_diag_test()
2691 dev_warn(&pf->pdev->dev, in i40e_diag_test()
2701 /* This reset does not affect link - if it is in i40e_diag_test()
2709 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2712 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2715 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2719 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2721 clear_bit(__I40E_TESTING, pf->state); in i40e_diag_test()
2731 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2747 eth_test->flags |= ETH_TEST_FL_FAILED; in i40e_diag_test()
2748 clear_bit(__I40E_TESTING, pf->state); in i40e_diag_test()
2756 struct i40e_pf *pf = np->vsi->back; in i40e_get_wol()
2757 struct i40e_hw *hw = &pf->hw; in i40e_get_wol()
2762 if ((BIT(hw->port) & wol_nvm_bits) || (hw->partition_id != 1)) { in i40e_get_wol()
2763 wol->supported = 0; in i40e_get_wol()
2764 wol->wolopts = 0; in i40e_get_wol()
2766 wol->supported = WAKE_MAGIC; in i40e_get_wol()
2767 wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); in i40e_get_wol()
2772 * i40e_set_wol - set the WakeOnLAN configuration
2779 struct i40e_pf *pf = np->vsi->back; in i40e_set_wol()
2780 struct i40e_vsi *vsi = np->vsi; in i40e_set_wol()
2781 struct i40e_hw *hw = &pf->hw; in i40e_set_wol()
2785 if (hw->partition_id != 1) { in i40e_set_wol()
2787 return -EOPNOTSUPP; in i40e_set_wol()
2790 if (vsi->type != I40E_VSI_MAIN) in i40e_set_wol()
2791 return -EOPNOTSUPP; in i40e_set_wol()
2795 if (BIT(hw->port) & wol_nvm_bits) in i40e_set_wol()
2796 return -EOPNOTSUPP; in i40e_set_wol()
2799 if (wol->wolopts & ~WAKE_MAGIC) in i40e_set_wol()
2800 return -EOPNOTSUPP; in i40e_set_wol()
2803 if (pf->wol_en != !!wol->wolopts) { in i40e_set_wol()
2804 pf->wol_en = !!wol->wolopts; in i40e_set_wol()
2805 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); in i40e_set_wol()
2815 struct i40e_pf *pf = np->vsi->back; in i40e_set_phys_id()
2816 struct i40e_hw *hw = &pf->hw; in i40e_set_phys_id()
2823 if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) { in i40e_set_phys_id()
2824 pf->led_status = i40e_led_get(hw); in i40e_set_phys_id()
2826 if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) in i40e_set_phys_id()
2830 &pf->phy_led_val); in i40e_set_phys_id()
2831 pf->led_status = temp_status; in i40e_set_phys_id()
2835 if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) in i40e_set_phys_id()
2838 ret = i40e_led_set_phy(hw, true, pf->led_status, 0); in i40e_set_phys_id()
2841 if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) in i40e_set_phys_id()
2844 ret = i40e_led_set_phy(hw, false, pf->led_status, 0); in i40e_set_phys_id()
2847 if (!test_bit(I40E_HW_CAP_PHY_CONTROLS_LEDS, pf->hw.caps)) { in i40e_set_phys_id()
2848 i40e_led_set(hw, pf->led_status, false); in i40e_set_phys_id()
2850 ret = i40e_led_set_phy(hw, false, pf->led_status, in i40e_set_phys_id()
2851 (pf->phy_led_val | in i40e_set_phys_id()
2853 if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) in i40e_set_phys_id()
2861 return -ENOENT; in i40e_set_phys_id()
2872 * __i40e_get_coalesce - get per-queue coalesce settings
2874 * @ec: ethtool coalesce data structure
2877 * Gets the per-queue settings for coalescence. Specifically Rx and Tx usecs
2882 struct ethtool_coalesce *ec, in __i40e_get_coalesce() argument
2887 struct i40e_vsi *vsi = np->vsi; in __i40e_get_coalesce()
2889 ec->tx_max_coalesced_frames_irq = vsi->work_limit; in __i40e_get_coalesce()
2896 else if (queue >= vsi->num_queue_pairs) in __i40e_get_coalesce()
2897 return -EINVAL; in __i40e_get_coalesce()
2899 rx_ring = vsi->rx_rings[queue]; in __i40e_get_coalesce()
2900 tx_ring = vsi->tx_rings[queue]; in __i40e_get_coalesce()
2902 if (ITR_IS_DYNAMIC(rx_ring->itr_setting)) in __i40e_get_coalesce()
2903 ec->use_adaptive_rx_coalesce = 1; in __i40e_get_coalesce()
2905 if (ITR_IS_DYNAMIC(tx_ring->itr_setting)) in __i40e_get_coalesce()
2906 ec->use_adaptive_tx_coalesce = 1; in __i40e_get_coalesce()
2908 ec->rx_coalesce_usecs = rx_ring->itr_setting & ~I40E_ITR_DYNAMIC; in __i40e_get_coalesce()
2909 ec->tx_coalesce_usecs = tx_ring->itr_setting & ~I40E_ITR_DYNAMIC; in __i40e_get_coalesce()
2917 ec->rx_coalesce_usecs_high = vsi->int_rate_limit; in __i40e_get_coalesce()
2918 ec->tx_coalesce_usecs_high = vsi->int_rate_limit; in __i40e_get_coalesce()
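
The ITR handling above stores the "adaptive" choice as a flag bit ORed into the same field as the microsecond value, so reporting strips the flag and the flag alone drives use_adaptive_*_coalesce. A sketch with an assumed flag bit (the driver's real constant is not reproduced here):

#define DEMO_ITR_DYNAMIC        0x8000u /* assumed flag bit for "adaptive" */

static void demo_report_itr(struct ethtool_coalesce *ec, u16 rx_itr, u16 tx_itr)
{
        ec->use_adaptive_rx_coalesce = !!(rx_itr & DEMO_ITR_DYNAMIC);
        ec->use_adaptive_tx_coalesce = !!(tx_itr & DEMO_ITR_DYNAMIC);

        /* Strip the flag so only the programmed interval is reported. */
        ec->rx_coalesce_usecs = rx_itr & ~DEMO_ITR_DYNAMIC;
        ec->tx_coalesce_usecs = tx_itr & ~DEMO_ITR_DYNAMIC;
}
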
2924 * i40e_get_coalesce - get a netdev's coalesce settings
2926 * @ec: ethtool coalesce data structure
2931 * modified per-queue settings, this only guarantees to represent queue 0. See
2935 struct ethtool_coalesce *ec, in i40e_get_coalesce() argument
2939 return __i40e_get_coalesce(netdev, ec, -1); in i40e_get_coalesce()
2943 * i40e_get_per_queue_coalesce - gets coalesce settings for particular queue
2945 * @ec: ethtool's coalesce settings
2951 struct ethtool_coalesce *ec) in i40e_get_per_queue_coalesce() argument
2953 return __i40e_get_coalesce(netdev, ec, queue); in i40e_get_per_queue_coalesce()
2957 * i40e_set_itr_per_queue - set ITR values for specific queue
2959 * @ec: coalesce settings from ethtool
2965 struct ethtool_coalesce *ec, in i40e_set_itr_per_queue() argument
2968 struct i40e_ring *rx_ring = vsi->rx_rings[queue]; in i40e_set_itr_per_queue()
2969 struct i40e_ring *tx_ring = vsi->tx_rings[queue]; in i40e_set_itr_per_queue()
2970 struct i40e_pf *pf = vsi->back; in i40e_set_itr_per_queue()
2971 struct i40e_hw *hw = &pf->hw; in i40e_set_itr_per_queue()
2975 intrl = i40e_intrl_usec_to_reg(vsi->int_rate_limit); in i40e_set_itr_per_queue()
2977 rx_ring->itr_setting = ITR_REG_ALIGN(ec->rx_coalesce_usecs); in i40e_set_itr_per_queue()
2978 tx_ring->itr_setting = ITR_REG_ALIGN(ec->tx_coalesce_usecs); in i40e_set_itr_per_queue()
2980 if (ec->use_adaptive_rx_coalesce) in i40e_set_itr_per_queue()
2981 rx_ring->itr_setting |= I40E_ITR_DYNAMIC; in i40e_set_itr_per_queue()
2983 rx_ring->itr_setting &= ~I40E_ITR_DYNAMIC; in i40e_set_itr_per_queue()
2985 if (ec->use_adaptive_tx_coalesce) in i40e_set_itr_per_queue()
2986 tx_ring->itr_setting |= I40E_ITR_DYNAMIC; in i40e_set_itr_per_queue()
2988 tx_ring->itr_setting &= ~I40E_ITR_DYNAMIC; in i40e_set_itr_per_queue()
2990 q_vector = rx_ring->q_vector; in i40e_set_itr_per_queue()
2991 q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting); in i40e_set_itr_per_queue()
2993 q_vector = tx_ring->q_vector; in i40e_set_itr_per_queue()
2994 q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting); in i40e_set_itr_per_queue()
3001 wr32(hw, I40E_PFINT_RATEN(q_vector->reg_idx), intrl); in i40e_set_itr_per_queue()
3006 * __i40e_set_coalesce - set coalesce settings for particular queue
3008 * @ec: ethtool coalesce settings
3014 struct ethtool_coalesce *ec, in __i40e_set_coalesce() argument
3019 struct i40e_vsi *vsi = np->vsi; in __i40e_set_coalesce()
3020 struct i40e_pf *pf = vsi->back; in __i40e_set_coalesce()
3023 if (ec->tx_max_coalesced_frames_irq) in __i40e_set_coalesce()
3024 vsi->work_limit = ec->tx_max_coalesced_frames_irq; in __i40e_set_coalesce()
3027 cur_rx_itr = vsi->rx_rings[0]->itr_setting; in __i40e_set_coalesce()
3028 cur_tx_itr = vsi->tx_rings[0]->itr_setting; in __i40e_set_coalesce()
3029 } else if (queue < vsi->num_queue_pairs) { in __i40e_set_coalesce()
3030 cur_rx_itr = vsi->rx_rings[queue]->itr_setting; in __i40e_set_coalesce()
3031 cur_tx_itr = vsi->tx_rings[queue]->itr_setting; in __i40e_set_coalesce()
3033 netif_info(pf, drv, netdev, "Invalid queue value, queue range is 0 - %d\n", in __i40e_set_coalesce()
3034 vsi->num_queue_pairs - 1); in __i40e_set_coalesce()
3035 return -EINVAL; in __i40e_set_coalesce()
3041 /* tx_coalesce_usecs_high is ignored, use rx-usecs-high instead */ in __i40e_set_coalesce()
3042 if (ec->tx_coalesce_usecs_high != vsi->int_rate_limit) { in __i40e_set_coalesce()
3043 netif_info(pf, drv, netdev, "tx-usecs-high is not used, please program rx-usecs-high\n"); in __i40e_set_coalesce()
3044 return -EINVAL; in __i40e_set_coalesce()
3047 if (ec->rx_coalesce_usecs_high > INTRL_REG_TO_USEC(I40E_MAX_INTRL)) { in __i40e_set_coalesce()
3048 netif_info(pf, drv, netdev, "Invalid value, rx-usecs-high range is 0-%lu\n", in __i40e_set_coalesce()
3050 return -EINVAL; in __i40e_set_coalesce()
3053 if (ec->rx_coalesce_usecs != cur_rx_itr && in __i40e_set_coalesce()
3054 ec->use_adaptive_rx_coalesce) { in __i40e_set_coalesce()
3055 netif_info(pf, drv, netdev, "RX interrupt moderation cannot be changed if adaptive-rx is enabled.\n"); in __i40e_set_coalesce()
3056 return -EINVAL; in __i40e_set_coalesce()
3059 if (ec->rx_coalesce_usecs > I40E_MAX_ITR) { in __i40e_set_coalesce()
3060 netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); in __i40e_set_coalesce()
3061 return -EINVAL; in __i40e_set_coalesce()
3064 if (ec->tx_coalesce_usecs != cur_tx_itr && in __i40e_set_coalesce()
3065 ec->use_adaptive_tx_coalesce) { in __i40e_set_coalesce()
3066 netif_info(pf, drv, netdev, "TX interrupt moderation cannot be changed if adaptive-tx is enabled.\n"); in __i40e_set_coalesce()
3067 return -EINVAL; in __i40e_set_coalesce()
3070 if (ec->tx_coalesce_usecs > I40E_MAX_ITR) { in __i40e_set_coalesce()
3071 netif_info(pf, drv, netdev, "Invalid value, tx-usecs range is 0-8160\n"); in __i40e_set_coalesce()
3072 return -EINVAL; in __i40e_set_coalesce()
3075 if (ec->use_adaptive_rx_coalesce && !cur_rx_itr) in __i40e_set_coalesce()
3076 ec->rx_coalesce_usecs = I40E_MIN_ITR; in __i40e_set_coalesce()
3078 if (ec->use_adaptive_tx_coalesce && !cur_tx_itr) in __i40e_set_coalesce()
3079 ec->tx_coalesce_usecs = I40E_MIN_ITR; in __i40e_set_coalesce()
3081 intrl_reg = i40e_intrl_usec_to_reg(ec->rx_coalesce_usecs_high); in __i40e_set_coalesce()
3082 vsi->int_rate_limit = INTRL_REG_TO_USEC(intrl_reg); in __i40e_set_coalesce()
3083 if (vsi->int_rate_limit != ec->rx_coalesce_usecs_high) { in __i40e_set_coalesce()
3085 vsi->int_rate_limit); in __i40e_set_coalesce()
3092 for (i = 0; i < vsi->num_queue_pairs; i++) in __i40e_set_coalesce()
3093 i40e_set_itr_per_queue(vsi, ec, i); in __i40e_set_coalesce()
3095 i40e_set_itr_per_queue(vsi, ec, queue); in __i40e_set_coalesce()
3102 * i40e_set_coalesce - set coalesce settings for every queue on the netdev
3104 * @ec: ethtool coalesce settings
3111 struct ethtool_coalesce *ec, in i40e_set_coalesce() argument
3115 return __i40e_set_coalesce(netdev, ec, -1); in i40e_set_coalesce()
3119 * i40e_set_per_queue_coalesce - set specific queue's coalesce settings
3121 * @ec: ethtool's coalesce settings
3127 struct ethtool_coalesce *ec) in i40e_set_per_queue_coalesce() argument
3129 return __i40e_set_coalesce(netdev, ec, queue); in i40e_set_per_queue_coalesce()
3133 * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
3141 struct i40e_hw *hw = &pf->hw; in i40e_get_rss_hash_opts()
3145 cmd->data = 0; in i40e_get_rss_hash_opts()
3147 switch (cmd->flow_type) { in i40e_get_rss_hash_opts()
3171 cmd->data |= RXH_IP_SRC | RXH_IP_DST; in i40e_get_rss_hash_opts()
3174 return -EINVAL; in i40e_get_rss_hash_opts()
3188 cmd->data |= RXH_L4_B_0_1; in i40e_get_rss_hash_opts()
3190 cmd->data |= RXH_L4_B_2_3; in i40e_get_rss_hash_opts()
3192 if (cmd->flow_type == TCP_V4_FLOW || in i40e_get_rss_hash_opts()
3193 cmd->flow_type == UDP_V4_FLOW) { in i40e_get_rss_hash_opts()
3194 if (hw->mac.type == I40E_MAC_X722) { in i40e_get_rss_hash_opts()
3196 cmd->data |= RXH_IP_SRC; in i40e_get_rss_hash_opts()
3198 cmd->data |= RXH_IP_DST; in i40e_get_rss_hash_opts()
3201 cmd->data |= RXH_IP_SRC; in i40e_get_rss_hash_opts()
3203 cmd->data |= RXH_IP_DST; in i40e_get_rss_hash_opts()
3205 } else if (cmd->flow_type == TCP_V6_FLOW || in i40e_get_rss_hash_opts()
3206 cmd->flow_type == UDP_V6_FLOW) { in i40e_get_rss_hash_opts()
3208 cmd->data |= RXH_IP_SRC; in i40e_get_rss_hash_opts()
3210 cmd->data |= RXH_IP_DST; in i40e_get_rss_hash_opts()
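
For ETHTOOL_GRXFH, the RXH_* bits report which packet fields feed the RSS hash; the lines above set RXH_IP_SRC/RXH_IP_DST and the two L4 port halves according to the configured input set. A condensed sketch with those decisions abstracted into booleans:

static void demo_report_rss_fields(struct ethtool_rxnfc *cmd,
                                   bool hash_ip, bool hash_l4_ports)
{
        cmd->data = 0;
        if (hash_ip)
                cmd->data |= RXH_IP_SRC | RXH_IP_DST;
        if (hash_l4_ports)
                cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; /* src + dst L4 port */
}
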
3218 * i40e_check_mask - Check whether a mask field is set
3234 return -1; in i40e_check_mask()
3238 * i40e_parse_rx_flow_user_data - Deconstruct user-defined data
3242 * Read the user-defined data and deconstruct the value into a structure. No
3243 * other code should read the user-defined data, so as to ensure that every
3246 * The user-defined field is a 64bit Big Endian format value, which we
3251 * Returns 0 if the data is valid, and non-zero if the userdef data is invalid
3265 if (!(fsp->flow_type & FLOW_EXT)) in i40e_parse_rx_flow_user_data()
3268 value = be64_to_cpu(*((__be64 *)fsp->h_ext.data)); in i40e_parse_rx_flow_user_data()
3269 mask = be64_to_cpu(*((__be64 *)fsp->m_ext.data)); in i40e_parse_rx_flow_user_data()
3277 return -EINVAL; in i40e_parse_rx_flow_user_data()
3279 data->flex_word = value & I40E_USERDEF_FLEX_WORD; in i40e_parse_rx_flow_user_data()
3280 data->flex_offset = in i40e_parse_rx_flow_user_data()
3282 data->flex_filter = true; in i40e_parse_rx_flow_user_data()
3289 * i40e_fill_rx_flow_user_data - Fill in user-defined data field
3301 if (data->flex_filter) { in i40e_fill_rx_flow_user_data()
3302 value |= data->flex_word; in i40e_fill_rx_flow_user_data()
3303 value |= (u64)data->flex_offset << 16; in i40e_fill_rx_flow_user_data()
3308 fsp->flow_type |= FLOW_EXT; in i40e_fill_rx_flow_user_data()
3310 *((__be64 *)fsp->h_ext.data) = cpu_to_be64(value); in i40e_fill_rx_flow_user_data()
3311 *((__be64 *)fsp->m_ext.data) = cpu_to_be64(mask); in i40e_fill_rx_flow_user_data()
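
The user-defined field travels as one 64-bit big-endian value; per the fill code above, the flex word occupies the low 16 bits and the flex offset the next 16. A sketch of unpacking it (the DEMO_* masks are assumptions, not the driver's constants):

#define DEMO_FLEX_WORD          GENMASK_ULL(15, 0)
#define DEMO_FLEX_OFFSET        GENMASK_ULL(31, 16)

static void demo_unpack_userdef(__be64 raw, u16 *flex_word, u16 *flex_offset)
{
        u64 value = be64_to_cpu(raw);

        *flex_word = value & DEMO_FLEX_WORD;
        *flex_offset = (value & DEMO_FLEX_OFFSET) >> 16;
}
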
3315 * i40e_get_ethtool_fdir_all - Populates the rule count of a command
3323 * Returns 0 on success or -EMSGSIZE if entry not found
3334 cmd->data = i40e_get_fd_cnt_all(pf); in i40e_get_ethtool_fdir_all()
3337 &pf->fdir_filter_list, fdir_node) { in i40e_get_ethtool_fdir_all()
3338 if (cnt == cmd->rule_cnt) in i40e_get_ethtool_fdir_all()
3339 return -EMSGSIZE; in i40e_get_ethtool_fdir_all()
3341 rule_locs[cnt] = rule->fd_id; in i40e_get_ethtool_fdir_all()
3345 cmd->rule_cnt = cnt; in i40e_get_ethtool_fdir_all()
3351 * i40e_get_ethtool_fdir_entry - Look up a filter based on Rx flow
3358 * Returns 0 on success or -EINVAL if filter not found
3364 (struct ethtool_rx_flow_spec *)&cmd->fs; in i40e_get_ethtool_fdir_entry()
3373 &pf->fdir_filter_list, fdir_node) { in i40e_get_ethtool_fdir_entry()
3374 if (fsp->location <= rule->fd_id) in i40e_get_ethtool_fdir_entry()
3378 if (!rule || fsp->location != rule->fd_id) in i40e_get_ethtool_fdir_entry()
3379 return -EINVAL; in i40e_get_ethtool_fdir_entry()
3381 fsp->flow_type = rule->flow_type; in i40e_get_ethtool_fdir_entry()
3382 if (fsp->flow_type == IP_USER_FLOW) { in i40e_get_ethtool_fdir_entry()
3383 fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; in i40e_get_ethtool_fdir_entry()
3384 fsp->h_u.usr_ip4_spec.proto = 0; in i40e_get_ethtool_fdir_entry()
3385 fsp->m_u.usr_ip4_spec.proto = 0; in i40e_get_ethtool_fdir_entry()
3388 if (fsp->flow_type == IPV6_USER_FLOW || in i40e_get_ethtool_fdir_entry()
3389 fsp->flow_type == UDP_V6_FLOW || in i40e_get_ethtool_fdir_entry()
3390 fsp->flow_type == TCP_V6_FLOW || in i40e_get_ethtool_fdir_entry()
3391 fsp->flow_type == SCTP_V6_FLOW) { in i40e_get_ethtool_fdir_entry()
3396 fsp->h_u.tcp_ip6_spec.psrc = rule->dst_port; in i40e_get_ethtool_fdir_entry()
3397 fsp->h_u.tcp_ip6_spec.pdst = rule->src_port; in i40e_get_ethtool_fdir_entry()
3398 memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, rule->src_ip6, in i40e_get_ethtool_fdir_entry()
3400 memcpy(fsp->h_u.tcp_ip6_spec.ip6src, rule->dst_ip6, in i40e_get_ethtool_fdir_entry()
3407 fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port; in i40e_get_ethtool_fdir_entry()
3408 fsp->h_u.tcp_ip4_spec.pdst = rule->src_port; in i40e_get_ethtool_fdir_entry()
3409 fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip; in i40e_get_ethtool_fdir_entry()
3410 fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip; in i40e_get_ethtool_fdir_entry()
3413 switch (rule->flow_type) { in i40e_get_ethtool_fdir_entry()
3445 rule->flow_type); in i40e_get_ethtool_fdir_entry()
3454 fsp->m_u.tcp_ip6_spec.ip6src[0] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3455 fsp->m_u.tcp_ip6_spec.ip6src[1] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3456 fsp->m_u.tcp_ip6_spec.ip6src[2] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3457 fsp->m_u.tcp_ip6_spec.ip6src[3] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3461 fsp->m_u.tcp_ip6_spec.ip6dst[0] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3462 fsp->m_u.tcp_ip6_spec.ip6dst[1] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3463 fsp->m_u.tcp_ip6_spec.ip6dst[2] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3464 fsp->m_u.tcp_ip6_spec.ip6dst[3] = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3468 fsp->m_u.tcp_ip4_spec.ip4src = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3471 fsp->m_u.tcp_ip4_spec.ip4dst = htonl(0xFFFFFFFF); in i40e_get_ethtool_fdir_entry()
3474 fsp->m_u.tcp_ip4_spec.psrc = htons(0xFFFF); in i40e_get_ethtool_fdir_entry()
3477 fsp->m_u.tcp_ip4_spec.pdst = htons(0xFFFF); in i40e_get_ethtool_fdir_entry()
3479 if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET) in i40e_get_ethtool_fdir_entry()
3480 fsp->ring_cookie = RX_CLS_FLOW_DISC; in i40e_get_ethtool_fdir_entry()
3482 fsp->ring_cookie = rule->q_index; in i40e_get_ethtool_fdir_entry()
3484 if (rule->vlan_tag) { in i40e_get_ethtool_fdir_entry()
3485 fsp->h_ext.vlan_etype = rule->vlan_etype; in i40e_get_ethtool_fdir_entry()
3486 fsp->m_ext.vlan_etype = htons(0xFFFF); in i40e_get_ethtool_fdir_entry()
3487 fsp->h_ext.vlan_tci = rule->vlan_tag; in i40e_get_ethtool_fdir_entry()
3488 fsp->m_ext.vlan_tci = htons(0xFFFF); in i40e_get_ethtool_fdir_entry()
3489 fsp->flow_type |= FLOW_EXT; in i40e_get_ethtool_fdir_entry()
3493 if (rule->dest_vsi != vsi->id) { in i40e_get_ethtool_fdir_entry()
3494 vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); in i40e_get_ethtool_fdir_entry()
3495 if (vsi && vsi->type == I40E_VSI_SRIOV) { in i40e_get_ethtool_fdir_entry()
3496 /* VFs are zero-indexed by the driver, but ethtool in i40e_get_ethtool_fdir_entry()
3497 * expects them to be one-indexed, so add one here in i40e_get_ethtool_fdir_entry()
3499 u64 ring_vf = vsi->vf_id + 1; in i40e_get_ethtool_fdir_entry()
3502 fsp->ring_cookie |= ring_vf; in i40e_get_ethtool_fdir_entry()
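
The VF handling above folds a one-based VF id into the upper part of the ethtool ring_cookie, leaving the queue index in the low bits. A sketch using the ring_cookie VF offset constant from the ethtool uapi (assumed available via <linux/ethtool.h>):

static u64 demo_make_ring_cookie(u32 queue, u32 vf)     /* vf is zero-based */
{
        u64 cookie = queue;

        /* ethtool expects VFs one-based in this field, as noted above. */
        cookie |= (u64)(vf + 1) << ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
        return cookie;
}
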
3506 if (rule->flex_filter) { in i40e_get_ethtool_fdir_entry()
3508 userdef.flex_word = be16_to_cpu(rule->flex_word); in i40e_get_ethtool_fdir_entry()
3509 userdef.flex_offset = rule->flex_offset; in i40e_get_ethtool_fdir_entry()
3518 * i40e_get_rxnfc - command to get RX flow classification rules
3529 struct i40e_vsi *vsi = np->vsi; in i40e_get_rxnfc()
3530 struct i40e_pf *pf = vsi->back; in i40e_get_rxnfc()
3531 int ret = -EOPNOTSUPP; in i40e_get_rxnfc()
3533 switch (cmd->cmd) { in i40e_get_rxnfc()
3535 cmd->data = vsi->rss_size; in i40e_get_rxnfc()
3542 cmd->rule_cnt = pf->fdir_pf_active_filters; in i40e_get_rxnfc()
3544 cmd->data = i40e_get_fd_cnt_all(pf); in i40e_get_rxnfc()
3561 * i40e_get_rss_hash_bits - Read RSS Hash bits from register
3575 if (nfc->data & RXH_L4_B_0_1) in i40e_get_rss_hash_bits()
3579 if (nfc->data & RXH_L4_B_2_3) in i40e_get_rss_hash_bits()
3584 if (nfc->flow_type == TCP_V6_FLOW || nfc->flow_type == UDP_V6_FLOW) { in i40e_get_rss_hash_bits()
3587 } else if (nfc->flow_type == TCP_V4_FLOW || in i40e_get_rss_hash_bits()
3588 nfc->flow_type == UDP_V4_FLOW) { in i40e_get_rss_hash_bits()
3589 if (hw->mac.type == I40E_MAC_X722) { in i40e_get_rss_hash_bits()
3601 if (nfc->data & RXH_IP_SRC) in i40e_get_rss_hash_bits()
3605 if (nfc->data & RXH_IP_DST) in i40e_get_rss_hash_bits()
3615 * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash
3623 struct i40e_hw *hw = &pf->hw; in i40e_set_rss_hash_opt()
3631 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { in i40e_set_rss_hash_opt()
3632 dev_err(&pf->pdev->dev, in i40e_set_rss_hash_opt()
3634 return -EOPNOTSUPP; in i40e_set_rss_hash_opt()
3640 if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | in i40e_set_rss_hash_opt()
3642 return -EINVAL; in i40e_set_rss_hash_opt()
3644 switch (nfc->flow_type) { in i40e_set_rss_hash_opt()
3648 pf->hw.caps)) in i40e_set_rss_hash_opt()
3655 pf->hw.caps)) in i40e_set_rss_hash_opt()
3662 pf->hw.caps)) { in i40e_set_rss_hash_opt()
3673 pf->hw.caps)) { in i40e_set_rss_hash_opt()
3685 if ((nfc->data & RXH_L4_B_0_1) || in i40e_set_rss_hash_opt()
3686 (nfc->data & RXH_L4_B_2_3)) in i40e_set_rss_hash_opt()
3687 return -EINVAL; in i40e_set_rss_hash_opt()
3694 if ((nfc->data & RXH_L4_B_0_1) || in i40e_set_rss_hash_opt()
3695 (nfc->data & RXH_L4_B_2_3)) in i40e_set_rss_hash_opt()
3696 return -EINVAL; in i40e_set_rss_hash_opt()
3708 return -EINVAL; in i40e_set_rss_hash_opt()
3717 i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc); in i40e_set_rss_hash_opt()
3735 * i40e_update_ethtool_fdir_entry - Updates the fdir filter entry
3752 struct i40e_pf *pf = vsi->back; in i40e_update_ethtool_fdir_entry()
3754 int err = -EINVAL; in i40e_update_ethtool_fdir_entry()
3760 &pf->fdir_filter_list, fdir_node) { in i40e_update_ethtool_fdir_entry()
3762 if (rule->fd_id >= sw_idx) in i40e_update_ethtool_fdir_entry()
3768 if (rule && (rule->fd_id == sw_idx)) { in i40e_update_ethtool_fdir_entry()
3773 hlist_del(&rule->fdir_node); in i40e_update_ethtool_fdir_entry()
3775 pf->fdir_pf_active_filters--; in i40e_update_ethtool_fdir_entry()
3785 INIT_HLIST_NODE(&input->fdir_node); in i40e_update_ethtool_fdir_entry()
3789 hlist_add_behind(&input->fdir_node, &parent->fdir_node); in i40e_update_ethtool_fdir_entry()
3791 hlist_add_head(&input->fdir_node, in i40e_update_ethtool_fdir_entry()
3792 &pf->fdir_filter_list); in i40e_update_ethtool_fdir_entry()
3795 pf->fdir_pf_active_filters++; in i40e_update_ethtool_fdir_entry()
3801 * i40e_prune_flex_pit_list - Cleanup unused entries in FLX_PIT table
3814 list_for_each_entry_safe(entry, tmp, &pf->l3_flex_pit_list, list) { in i40e_prune_flex_pit_list()
3817 hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { in i40e_prune_flex_pit_list()
3818 if (rule->flow_type != IP_USER_FLOW) in i40e_prune_flex_pit_list()
3820 if (rule->flex_filter && in i40e_prune_flex_pit_list()
3821 rule->flex_offset == entry->src_offset) { in i40e_prune_flex_pit_list()
3831 list_del(&entry->list); in i40e_prune_flex_pit_list()
3837 list_for_each_entry_safe(entry, tmp, &pf->l4_flex_pit_list, list) { in i40e_prune_flex_pit_list()
3840 hlist_for_each_entry(rule, &pf->fdir_filter_list, fdir_node) { in i40e_prune_flex_pit_list()
3844 if (rule->flow_type == IP_USER_FLOW) in i40e_prune_flex_pit_list()
3846 if (rule->flex_filter && in i40e_prune_flex_pit_list()
3847 rule->flex_offset == entry->src_offset) { in i40e_prune_flex_pit_list()
3857 list_del(&entry->list); in i40e_prune_flex_pit_list()
3864 * i40e_del_fdir_entry - Deletes a Flow Director filter entry
3877 (struct ethtool_rx_flow_spec *)&cmd->fs; in i40e_del_fdir_entry()
3878 struct i40e_pf *pf = vsi->back; in i40e_del_fdir_entry()
3881 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_del_fdir_entry()
3882 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_del_fdir_entry()
3883 return -EBUSY; in i40e_del_fdir_entry()
3885 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_del_fdir_entry()
3886 return -EBUSY; in i40e_del_fdir_entry()
3888 ret = i40e_update_ethtool_fdir_entry(vsi, NULL, fsp->location, cmd); in i40e_del_fdir_entry()
3897 * i40e_unused_pit_index - Find an unused PIT index for given list
3915 list_for_each_entry(entry, &pf->l4_flex_pit_list, list) in i40e_unused_pit_index()
3916 clear_bit(entry->pit_index, &available_index); in i40e_unused_pit_index()
3918 list_for_each_entry(entry, &pf->l3_flex_pit_list, list) in i40e_unused_pit_index()
3919 clear_bit(entry->pit_index, &available_index); in i40e_unused_pit_index()
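
i40e_unused_pit_index() walks both flex PIT lists and clears each in-use index from a bitmap of candidates, so the first bit still set is a free index. A compact sketch of that technique (demo_* names and the fixed bound are assumptions):

static unsigned int demo_first_unused(const u8 *used, unsigned int count,
                                      unsigned int max_index) /* max_index <= 64 */
{
        DECLARE_BITMAP(available, 64);
        unsigned int i;

        bitmap_fill(available, max_index);      /* start with all indexes free */
        for (i = 0; i < count; i++)
                clear_bit(used[i], available);  /* drop indexes already taken */

        /* Returns max_index when every index is in use. */
        return find_first_bit(available, max_index);
}
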
3925 * i40e_find_flex_offset - Find an existing flex src_offset
3941 * already programmed, we can simply re-use it. in i40e_find_flex_offset()
3945 if (entry->src_offset == src_offset) in i40e_find_flex_offset()
3955 return ERR_PTR(-ENOSPC); in i40e_find_flex_offset()
3961 * i40e_add_flex_offset - Add src_offset to flex PIT table list
3981 return -ENOMEM; in i40e_add_flex_offset()
3983 new_pit->src_offset = src_offset; in i40e_add_flex_offset()
3984 new_pit->pit_index = pit_index; in i40e_add_flex_offset()
3990 if (new_pit->src_offset < entry->src_offset) { in i40e_add_flex_offset()
3991 list_add_tail(&new_pit->list, &entry->list); in i40e_add_flex_offset()
3999 if (new_pit->src_offset == entry->src_offset) { in i40e_add_flex_offset()
4002 /* If the PIT index is not the same we can't re-use in i40e_add_flex_offset()
4005 if (new_pit->pit_index != entry->pit_index) in i40e_add_flex_offset()
4006 err = -EINVAL; in i40e_add_flex_offset()
4016 list_add_tail(&new_pit->list, flex_pit_list); in i40e_add_flex_offset()
4021 * __i40e_reprogram_flex_pit - Re-program specific FLX_PIT table
4034 * This function will reprogram the FLX_PIT register from a book-keeping
4068 u16 offset = entry->src_offset + j; in __i40e_reprogram_flex_pit()
4072 offset - 3); in __i40e_reprogram_flex_pit()
4075 i40e_write_rx_ctl(&pf->hw, in __i40e_reprogram_flex_pit()
4083 i40e_write_rx_ctl(&pf->hw, in __i40e_reprogram_flex_pit()
4085 I40E_FLEX_PREP_VAL(entry->pit_index + 50, in __i40e_reprogram_flex_pit()
4087 entry->src_offset)); in __i40e_reprogram_flex_pit()
4098 last_offset = list_prev_entry(entry, list)->src_offset + 1; in __i40e_reprogram_flex_pit()
4101 i40e_write_rx_ctl(&pf->hw, in __i40e_reprogram_flex_pit()
4110 * i40e_reprogram_flex_pit - Reprogram all FLX_PIT tables after input set change
4118 __i40e_reprogram_flex_pit(pf, &pf->l3_flex_pit_list, in i40e_reprogram_flex_pit()
4121 __i40e_reprogram_flex_pit(pf, &pf->l4_flex_pit_list, in i40e_reprogram_flex_pit()
4125 i40e_write_rx_ctl(&pf->hw, in i40e_reprogram_flex_pit()
4130 i40e_write_rx_ctl(&pf->hw, in i40e_reprogram_flex_pit()
4137 * i40e_flow_str - Converts a flow_type into a human readable string
4145 switch (fsp->flow_type & ~FLOW_EXT) { in i40e_flow_str()
4168 * i40e_pit_index_to_mask - Return the FLEX mask for a given PIT index
4199 * i40e_print_input_set - Show changes between two input sets
4210 struct i40e_pf *pf = vsi->back; in i40e_print_input_set()
4217 netif_info(pf, drv, vsi->netdev, "L3 source address: %s -> %s\n", in i40e_print_input_set()
4224 netif_info(pf, drv, vsi->netdev, "L3 destination address: %s -> %s\n", in i40e_print_input_set()
4231 netif_info(pf, drv, vsi->netdev, "L4 source port: %s -> %s\n", in i40e_print_input_set()
4238 netif_info(pf, drv, vsi->netdev, "L4 destination port: %s -> %s\n", in i40e_print_input_set()
4245 netif_info(pf, drv, vsi->netdev, "SCTP verification tag: %s -> %s\n", in i40e_print_input_set()
4256 netif_info(pf, drv, vsi->netdev, "FLEX index %d: %s -> %s\n", in i40e_print_input_set()
4262 netif_info(pf, drv, vsi->netdev, " Current input set: %0llx\n", in i40e_print_input_set()
4264 netif_info(pf, drv, vsi->netdev, "Requested input set: %0llx\n", in i40e_print_input_set()
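
The netif_info() calls above print each field of the current and the requested input set as an "%s -> %s" pair. A minimal userspace sketch of the same reporting idea follows; the field bit values and the helper are placeholders, not the hardware-defined input-set bits.

#include <stdint.h>
#include <stdio.h>

#define SKETCH_L3_SRC 0x1ULL		/* illustrative field bits */
#define SKETCH_L3_DST 0x2ULL

static const char *onoff(uint64_t set, uint64_t field)
{
	return (set & field) ? "ON" : "OFF";
}

static void print_input_set_change(uint64_t old_set, uint64_t new_set)
{
	printf("L3 source address: %s -> %s\n",
	       onoff(old_set, SKETCH_L3_SRC), onoff(new_set, SKETCH_L3_SRC));
	printf("L3 destination address: %s -> %s\n",
	       onoff(old_set, SKETCH_L3_DST), onoff(new_set, SKETCH_L3_DST));
	printf(" Current input set: %016llx\n", (unsigned long long)old_set);
	printf("Requested input set: %016llx\n", (unsigned long long)new_set);
}

int main(void)
{
	print_input_set_change(SKETCH_L3_SRC, SKETCH_L3_SRC | SKETCH_L3_DST);
	return 0;
}
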
4269 * i40e_check_fdir_input_set - Check that a given rx_flow_spec mask is valid
4304 struct i40e_pf *pf = vsi->back; in i40e_check_fdir_input_set()
4313 switch (fsp->flow_type & ~FLOW_EXT) { in i40e_check_fdir_input_set()
4316 fdir_filter_count = &pf->fd_sctp4_filter_cnt; in i40e_check_fdir_input_set()
4320 fdir_filter_count = &pf->fd_tcp4_filter_cnt; in i40e_check_fdir_input_set()
4324 fdir_filter_count = &pf->fd_udp4_filter_cnt; in i40e_check_fdir_input_set()
4328 fdir_filter_count = &pf->fd_sctp6_filter_cnt; in i40e_check_fdir_input_set()
4332 fdir_filter_count = &pf->fd_tcp6_filter_cnt; in i40e_check_fdir_input_set()
4336 fdir_filter_count = &pf->fd_udp6_filter_cnt; in i40e_check_fdir_input_set()
4340 fdir_filter_count = &pf->fd_ip4_filter_cnt; in i40e_check_fdir_input_set()
4345 fdir_filter_count = &pf->fd_ip6_filter_cnt; in i40e_check_fdir_input_set()
4349 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4365 switch (fsp->flow_type & ~FLOW_EXT) { in i40e_check_fdir_input_set()
4371 tcp_ip4_spec = &fsp->m_u.tcp_ip4_spec; in i40e_check_fdir_input_set()
4374 if (tcp_ip4_spec->ip4src == htonl(0xFFFFFFFF)) in i40e_check_fdir_input_set()
4376 else if (!tcp_ip4_spec->ip4src) in i40e_check_fdir_input_set()
4379 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4382 if (tcp_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) in i40e_check_fdir_input_set()
4384 else if (!tcp_ip4_spec->ip4dst) in i40e_check_fdir_input_set()
4387 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4390 if (tcp_ip4_spec->psrc == htons(0xFFFF)) in i40e_check_fdir_input_set()
4392 else if (!tcp_ip4_spec->psrc) in i40e_check_fdir_input_set()
4395 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4398 if (tcp_ip4_spec->pdst == htons(0xFFFF)) in i40e_check_fdir_input_set()
4400 else if (!tcp_ip4_spec->pdst) in i40e_check_fdir_input_set()
4403 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4406 if (tcp_ip4_spec->tos) in i40e_check_fdir_input_set()
4407 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4415 tcp_ip6_spec = &fsp->m_u.tcp_ip6_spec; in i40e_check_fdir_input_set()
4418 if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6src, in i40e_check_fdir_input_set()
4422 &tcp_ip6_spec->ip6src)) in i40e_check_fdir_input_set()
4425 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4428 if (ipv6_addr_equal((struct in6_addr *)&tcp_ip6_spec->ip6dst, in i40e_check_fdir_input_set()
4432 &tcp_ip6_spec->ip6dst)) in i40e_check_fdir_input_set()
4435 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4438 if (tcp_ip6_spec->psrc == htons(0xFFFF)) in i40e_check_fdir_input_set()
4440 else if (!tcp_ip6_spec->psrc) in i40e_check_fdir_input_set()
4443 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4446 if (tcp_ip6_spec->pdst == htons(0xFFFF)) in i40e_check_fdir_input_set()
4448 else if (!tcp_ip6_spec->pdst) in i40e_check_fdir_input_set()
4451 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4454 if (tcp_ip6_spec->tclass) in i40e_check_fdir_input_set()
4455 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4458 usr_ip4_spec = &fsp->m_u.usr_ip4_spec; in i40e_check_fdir_input_set()
4461 if (usr_ip4_spec->ip4src == htonl(0xFFFFFFFF)) in i40e_check_fdir_input_set()
4463 else if (!usr_ip4_spec->ip4src) in i40e_check_fdir_input_set()
4466 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4469 if (usr_ip4_spec->ip4dst == htonl(0xFFFFFFFF)) in i40e_check_fdir_input_set()
4471 else if (!usr_ip4_spec->ip4dst) in i40e_check_fdir_input_set()
4474 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4477 if (usr_ip4_spec->l4_4_bytes) in i40e_check_fdir_input_set()
4478 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4481 if (usr_ip4_spec->tos) in i40e_check_fdir_input_set()
4482 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4485 if (usr_ip4_spec->ip_ver) in i40e_check_fdir_input_set()
4486 return -EINVAL; in i40e_check_fdir_input_set()
4489 if (usr_ip4_spec->proto) in i40e_check_fdir_input_set()
4490 return -EINVAL; in i40e_check_fdir_input_set()
4494 usr_ip6_spec = &fsp->m_u.usr_ip6_spec; in i40e_check_fdir_input_set()
4497 if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6src, in i40e_check_fdir_input_set()
4501 &usr_ip6_spec->ip6src)) in i40e_check_fdir_input_set()
4504 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4507 if (ipv6_addr_equal((struct in6_addr *)&usr_ip6_spec->ip6dst, in i40e_check_fdir_input_set()
4511 &usr_ip6_spec->ip6dst)) in i40e_check_fdir_input_set()
4514 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4516 if (usr_ip6_spec->l4_4_bytes) in i40e_check_fdir_input_set()
4517 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4520 if (usr_ip6_spec->tclass) in i40e_check_fdir_input_set()
4521 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4524 if (usr_ip6_spec->l4_proto) in i40e_check_fdir_input_set()
4525 return -EINVAL; in i40e_check_fdir_input_set()
4529 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4532 if (fsp->flow_type & FLOW_EXT) { in i40e_check_fdir_input_set()
4536 if (fsp->h_ext.vlan_etype != htons(ETH_P_8021Q) && in i40e_check_fdir_input_set()
4537 fsp->h_ext.vlan_etype != 0) in i40e_check_fdir_input_set()
4538 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4539 if (fsp->m_ext.vlan_tci == htons(0xFFFF)) in i40e_check_fdir_input_set()
4553 if (userdef->flex_filter) { in i40e_check_fdir_input_set()
4557 * must be aligned on 2-byte boundary. in i40e_check_fdir_input_set()
4559 if (userdef->flex_offset & 0x1) { in i40e_check_fdir_input_set()
4560 dev_warn(&pf->pdev->dev, in i40e_check_fdir_input_set()
4561 "Flexible data offset must be 2-byte aligned\n"); in i40e_check_fdir_input_set()
4562 return -EINVAL; in i40e_check_fdir_input_set()
4565 src_offset = userdef->flex_offset >> 1; in i40e_check_fdir_input_set()
4569 dev_warn(&pf->pdev->dev, in i40e_check_fdir_input_set()
4571 return -EINVAL; in i40e_check_fdir_input_set()
4579 flex_pit = i40e_find_flex_offset(&pf->l4_flex_pit_list, in i40e_check_fdir_input_set()
4593 i40e_find_flex_offset(&pf->l3_flex_pit_list, in i40e_check_fdir_input_set()
4604 if (l3_flex_pit->pit_index != in i40e_check_fdir_input_set()
4605 flex_pit->pit_index) { in i40e_check_fdir_input_set()
4606 return -EINVAL; in i40e_check_fdir_input_set()
4625 pit_index = flex_pit->pit_index; in i40e_check_fdir_input_set()
4639 netif_info(pf, drv, vsi->netdev, "Input set change requested for %s flows:\n", in i40e_check_fdir_input_set()
4643 netif_info(pf, drv, vsi->netdev, "FLEX index %d: Offset -> %d", in i40e_check_fdir_input_set()
4651 if (test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { in i40e_check_fdir_input_set()
4652 netif_err(pf, drv, vsi->netdev, "Cannot change Flow Director input sets while MFP is enabled\n"); in i40e_check_fdir_input_set()
4653 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4666 …netif_err(pf, drv, vsi->netdev, "Cannot change input set for %s flows until %d preexisting filters… in i40e_check_fdir_input_set()
4669 return -EOPNOTSUPP; in i40e_check_fdir_input_set()
4686 err = i40e_add_flex_offset(&pf->l4_flex_pit_list, src_offset, in i40e_check_fdir_input_set()
4692 err = i40e_add_flex_offset(&pf->l3_flex_pit_list, in i40e_check_fdir_input_set()
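
Throughout the switch above, every mask field is accepted only when it is all-ones (the field joins the input set) or all-zeros (the field is ignored); any partial mask fails with -EOPNOTSUPP. A minimal standalone sketch of that all-or-nothing check for an IPv4 source address and source port; the FIELD_* flag values are placeholders rather than the hardware-defined input-set bits.

#include <errno.h>
#include <stdint.h>
#include <arpa/inet.h>

#define FIELD_SRC_IP	0x1ULL		/* illustrative input-set bits */
#define FIELD_SRC_PORT	0x2ULL

/* A mask is valid only when it is fully set (match on the field) or fully
 * clear (ignore the field); anything in between is rejected.
 */
int apply_ipv4_masks(uint32_t ip4src_mask, uint16_t psrc_mask,
		     uint64_t *input_set)
{
	if (ip4src_mask == htonl(0xFFFFFFFF))
		*input_set |= FIELD_SRC_IP;
	else if (ip4src_mask)
		return -EOPNOTSUPP;	/* partial IP masks unsupported */

	if (psrc_mask == htons(0xFFFF))
		*input_set |= FIELD_SRC_PORT;
	else if (psrc_mask)
		return -EOPNOTSUPP;	/* partial port masks unsupported */

	return 0;
}

The flexible-payload branch applies the same idea to the user-defined word, after checking that the byte offset is 2-byte aligned and halving it into a word offset.
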
4706 * i40e_match_fdir_filter - Return true if two filters match
4712 * check any input-set since all filters of the same flow type must use the
4719 if (a->dst_ip != b->dst_ip || in i40e_match_fdir_filter()
4720 a->src_ip != b->src_ip || in i40e_match_fdir_filter()
4721 a->dst_port != b->dst_port || in i40e_match_fdir_filter()
4722 a->src_port != b->src_port || in i40e_match_fdir_filter()
4723 a->flow_type != b->flow_type || in i40e_match_fdir_filter()
4724 a->ipl4_proto != b->ipl4_proto || in i40e_match_fdir_filter()
4725 a->vlan_tag != b->vlan_tag || in i40e_match_fdir_filter()
4726 a->vlan_etype != b->vlan_etype) in i40e_match_fdir_filter()
4733 * i40e_disallow_matching_filters - Check that new filters differ
4760 struct i40e_pf *pf = vsi->back; in i40e_disallow_matching_filters()
4766 &pf->fdir_filter_list, fdir_node) { in i40e_disallow_matching_filters()
4771 if (rule->fd_id == input->fd_id) in i40e_disallow_matching_filters()
4778 dev_warn(&pf->pdev->dev, in i40e_disallow_matching_filters()
4780 rule->fd_id); in i40e_disallow_matching_filters()
4781 return -EINVAL; in i40e_disallow_matching_filters()
4789 * i40e_add_fdir_ethtool - Add/Remove Flow Director filters
4804 int ret = -EINVAL; in i40e_add_fdir_ethtool()
4808 return -EINVAL; in i40e_add_fdir_ethtool()
4809 pf = vsi->back; in i40e_add_fdir_ethtool()
4811 if (!test_bit(I40E_FLAG_FD_SB_ENA, pf->flags)) in i40e_add_fdir_ethtool()
4812 return -EOPNOTSUPP; in i40e_add_fdir_ethtool()
4814 if (test_bit(__I40E_FD_SB_AUTO_DISABLED, pf->state)) in i40e_add_fdir_ethtool()
4815 return -ENOSPC; in i40e_add_fdir_ethtool()
4817 if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || in i40e_add_fdir_ethtool()
4818 test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) in i40e_add_fdir_ethtool()
4819 return -EBUSY; in i40e_add_fdir_ethtool()
4821 if (test_bit(__I40E_FD_FLUSH_REQUESTED, pf->state)) in i40e_add_fdir_ethtool()
4822 return -EBUSY; in i40e_add_fdir_ethtool()
4824 fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; in i40e_add_fdir_ethtool()
4826 /* Parse the user-defined field */ in i40e_add_fdir_ethtool()
4828 return -EINVAL; in i40e_add_fdir_ethtool()
4831 if (fsp->flow_type & FLOW_MAC_EXT) in i40e_add_fdir_ethtool()
4832 return -EINVAL; in i40e_add_fdir_ethtool()
4838 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + in i40e_add_fdir_ethtool()
4839 pf->hw.func_caps.fd_filters_guaranteed)) { in i40e_add_fdir_ethtool()
4840 return -EINVAL; in i40e_add_fdir_ethtool()
4846 if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { in i40e_add_fdir_ethtool()
4849 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); in i40e_add_fdir_ethtool()
4850 u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie); in i40e_add_fdir_ethtool()
4853 if (ring >= vsi->num_queue_pairs) in i40e_add_fdir_ethtool()
4854 return -EINVAL; in i40e_add_fdir_ethtool()
4855 dest_vsi = vsi->id; in i40e_add_fdir_ethtool()
4857 /* VFs are zero-indexed, so we subtract one here */ in i40e_add_fdir_ethtool()
4858 vf--; in i40e_add_fdir_ethtool()
4860 if (vf >= pf->num_alloc_vfs) in i40e_add_fdir_ethtool()
4861 return -EINVAL; in i40e_add_fdir_ethtool()
4862 if (ring >= pf->vf[vf].num_queue_pairs) in i40e_add_fdir_ethtool()
4863 return -EINVAL; in i40e_add_fdir_ethtool()
4864 dest_vsi = pf->vf[vf].lan_vsi_id; in i40e_add_fdir_ethtool()
4873 return -ENOMEM; in i40e_add_fdir_ethtool()
4875 input->fd_id = fsp->location; in i40e_add_fdir_ethtool()
4876 input->q_index = q_index; in i40e_add_fdir_ethtool()
4877 input->dest_vsi = dest_vsi; in i40e_add_fdir_ethtool()
4878 input->dest_ctl = dest_ctl; in i40e_add_fdir_ethtool()
4879 input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID; in i40e_add_fdir_ethtool()
4880 input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); in i40e_add_fdir_ethtool()
4881 input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; in i40e_add_fdir_ethtool()
4882 input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; in i40e_add_fdir_ethtool()
4883 input->flow_type = fsp->flow_type & ~FLOW_EXT; in i40e_add_fdir_ethtool()
4885 input->vlan_etype = fsp->h_ext.vlan_etype; in i40e_add_fdir_ethtool()
4886 if (!fsp->m_ext.vlan_etype && fsp->h_ext.vlan_tci) in i40e_add_fdir_ethtool()
4887 input->vlan_etype = cpu_to_be16(ETH_P_8021Q); in i40e_add_fdir_ethtool()
4888 if (fsp->m_ext.vlan_tci && input->vlan_etype) in i40e_add_fdir_ethtool()
4889 input->vlan_tag = fsp->h_ext.vlan_tci; in i40e_add_fdir_ethtool()
4890 if (input->flow_type == IPV6_USER_FLOW || in i40e_add_fdir_ethtool()
4891 input->flow_type == UDP_V6_FLOW || in i40e_add_fdir_ethtool()
4892 input->flow_type == TCP_V6_FLOW || in i40e_add_fdir_ethtool()
4893 input->flow_type == SCTP_V6_FLOW) { in i40e_add_fdir_ethtool()
4898 input->ipl4_proto = fsp->h_u.usr_ip6_spec.l4_proto; in i40e_add_fdir_ethtool()
4899 input->dst_port = fsp->h_u.tcp_ip6_spec.psrc; in i40e_add_fdir_ethtool()
4900 input->src_port = fsp->h_u.tcp_ip6_spec.pdst; in i40e_add_fdir_ethtool()
4901 memcpy(input->dst_ip6, fsp->h_u.ah_ip6_spec.ip6src, in i40e_add_fdir_ethtool()
4903 memcpy(input->src_ip6, fsp->h_u.ah_ip6_spec.ip6dst, in i40e_add_fdir_ethtool()
4910 input->ipl4_proto = fsp->h_u.usr_ip4_spec.proto; in i40e_add_fdir_ethtool()
4911 input->dst_port = fsp->h_u.tcp_ip4_spec.psrc; in i40e_add_fdir_ethtool()
4912 input->src_port = fsp->h_u.tcp_ip4_spec.pdst; in i40e_add_fdir_ethtool()
4913 input->dst_ip = fsp->h_u.tcp_ip4_spec.ip4src; in i40e_add_fdir_ethtool()
4914 input->src_ip = fsp->h_u.tcp_ip4_spec.ip4dst; in i40e_add_fdir_ethtool()
4918 input->flex_filter = true; in i40e_add_fdir_ethtool()
4919 input->flex_word = cpu_to_be16(userdef.flex_word); in i40e_add_fdir_ethtool()
4920 input->flex_offset = userdef.flex_offset; in i40e_add_fdir_ethtool()
4930 * to the list as this would cause a use-after-free bug. in i40e_add_fdir_ethtool()
4932 i40e_update_ethtool_fdir_entry(vsi, input, fsp->location, NULL); in i40e_add_fdir_ethtool()
4939 hlist_del(&input->fdir_node); in i40e_add_fdir_ethtool()
4940 pf->fdir_pf_active_filters--; in i40e_add_fdir_ethtool()
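
When the rule is not a drop rule, the code above unpacks fsp->ring_cookie into a queue index and an optional VF, where the VF field stores "VF number + 1" so that zero can mean "deliver to the PF". A small standalone sketch of that cookie layout; the masks mirror what the uapi ethtool helpers encode but are restated locally purely for illustration.

#include <stdint.h>
#include <stdio.h>

#define RING_COOKIE_QUEUE_MASK	0x00000000FFFFFFFFULL	/* low 32 bits: queue */
#define RING_COOKIE_VF_MASK	0x000000FF00000000ULL	/* next 8 bits: VF + 1 */
#define RING_COOKIE_VF_SHIFT	32

static uint32_t cookie_queue(uint64_t ring_cookie)
{
	return (uint32_t)(ring_cookie & RING_COOKIE_QUEUE_MASK);
}

static uint8_t cookie_vf(uint64_t ring_cookie)
{
	return (uint8_t)((ring_cookie & RING_COOKIE_VF_MASK) >> RING_COOKIE_VF_SHIFT);
}

int main(void)
{
	uint64_t cookie = ((uint64_t)3 << RING_COOKIE_VF_SHIFT) | 5;
	uint8_t vf = cookie_vf(cookie);

	if (vf)
		vf--;	/* the cookie stores VF + 1; internal VF ids start at 0 */

	printf("queue %u on %s %u\n", cookie_queue(cookie),
	       cookie_vf(cookie) ? "VF" : "PF", (unsigned int)vf);
	return 0;	/* prints "queue 5 on VF 2" */
}

From userspace this path is typically exercised with ethtool -N <dev> flow-type ... action <queue>.
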
4947 * i40e_set_rxnfc - command to set RX flow classification rules
4956 struct i40e_vsi *vsi = np->vsi; in i40e_set_rxnfc()
4957 struct i40e_pf *pf = vsi->back; in i40e_set_rxnfc()
4958 int ret = -EOPNOTSUPP; in i40e_set_rxnfc()
4960 switch (cmd->cmd) { in i40e_set_rxnfc()
4978 * i40e_max_channels - get the maximum number of combined channels supported
4984 return vsi->alloc_queue_pairs; in i40e_max_channels()
4988 * i40e_get_channels - Get the currently enabled and maximum supported channels
5001 struct i40e_vsi *vsi = np->vsi; in i40e_get_channels()
5002 struct i40e_pf *pf = vsi->back; in i40e_get_channels()
5005 ch->max_combined = i40e_max_channels(vsi); in i40e_get_channels()
5008 ch->other_count = test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0; in i40e_get_channels()
5009 ch->max_other = ch->other_count; in i40e_get_channels()
5012 ch->combined_count = vsi->num_queue_pairs; in i40e_get_channels()
5016 * i40e_set_channels - Set the new channel count.
5028 unsigned int count = ch->combined_count; in i40e_set_channels()
5029 struct i40e_vsi *vsi = np->vsi; in i40e_set_channels()
5030 struct i40e_pf *pf = vsi->back; in i40e_set_channels()
5037 if (vsi->type != I40E_VSI_MAIN) in i40e_set_channels()
5038 return -EINVAL; in i40e_set_channels()
5044 return -EINVAL; in i40e_set_channels()
5047 if (!count || ch->rx_count || ch->tx_count) in i40e_set_channels()
5048 return -EINVAL; in i40e_set_channels()
5051 if (ch->other_count != (test_bit(I40E_FLAG_FD_SB_ENA, pf->flags) ? 1 : 0)) in i40e_set_channels()
5052 return -EINVAL; in i40e_set_channels()
5056 return -EINVAL; in i40e_set_channels()
5062 &pf->fdir_filter_list, fdir_node) { in i40e_set_channels()
5063 if (rule->dest_ctl != drop && count <= rule->q_index) { in i40e_set_channels()
5064 dev_warn(&pf->pdev->dev, in i40e_set_channels()
5066 rule->fd_id, rule->q_index); in i40e_set_channels()
5067 err = -EINVAL; in i40e_set_channels()
5072 dev_err(&pf->pdev->dev, in i40e_set_channels()
5088 return -EINVAL; in i40e_set_channels()
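
Before shrinking the queue count, the loop above walks the flow director list and rejects the change if any non-drop rule still steers traffic to a queue index that would no longer exist. A compile-only sketch of that guard with a simplified, illustrative rule structure:

#include <errno.h>
#include <stdbool.h>

struct fdir_rule {
	int fd_id;		/* rule identifier, for the warning message */
	unsigned int q_index;	/* destination queue */
	bool drop;		/* drop rules do not care about queue indexes */
	struct fdir_rule *next;
};

int check_new_queue_count(const struct fdir_rule *rules, unsigned int count)
{
	const struct fdir_rule *rule;

	for (rule = rules; rule; rule = rule->next)
		if (!rule->drop && rule->q_index >= count)
			return -EINVAL;	/* rule targets a queue being removed */
	return 0;
}

The channel count itself is changed from userspace with ethtool -L <dev> combined <N>.
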
5092 * i40e_get_rxfh_key_size - get the RSS hash key size
5103 * i40e_get_rxfh_indir_size - get the rx flow hash indirection table size
5114 * i40e_get_rxfh - get the rx flow hash indirection table
5125 struct i40e_vsi *vsi = np->vsi; in i40e_get_rxfh()
5130 rxfh->hfunc = ETH_RSS_HASH_TOP; in i40e_get_rxfh()
5132 if (!rxfh->indir) in i40e_get_rxfh()
5135 seed = rxfh->key; in i40e_get_rxfh()
5138 return -ENOMEM; in i40e_get_rxfh()
5143 rxfh->indir[i] = (u32)(lut[i]); in i40e_get_rxfh()
5152 * i40e_set_rxfh - set the rx flow hash indirection table
5157 * Returns -EINVAL if the table specifies an invalid queue id, otherwise
5165 struct i40e_vsi *vsi = np->vsi; in i40e_set_rxfh()
5166 struct i40e_pf *pf = vsi->back; in i40e_set_rxfh()
5170 if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && in i40e_set_rxfh()
5171 rxfh->hfunc != ETH_RSS_HASH_TOP) in i40e_set_rxfh()
5172 return -EOPNOTSUPP; in i40e_set_rxfh()
5174 if (rxfh->key) { in i40e_set_rxfh()
5175 if (!vsi->rss_hkey_user) { in i40e_set_rxfh()
5176 vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE, in i40e_set_rxfh()
5178 if (!vsi->rss_hkey_user) in i40e_set_rxfh()
5179 return -ENOMEM; in i40e_set_rxfh()
5181 memcpy(vsi->rss_hkey_user, rxfh->key, I40E_HKEY_ARRAY_SIZE); in i40e_set_rxfh()
5182 seed = vsi->rss_hkey_user; in i40e_set_rxfh()
5184 if (!vsi->rss_lut_user) { in i40e_set_rxfh()
5185 vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL); in i40e_set_rxfh()
5186 if (!vsi->rss_lut_user) in i40e_set_rxfh()
5187 return -ENOMEM; in i40e_set_rxfh()
5191 if (rxfh->indir) in i40e_set_rxfh()
5193 vsi->rss_lut_user[i] = (u8)(rxfh->indir[i]); in i40e_set_rxfh()
5195 i40e_fill_rss_lut(pf, vsi->rss_lut_user, I40E_HLUT_ARRAY_SIZE, in i40e_set_rxfh()
5196 vsi->rss_size); in i40e_set_rxfh()
5198 return i40e_config_rss(vsi, seed, vsi->rss_lut_user, in i40e_set_rxfh()
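
When no indirection table is supplied, the driver fills the cached user LUT itself before programming it. The exact fill performed by i40e_fill_rss_lut() is not visible in this listing; the sketch below assumes the usual default of spreading table entries round-robin over the active queues.

#include <stdint.h>
#include <stdio.h>

static void fill_rss_lut(uint8_t *lut, unsigned int table_size,
			 unsigned int rss_size)
{
	for (unsigned int i = 0; i < table_size; i++)
		lut[i] = i % rss_size;	/* spread entries evenly over the queues */
}

int main(void)
{
	uint8_t lut[16];

	fill_rss_lut(lut, sizeof(lut), 4);	/* 16-entry table over 4 queues */
	for (unsigned int i = 0; i < sizeof(lut); i++)
		printf("%u ", lut[i]);		/* 0 1 2 3 0 1 2 3 ... */
	printf("\n");
	return 0;
}

Userspace sets or inspects the table and hash key with ethtool -X/-x, e.g. ethtool -X <dev> equal <N>.
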
5203 * i40e_get_priv_flags - report device private flags
5215 struct i40e_vsi *vsi = np->vsi; in i40e_get_priv_flags()
5216 struct i40e_pf *pf = vsi->back; in i40e_get_priv_flags()
5224 if (test_bit(priv_flag->bitno, pf->flags)) in i40e_get_priv_flags()
5228 if (pf->hw.pf_id != 0) in i40e_get_priv_flags()
5236 if (test_bit(priv_flag->bitno, pf->flags)) in i40e_get_priv_flags()
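
The two loops above fold the driver's flag bitmap into the u32 that ethtool reports, with bit i of the result following the order of the descriptor table (the global flags first, then the PF-0-only ones). A compile-only sketch of that packing, using simplified descriptor and bitmap types as assumptions:

#include <stddef.h>
#include <stdint.h>

#define BITS_PER_LONG_SKETCH	(8 * sizeof(unsigned long))

struct priv_flag_desc {
	const char *name;	/* string shown by ethtool */
	unsigned int bitno;	/* position in the driver's flag bitmap */
};

uint32_t pack_priv_flags(const struct priv_flag_desc *descs, size_t count,
			 const unsigned long *flag_bitmap)
{
	uint32_t ret = 0;

	for (size_t i = 0; i < count; i++) {
		unsigned int bit = descs[i].bitno;

		if (flag_bitmap[bit / BITS_PER_LONG_SKETCH] &
		    (1UL << (bit % BITS_PER_LONG_SKETCH)))
			ret |= 1U << i;	/* ethtool flag i follows table order */
	}
	return ret;
}

These are the flags listed by ethtool --show-priv-flags and toggled by --set-priv-flags.
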
5244 * i40e_set_priv_flags - set private flags
5255 struct i40e_vsi *vsi = np->vsi; in i40e_set_priv_flags()
5256 struct i40e_pf *pf = vsi->back; in i40e_set_priv_flags()
5261 bitmap_copy(orig_flags, pf->flags, I40E_PF_FLAGS_NBITS); in i40e_set_priv_flags()
5262 bitmap_copy(new_flags, pf->flags, I40E_PF_FLAGS_NBITS); in i40e_set_priv_flags()
5271 /* If this is a read-only flag, it can't be changed */ in i40e_set_priv_flags()
5272 if (priv_flag->read_only && in i40e_set_priv_flags()
5273 test_bit(priv_flag->bitno, orig_flags) != new_val) in i40e_set_priv_flags()
5274 return -EOPNOTSUPP; in i40e_set_priv_flags()
5277 set_bit(priv_flag->bitno, new_flags); in i40e_set_priv_flags()
5279 clear_bit(priv_flag->bitno, new_flags); in i40e_set_priv_flags()
5282 if (pf->hw.pf_id != 0) in i40e_set_priv_flags()
5292 /* If this is a read-only flag, it can't be changed */ in i40e_set_priv_flags()
5293 if (priv_flag->read_only && in i40e_set_priv_flags()
5294 test_bit(priv_flag->bitno, orig_flags) != new_val) in i40e_set_priv_flags()
5295 return -EOPNOTSUPP; in i40e_set_priv_flags()
5298 set_bit(priv_flag->bitno, new_flags); in i40e_set_priv_flags()
5300 clear_bit(priv_flag->bitno, new_flags); in i40e_set_priv_flags()
5320 !test_bit(I40E_HW_CAP_ATR_EVICT, pf->hw.caps)) in i40e_set_priv_flags()
5321 return -EOPNOTSUPP; in i40e_set_priv_flags()
5325 * - on XL710 if NPAR is enabled or FW API version < 1.7 in i40e_set_priv_flags()
5326 * - on X722 with FW API version < 1.6 in i40e_set_priv_flags()
5332 !test_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, pf->hw.caps)) { in i40e_set_priv_flags()
5333 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5335 return -EOPNOTSUPP; in i40e_set_priv_flags()
5339 pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && in i40e_set_priv_flags()
5340 pf->hw.device_id != I40E_DEV_ID_25G_B) { in i40e_set_priv_flags()
5341 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5343 return -EOPNOTSUPP; in i40e_set_priv_flags()
5347 pf->hw.device_id != I40E_DEV_ID_25G_SFP28 && in i40e_set_priv_flags()
5348 pf->hw.device_id != I40E_DEV_ID_25G_B && in i40e_set_priv_flags()
5349 pf->hw.device_id != I40E_DEV_ID_KX_X722) { in i40e_set_priv_flags()
5350 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5352 return -EOPNOTSUPP; in i40e_set_priv_flags()
5363 set_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); in i40e_set_priv_flags()
5364 set_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); in i40e_set_priv_flags()
5374 ret = i40e_aq_set_switch_config(&pf->hw, sw_flags, valid_flags, in i40e_set_priv_flags()
5376 if (ret && pf->hw.aq.asq_last_status != I40E_AQ_RC_ESRCH) { in i40e_set_priv_flags()
5377 dev_info(&pf->pdev->dev, in i40e_set_priv_flags()
5380 i40e_aq_str(&pf->hw, in i40e_set_priv_flags()
5381 pf->hw.aq.asq_last_status)); in i40e_set_priv_flags()
5401 dev_warn(&pf->pdev->dev, "Cannot change FEC config\n"); in i40e_set_priv_flags()
5406 dev_err(&pf->pdev->dev, in i40e_set_priv_flags()
5407 …"Setting link-down-on-close not supported on this port (because total-port-shutdown is enabled)\n"… in i40e_set_priv_flags()
5408 return -EOPNOTSUPP; in i40e_set_priv_flags()
5412 pf->num_alloc_vfs) { in i40e_set_priv_flags()
5413 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5414 "Changing vf-vlan-pruning flag while VF(s) are active is not supported\n"); in i40e_set_priv_flags()
5415 return -EOPNOTSUPP; in i40e_set_priv_flags()
5420 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5422 return -EOPNOTSUPP; in i40e_set_priv_flags()
5428 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5429 "Turning on link-down-on-close flag may affect other partitions\n"); in i40e_set_priv_flags()
5436 i40e_aq_cfg_lldp_mib_change_event(&pf->hw, false, NULL); in i40e_set_priv_flags()
5437 i40e_aq_stop_lldp(&pf->hw, true, false, NULL); in i40e_set_priv_flags()
5439 status = i40e_aq_start_lldp(&pf->hw, false, NULL); in i40e_set_priv_flags()
5441 adq_err = pf->hw.aq.asq_last_status; in i40e_set_priv_flags()
5444 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5449 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5451 return -EINVAL; in i40e_set_priv_flags()
5453 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5455 return -EBUSY; in i40e_set_priv_flags()
5457 dev_warn(&pf->pdev->dev, in i40e_set_priv_flags()
5460 i40e_aq_str(&pf->hw, in i40e_set_priv_flags()
5462 return -EINVAL; in i40e_set_priv_flags()
5473 bitmap_copy(pf->flags, new_flags, I40E_PF_FLAGS_NBITS); in i40e_set_priv_flags()
5485 * i40e_get_module_info - get (Q)SFP+ module type info
5493 struct i40e_vsi *vsi = np->vsi; in i40e_get_module_info()
5494 struct i40e_pf *pf = vsi->back; in i40e_get_module_info()
5495 struct i40e_hw *hw = &pf->hw; in i40e_get_module_info()
5503 if (!test_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps)) { in i40e_get_module_info()
5504 …netdev_err(vsi->netdev, "Module EEPROM memory read not supported. Please update the NVM image.\n"); in i40e_get_module_info()
5505 return -EINVAL; in i40e_get_module_info()
5510 return -EIO; in i40e_get_module_info()
5512 if (hw->phy.link_info.phy_type == I40E_PHY_TYPE_EMPTY) { in i40e_get_module_info()
5513 netdev_err(vsi->netdev, "Cannot read module EEPROM memory. No module connected.\n"); in i40e_get_module_info()
5514 return -EINVAL; in i40e_get_module_info()
5517 type = hw->phy.link_info.module_type[0]; in i40e_get_module_info()
5527 return -EIO; in i40e_get_module_info()
5535 return -EIO; in i40e_get_module_info()
5541 netdev_warn(vsi->netdev, "Module address swap to access page 0xA2 is not supported.\n"); in i40e_get_module_info()
5542 modinfo->type = ETH_MODULE_SFF_8079; in i40e_get_module_info()
5543 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; in i40e_get_module_info()
5545 /* Module is not SFF-8472 compliant */ in i40e_get_module_info()
5546 modinfo->type = ETH_MODULE_SFF_8079; in i40e_get_module_info()
5547 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; in i40e_get_module_info()
5549 /* Module is SFF-8472 compliant but doesn't implement in i40e_get_module_info()
5552 modinfo->type = ETH_MODULE_SFF_8079; in i40e_get_module_info()
5553 modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN; in i40e_get_module_info()
5555 modinfo->type = ETH_MODULE_SFF_8472; in i40e_get_module_info()
5556 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; in i40e_get_module_info()
5567 return -EIO; in i40e_get_module_info()
5570 /* Module is SFF-8636 compliant */ in i40e_get_module_info()
5571 modinfo->type = ETH_MODULE_SFF_8636; in i40e_get_module_info()
5572 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; in i40e_get_module_info()
5574 modinfo->type = ETH_MODULE_SFF_8436; in i40e_get_module_info()
5575 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; in i40e_get_module_info()
5579 modinfo->type = ETH_MODULE_SFF_8636; in i40e_get_module_info()
5580 modinfo->eeprom_len = I40E_MODULE_QSFP_MAX_LEN; in i40e_get_module_info()
5583 netdev_dbg(vsi->netdev, "SFP module type unrecognized or no SFP connector used.\n"); in i40e_get_module_info()
5584 return -EOPNOTSUPP; in i40e_get_module_info()
5590 * i40e_get_module_eeprom - fills buffer with (Q)SFP+ module memory contents
5600 struct i40e_vsi *vsi = np->vsi; in i40e_get_module_eeprom()
5601 struct i40e_pf *pf = vsi->back; in i40e_get_module_eeprom()
5602 struct i40e_hw *hw = &pf->hw; in i40e_get_module_eeprom()
5608 if (!ee || !ee->len || !data) in i40e_get_module_eeprom()
5609 return -EINVAL; in i40e_get_module_eeprom()
5611 if (hw->phy.link_info.module_type[0] == I40E_MODULE_TYPE_SFP) in i40e_get_module_eeprom()
5614 for (i = 0; i < ee->len; i++) { in i40e_get_module_eeprom()
5615 u32 offset = i + ee->offset; in i40e_get_module_eeprom()
5621 offset -= ETH_MODULE_SFF_8079_LEN; in i40e_get_module_eeprom()
5627 offset -= ETH_MODULE_SFF_8436_LEN / 2; in i40e_get_module_eeprom()
5636 return -EIO; in i40e_get_module_eeprom()
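
For SFP modules the read loop above treats the ethtool buffer as one linear range: offsets below 256 come from the identification area and higher offsets are rebased onto the diagnostics device, which is why ETH_MODULE_SFF_8079_LEN is subtracted. A minimal sketch of that translation; the page length constant and the helper are restated locally as assumptions (SFF-8472 places the ID area at I2C address 0xA0 and the diagnostics at 0xA2).

#include <stdint.h>

#define SFF_8079_LEN	256	/* assumed page size, mirrors ETH_MODULE_SFF_8079_LEN */

struct eeprom_addr {
	uint8_t i2c_addr;	/* which device on the module to read */
	uint16_t offset;	/* offset within that device */
};

struct eeprom_addr sfp_linear_to_device(uint16_t linear_offset)
{
	struct eeprom_addr addr;

	if (linear_offset < SFF_8079_LEN) {
		addr.i2c_addr = 0xA0;				/* base ID page */
		addr.offset = linear_offset;
	} else {
		addr.i2c_addr = 0xA2;				/* diagnostics page */
		addr.offset = linear_offset - SFF_8079_LEN;	/* rebase into 0xA2 */
	}
	return addr;
}

The assembled buffer is what ethtool -m <dev> decodes.
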
5657 for (unsigned int i = ARRAY_SIZE(lut); i--; ) in i40e_eee_capability_to_kedata_supported()
5666 struct i40e_vsi *vsi = np->vsi; in i40e_get_eee()
5667 struct i40e_pf *pf = vsi->back; in i40e_get_eee()
5668 struct i40e_hw *hw = &pf->hw; in i40e_get_eee()
5674 return -EAGAIN; in i40e_get_eee()
5680 return -EOPNOTSUPP; in i40e_get_eee()
5683 edata->supported); in i40e_get_eee()
5684 linkmode_copy(edata->lp_advertised, edata->supported); in i40e_get_eee()
5689 return -EAGAIN; in i40e_get_eee()
5691 linkmode_zero(edata->advertised); in i40e_get_eee()
5693 linkmode_copy(edata->advertised, edata->supported); in i40e_get_eee()
5694 edata->eee_enabled = !!phy_cfg.eee_capability; in i40e_get_eee()
5695 edata->tx_lpi_enabled = pf->stats.tx_lpi_status; in i40e_get_eee()
5697 edata->eee_active = pf->stats.tx_lpi_status && pf->stats.rx_lpi_status; in i40e_get_eee()
5706 struct i40e_vsi *vsi = np->vsi; in i40e_is_eee_param_supported()
5707 struct i40e_pf *pf = vsi->back; in i40e_is_eee_param_supported()
5712 {!!(edata->advertised[0] & ~edata->supported[0]), "advertise"}, in i40e_is_eee_param_supported()
5713 {!!edata->tx_lpi_timer, "tx-timer"}, in i40e_is_eee_param_supported()
5714 {edata->tx_lpi_enabled != pf->stats.tx_lpi_status, "tx-lpi"} in i40e_is_eee_param_supported()
5723 return -EOPNOTSUPP; in i40e_is_eee_param_supported()
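
The helper above validates the requested EEE parameters with a small table of { failed-condition, name } pairs rather than a chain of if statements, so the warning can name the offending parameter before returning -EOPNOTSUPP. A standalone sketch of that table-driven check with illustrative parameter names:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct param_check {
	bool failed;		/* true when the request is unsupported */
	const char *name;	/* parameter named in the warning */
};

int check_eee_params(bool advertise_unsupported, bool tx_timer_set,
		     bool lpi_state_changed)
{
	const struct param_check checks[] = {
		{ advertise_unsupported, "advertise" },
		{ tx_timer_set,          "tx-timer"  },
		{ lpi_state_changed,     "tx-lpi"    },
	};

	for (size_t i = 0; i < sizeof(checks) / sizeof(checks[0]); i++) {
		if (checks[i].failed) {
			fprintf(stderr, "EEE setting of %s is not supported\n",
				checks[i].name);
			return -EOPNOTSUPP;
		}
	}
	return 0;
}

This is the validation path behind ethtool --set-eee.
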
5735 struct i40e_vsi *vsi = np->vsi; in i40e_set_eee()
5736 struct i40e_pf *pf = vsi->back; in i40e_set_eee()
5737 struct i40e_hw *hw = &pf->hw; in i40e_set_eee()
5743 return -EOPNOTSUPP; in i40e_set_eee()
5749 return -EAGAIN; in i40e_set_eee()
5755 return -EOPNOTSUPP; in i40e_set_eee()
5764 return -EAGAIN; in i40e_set_eee()
5778 if (edata->eee_enabled) { in i40e_set_eee()
5789 return -EAGAIN; in i40e_set_eee()
5857 struct i40e_pf *pf = np->vsi->back; in i40e_set_ethtool_ops()
5859 if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) in i40e_set_ethtool_ops()
5860 netdev->ethtool_ops = &i40e_ethtool_ops; in i40e_set_ethtool_ops()
5862 netdev->ethtool_ops = &i40e_ethtool_recovery_mode_ops; in i40e_set_ethtool_ops()