Lines matching full:vport (identifier cross-reference, Linux idpf driver Tx/Rx paths)

167  * @vport: virtual port structure
171 static void idpf_tx_desc_rel_all(struct idpf_vport *vport) in idpf_tx_desc_rel_all() argument
175 if (!vport->txq_grps) in idpf_tx_desc_rel_all()
178 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_tx_desc_rel_all()
179 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; in idpf_tx_desc_rel_all()
184 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_rel_all()
237 * @vport: vport to allocate resources for
242 static int idpf_tx_desc_alloc(const struct idpf_vport *vport, in idpf_tx_desc_alloc() argument
279 * @vport: vport to allocate resources for
284 static int idpf_compl_desc_alloc(const struct idpf_vport *vport, in idpf_compl_desc_alloc() argument
304 * @vport: virtual port private structure
308 static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) in idpf_tx_desc_alloc_all() argument
316 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_tx_desc_alloc_all()
317 for (j = 0; j < vport->txq_grps[i].num_txq; j++) { in idpf_tx_desc_alloc_all()
318 struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; in idpf_tx_desc_alloc_all()
322 err = idpf_tx_desc_alloc(vport, txq); in idpf_tx_desc_alloc_all()
324 pci_err(vport->adapter->pdev, in idpf_tx_desc_alloc_all()
330 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_alloc_all()
359 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_desc_alloc_all()
363 err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq); in idpf_tx_desc_alloc_all()
365 pci_err(vport->adapter->pdev, in idpf_tx_desc_alloc_all()
374 idpf_tx_desc_rel_all(vport); in idpf_tx_desc_alloc_all()
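
The span above pairs idpf_tx_desc_alloc_all() with idpf_tx_desc_rel_all(): every txq group gets a descriptor ring per tx queue, splitq groups additionally get one completion ring (lines 359-363), and any failure falls through to the release-all call at line 374 so partial allocations never leak. A minimal userspace model of that pattern, using stand-in types rather than the driver's real idpf_tx_queue/idpf_txq_group structures:

```c
/* Hypothetical stand-in types; the real driver uses DMA-coherent
 * rings and its own idpf_tx_queue / idpf_txq_group structures. */
#include <errno.h>
#include <stdlib.h>

struct model_txq { void *ring; };
struct model_txq_group {
	struct model_txq *txqs;
	int num_txq;
	struct model_txq complq;	/* used only in the splitq model */
};
struct model_vport {
	struct model_txq_group *txq_grps;
	int num_txq_grp;
	int splitq;
};

/* Release everything; safe on partially allocated state because
 * groups are zero-initialized and free(NULL) is a no-op. */
static void model_tx_desc_rel_all(struct model_vport *v)
{
	if (!v->txq_grps)
		return;

	for (int i = 0; i < v->num_txq_grp; i++) {
		struct model_txq_group *grp = &v->txq_grps[i];

		for (int j = 0; j < grp->num_txq; j++) {
			free(grp->txqs[j].ring);
			grp->txqs[j].ring = NULL;
		}
		if (v->splitq) {
			free(grp->complq.ring);
			grp->complq.ring = NULL;
		}
	}
}

static int model_tx_desc_alloc_all(struct model_vport *v)
{
	for (int i = 0; i < v->num_txq_grp; i++) {
		struct model_txq_group *grp = &v->txq_grps[i];

		for (int j = 0; j < grp->num_txq; j++) {
			grp->txqs[j].ring = calloc(256, 16);
			if (!grp->txqs[j].ring)
				goto err_alloc;
		}
		if (!v->splitq)
			continue;	/* no complq in the singleq model */
		grp->complq.ring = calloc(256, 8);
		if (!grp->complq.ring)
			goto err_alloc;
	}
	return 0;

err_alloc:
	model_tx_desc_rel_all(v);	/* unwind, as at line 374 */
	return -ENOMEM;
}
```
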
520 * @vport: virtual port structure
524 static void idpf_rx_desc_rel_all(struct idpf_vport *vport) in idpf_rx_desc_rel_all() argument
526 struct device *dev = &vport->adapter->pdev->dev; in idpf_rx_desc_rel_all()
531 if (!vport->rxq_grps) in idpf_rx_desc_rel_all()
534 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_desc_rel_all()
535 rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_desc_rel_all()
537 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rx_desc_rel_all()
552 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_desc_rel_all()
805 * @vport: virtual port struct
809 int idpf_rx_bufs_init_all(struct idpf_vport *vport) in idpf_rx_bufs_init_all() argument
811 bool split = idpf_is_queue_model_split(vport->rxq_model); in idpf_rx_bufs_init_all()
814 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_bufs_init_all()
815 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_bufs_init_all()
835 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_bufs_init_all()
857 * @vport: vport to allocate resources for
862 static int idpf_rx_desc_alloc(const struct idpf_vport *vport, in idpf_rx_desc_alloc() argument
865 struct device *dev = &vport->adapter->pdev->dev; in idpf_rx_desc_alloc()
889 * @vport: vport to allocate resources for
894 static int idpf_bufq_desc_alloc(const struct idpf_vport *vport, in idpf_bufq_desc_alloc() argument
897 struct device *dev = &vport->adapter->pdev->dev; in idpf_bufq_desc_alloc()
917 * @vport: virtual port structure
921 static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) in idpf_rx_desc_alloc_all() argument
927 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rx_desc_alloc_all()
928 rx_qgrp = &vport->rxq_grps[i]; in idpf_rx_desc_alloc_all()
929 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
937 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
942 err = idpf_rx_desc_alloc(vport, q); in idpf_rx_desc_alloc_all()
944 pci_err(vport->adapter->pdev, in idpf_rx_desc_alloc_all()
951 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_rx_desc_alloc_all()
954 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rx_desc_alloc_all()
959 err = idpf_bufq_desc_alloc(vport, q); in idpf_rx_desc_alloc_all()
961 pci_err(vport->adapter->pdev, in idpf_rx_desc_alloc_all()
972 idpf_rx_desc_rel_all(vport); in idpf_rx_desc_alloc_all()
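
The rx side mirrors that shape with one extra layer: every group gets rings for its rx queues, and splitq groups additionally get num_bufqs_per_qgrp buffer-queue rings, while singleq groups skip that second loop entirely (line 951). A compact model of the control flow, with invented helper names standing in for idpf_rx_desc_alloc() and idpf_bufq_desc_alloc():

```c
#include <stdbool.h>

/* Stand-ins for the real ring allocators, which return 0 or a
 * negative errno; trivially succeeding here. */
static int model_rxq_ring_alloc(int grp, int q)   { (void)grp; (void)q; return 0; }
static int model_bufq_ring_alloc(int grp, int bq) { (void)grp; (void)bq; return 0; }

static int model_rx_desc_alloc_all(int num_rxq_grp, int rxq_per_grp,
				   int num_bufqs_per_qgrp, bool splitq)
{
	for (int i = 0; i < num_rxq_grp; i++) {
		for (int j = 0; j < rxq_per_grp; j++) {
			int err = model_rxq_ring_alloc(i, j);

			if (err)
				return err;	/* caller releases all rings */
		}

		if (!splitq)
			continue;	/* singleq groups own no buffer queues */

		for (int j = 0; j < num_bufqs_per_qgrp; j++) {
			int err = model_bufq_ring_alloc(i, j);

			if (err)
				return err;
		}
	}

	return 0;
}
```
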
979 * @vport: vport to release txq groups on
981 static void idpf_txq_group_rel(struct idpf_vport *vport) in idpf_txq_group_rel() argument
986 if (!vport->txq_grps) in idpf_txq_group_rel()
989 split = idpf_is_queue_model_split(vport->txq_model); in idpf_txq_group_rel()
990 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, in idpf_txq_group_rel()
993 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_txq_group_rel()
994 struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; in idpf_txq_group_rel()
1010 kfree(vport->txq_grps); in idpf_txq_group_rel()
1011 vport->txq_grps = NULL; in idpf_txq_group_rel()
1022 for (i = 0; i < rx_qgrp->vport->num_bufqs_per_qgrp; i++) { in idpf_rxq_sw_queue_rel()
1036 * @vport: vport to release rxq groups on
1038 static void idpf_rxq_group_rel(struct idpf_vport *vport) in idpf_rxq_group_rel() argument
1042 if (!vport->rxq_grps) in idpf_rxq_group_rel()
1045 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rxq_group_rel()
1046 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rxq_group_rel()
1050 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_rel()
1068 kfree(vport->rxq_grps); in idpf_rxq_group_rel()
1069 vport->rxq_grps = NULL; in idpf_rxq_group_rel()
1074 * @vport: vport to release queue groups for
1076 static void idpf_vport_queue_grp_rel_all(struct idpf_vport *vport) in idpf_vport_queue_grp_rel_all() argument
1078 idpf_txq_group_rel(vport); in idpf_vport_queue_grp_rel_all()
1079 idpf_rxq_group_rel(vport); in idpf_vport_queue_grp_rel_all()
1084 * @vport: virtual port
1086 * Free the memory allocated for queues associated to a vport
1088 void idpf_vport_queues_rel(struct idpf_vport *vport) in idpf_vport_queues_rel() argument
1090 idpf_tx_desc_rel_all(vport); in idpf_vport_queues_rel()
1091 idpf_rx_desc_rel_all(vport); in idpf_vport_queues_rel()
1092 idpf_vport_queue_grp_rel_all(vport); in idpf_vport_queues_rel()
1094 kfree(vport->txqs); in idpf_vport_queues_rel()
1095 vport->txqs = NULL; in idpf_vport_queues_rel()
1100 * @vport: vport to init txqs on
1108 static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) in idpf_vport_init_fast_path_txqs() argument
1112 vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), in idpf_vport_init_fast_path_txqs()
1115 if (!vport->txqs) in idpf_vport_init_fast_path_txqs()
1118 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_vport_init_fast_path_txqs()
1119 struct idpf_txq_group *tx_grp = &vport->txq_grps[i]; in idpf_vport_init_fast_path_txqs()
1122 vport->txqs[k] = tx_grp->txqs[j]; in idpf_vport_init_fast_path_txqs()
1123 vport->txqs[k]->idx = k; in idpf_vport_init_fast_path_txqs()
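
idpf_vport_init_fast_path_txqs() flattens the two-level group/queue layout into the contiguous vport->txqs[] array and stamps each queue with its flat index (line 1123), so the hot path can index queues directly (see idpf_tx_start() further down). A sketch with invented types:

```c
#include <stdlib.h>

struct model_txq { int idx; };
struct model_txq_group { struct model_txq **txqs; int num_txq; };

/* Build a flat queue array indexed by the stack's queue mapping. */
static struct model_txq **model_flatten_txqs(struct model_txq_group *grps,
					     int num_grp, int num_txq_total)
{
	struct model_txq **flat = calloc(num_txq_total, sizeof(*flat));
	int k = 0;

	if (!flat)
		return NULL;

	for (int i = 0; i < num_grp; i++)
		for (int j = 0; j < grps[i].num_txq; j++, k++) {
			flat[k] = grps[i].txqs[j];
			flat[k]->idx = k;	/* queue learns its flat index */
		}

	return flat;
}
```
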
1132 * @vport: vport to initialize queues
1133 * @vport_msg: data to be filled into vport
1135 void idpf_vport_init_num_qs(struct idpf_vport *vport, in idpf_vport_init_num_qs() argument
1139 u16 idx = vport->idx; in idpf_vport_init_num_qs()
1141 config_data = &vport->adapter->vport_config[idx]->user_config; in idpf_vport_init_num_qs()
1142 vport->num_txq = le16_to_cpu(vport_msg->num_tx_q); in idpf_vport_init_num_qs()
1143 vport->num_rxq = le16_to_cpu(vport_msg->num_rx_q); in idpf_vport_init_num_qs()
1152 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_init_num_qs()
1153 vport->num_complq = le16_to_cpu(vport_msg->num_tx_complq); in idpf_vport_init_num_qs()
1154 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_init_num_qs()
1155 vport->num_bufq = le16_to_cpu(vport_msg->num_rx_bufq); in idpf_vport_init_num_qs()
1158 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_vport_init_num_qs()
1159 vport->num_bufqs_per_qgrp = 0; in idpf_vport_init_num_qs()
1164 vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; in idpf_vport_init_num_qs()
1169 * @vport: vport to calculate q groups for
1171 void idpf_vport_calc_num_q_desc(struct idpf_vport *vport) in idpf_vport_calc_num_q_desc() argument
1174 int num_bufqs = vport->num_bufqs_per_qgrp; in idpf_vport_calc_num_q_desc()
1176 u16 idx = vport->idx; in idpf_vport_calc_num_q_desc()
1179 config_data = &vport->adapter->vport_config[idx]->user_config; in idpf_vport_calc_num_q_desc()
1183 vport->complq_desc_count = 0; in idpf_vport_calc_num_q_desc()
1185 vport->txq_desc_count = num_req_txq_desc; in idpf_vport_calc_num_q_desc()
1186 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_vport_calc_num_q_desc()
1187 vport->complq_desc_count = num_req_txq_desc; in idpf_vport_calc_num_q_desc()
1188 if (vport->complq_desc_count < IDPF_MIN_TXQ_COMPLQ_DESC) in idpf_vport_calc_num_q_desc()
1189 vport->complq_desc_count = in idpf_vport_calc_num_q_desc()
1193 vport->txq_desc_count = IDPF_DFLT_TX_Q_DESC_COUNT; in idpf_vport_calc_num_q_desc()
1194 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_num_q_desc()
1195 vport->complq_desc_count = in idpf_vport_calc_num_q_desc()
1200 vport->rxq_desc_count = num_req_rxq_desc; in idpf_vport_calc_num_q_desc()
1202 vport->rxq_desc_count = IDPF_DFLT_RX_Q_DESC_COUNT; in idpf_vport_calc_num_q_desc()
1205 if (!vport->bufq_desc_count[i]) in idpf_vport_calc_num_q_desc()
1206 vport->bufq_desc_count[i] = in idpf_vport_calc_num_q_desc()
1207 IDPF_RX_BUFQ_DESC_COUNT(vport->rxq_desc_count, in idpf_vport_calc_num_q_desc()
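
idpf_vport_calc_num_q_desc() resolves ring sizes: a user-requested count wins when set, defaults apply otherwise, and in the splitq model the completion ring is clamped up to at least IDPF_MIN_TXQ_COMPLQ_DESC (lines 1188-1189). A hedged sketch of the tx side, with illustrative constants and a guessed default-complq derivation in place of the driver's:

```c
#include <stdbool.h>
#include <stdint.h>

/* Illustrative values only; the driver's IDPF_DFLT_TX_Q_DESC_COUNT
 * and IDPF_MIN_TXQ_COMPLQ_DESC may differ. */
#define MODEL_DFLT_TXQ_DESC	512
#define MODEL_MIN_COMPLQ_DESC	256

struct model_desc_cfg {
	uint16_t txq_desc_count;
	uint16_t complq_desc_count;
};

static void model_calc_tx_desc(struct model_desc_cfg *cfg,
			       uint16_t num_req_txq_desc, bool splitq)
{
	cfg->complq_desc_count = 0;

	if (num_req_txq_desc) {
		/* honor the user's request ... */
		cfg->txq_desc_count = num_req_txq_desc;
		if (splitq) {
			/* ... but keep the completion ring deep enough */
			cfg->complq_desc_count = num_req_txq_desc;
			if (cfg->complq_desc_count < MODEL_MIN_COMPLQ_DESC)
				cfg->complq_desc_count = MODEL_MIN_COMPLQ_DESC;
		}
	} else {
		cfg->txq_desc_count = MODEL_DFLT_TXQ_DESC;
		if (splitq)	/* derivation invented for illustration */
			cfg->complq_desc_count = MODEL_DFLT_TXQ_DESC * 2;
	}
}
```
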
1215 * @vport_idx: vport idx to retrieve vport pointer
1217 * @max_q: vport max queue info
1283 * @vport: vport to calculate q groups for
1285 void idpf_vport_calc_num_q_groups(struct idpf_vport *vport) in idpf_vport_calc_num_q_groups() argument
1287 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_num_q_groups()
1288 vport->num_txq_grp = vport->num_txq; in idpf_vport_calc_num_q_groups()
1290 vport->num_txq_grp = IDPF_DFLT_SINGLEQ_TX_Q_GROUPS; in idpf_vport_calc_num_q_groups()
1292 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_calc_num_q_groups()
1293 vport->num_rxq_grp = vport->num_rxq; in idpf_vport_calc_num_q_groups()
1295 vport->num_rxq_grp = IDPF_DFLT_SINGLEQ_RX_Q_GROUPS; in idpf_vport_calc_num_q_groups()
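
idpf_vport_calc_num_q_groups() is where the two queue models diverge structurally: splitq allocates one group per queue, while singleq collapses all queues into a fixed default group count. Stated as code, with placeholder defaults standing in for IDPF_DFLT_SINGLEQ_TX_Q_GROUPS and IDPF_DFLT_SINGLEQ_RX_Q_GROUPS:

```c
#include <stdbool.h>

/* Placeholder defaults; the real values live in the driver headers. */
#define MODEL_DFLT_SINGLEQ_TX_Q_GROUPS	1
#define MODEL_DFLT_SINGLEQ_RX_Q_GROUPS	1

static void model_calc_groups(int num_txq, int num_rxq,
			      bool tx_split, bool rx_split,
			      int *num_txq_grp, int *num_rxq_grp)
{
	/* splitq: one group per queue; singleq: one shared group */
	*num_txq_grp = tx_split ? num_txq : MODEL_DFLT_SINGLEQ_TX_Q_GROUPS;
	*num_rxq_grp = rx_split ? num_rxq : MODEL_DFLT_SINGLEQ_RX_Q_GROUPS;
}
```
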
1300 * @vport: vport to calculate queues for
1304 static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, in idpf_vport_calc_numq_per_grp() argument
1307 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_calc_numq_per_grp()
1310 *num_txq = vport->num_txq; in idpf_vport_calc_numq_per_grp()
1312 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_calc_numq_per_grp()
1315 *num_rxq = vport->num_rxq; in idpf_vport_calc_numq_per_grp()
1320 * @vport: virtual port data structure
1324 static void idpf_rxq_set_descids(const struct idpf_vport *vport, in idpf_rxq_set_descids() argument
1327 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_set_descids()
1330 if (vport->base_rxd) in idpf_rxq_set_descids()
1339 * @vport: vport to allocate txq groups for
1344 static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) in idpf_txq_group_alloc() argument
1349 vport->txq_grps = kcalloc(vport->num_txq_grp, in idpf_txq_group_alloc()
1350 sizeof(*vport->txq_grps), GFP_KERNEL); in idpf_txq_group_alloc()
1351 if (!vport->txq_grps) in idpf_txq_group_alloc()
1354 split = idpf_is_queue_model_split(vport->txq_model); in idpf_txq_group_alloc()
1355 flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, in idpf_txq_group_alloc()
1358 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_txq_group_alloc()
1359 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_txq_group_alloc()
1360 struct idpf_adapter *adapter = vport->adapter; in idpf_txq_group_alloc()
1364 tx_qgrp->vport = vport; in idpf_txq_group_alloc()
1387 q->desc_count = vport->txq_desc_count; in idpf_txq_group_alloc()
1390 q->netdev = vport->netdev; in idpf_txq_group_alloc()
1394 q->clean_budget = vport->compln_clean_budget; in idpf_txq_group_alloc()
1396 vport->crc_enable); in idpf_txq_group_alloc()
1419 tx_qgrp->complq->desc_count = vport->complq_desc_count; in idpf_txq_group_alloc()
1421 tx_qgrp->complq->netdev = vport->netdev; in idpf_txq_group_alloc()
1422 tx_qgrp->complq->clean_budget = vport->compln_clean_budget; in idpf_txq_group_alloc()
1431 idpf_txq_group_rel(vport); in idpf_txq_group_alloc()
1438 * @vport: vport to allocate rxq groups for
1443 static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) in idpf_rxq_group_alloc() argument
1448 vport->rxq_grps = kcalloc(vport->num_rxq_grp, in idpf_rxq_group_alloc()
1450 if (!vport->rxq_grps) in idpf_rxq_group_alloc()
1453 hs = idpf_vport_get_hsplit(vport) == ETHTOOL_TCP_DATA_SPLIT_ENABLED; in idpf_rxq_group_alloc()
1455 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_rxq_group_alloc()
1456 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_rxq_group_alloc()
1459 rx_qgrp->vport = vport; in idpf_rxq_group_alloc()
1460 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_alloc()
1485 rx_qgrp->splitq.bufq_sets = kcalloc(vport->num_bufqs_per_qgrp, in idpf_rxq_group_alloc()
1493 for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_rxq_group_alloc()
1500 q->desc_count = vport->bufq_desc_count[j]; in idpf_rxq_group_alloc()
1517 vport->bufq_desc_count[j]; in idpf_rxq_group_alloc()
1534 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_rxq_group_alloc()
1541 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) in idpf_rxq_group_alloc()
1548 q->desc_count = vport->rxq_desc_count; in idpf_rxq_group_alloc()
1549 q->rx_ptype_lkup = vport->rx_ptype_lkup; in idpf_rxq_group_alloc()
1550 q->netdev = vport->netdev; in idpf_rxq_group_alloc()
1554 q->rx_max_pkt_size = vport->netdev->mtu + in idpf_rxq_group_alloc()
1556 idpf_rxq_set_descids(vport, q); in idpf_rxq_group_alloc()
1562 idpf_rxq_group_rel(vport); in idpf_rxq_group_alloc()
1569 * @vport: vport with qgrps to allocate
1573 static int idpf_vport_queue_grp_alloc_all(struct idpf_vport *vport) in idpf_vport_queue_grp_alloc_all() argument
1578 idpf_vport_calc_numq_per_grp(vport, &num_txq, &num_rxq); in idpf_vport_queue_grp_alloc_all()
1580 err = idpf_txq_group_alloc(vport, num_txq); in idpf_vport_queue_grp_alloc_all()
1584 err = idpf_rxq_group_alloc(vport, num_rxq); in idpf_vport_queue_grp_alloc_all()
1591 idpf_vport_queue_grp_rel_all(vport); in idpf_vport_queue_grp_alloc_all()
1598 * @vport: virtual port
1600 * Allocate memory for queues associated with a vport. Returns 0 on success,
1603 int idpf_vport_queues_alloc(struct idpf_vport *vport) in idpf_vport_queues_alloc() argument
1607 err = idpf_vport_queue_grp_alloc_all(vport); in idpf_vport_queues_alloc()
1611 err = idpf_tx_desc_alloc_all(vport); in idpf_vport_queues_alloc()
1615 err = idpf_rx_desc_alloc_all(vport); in idpf_vport_queues_alloc()
1619 err = idpf_vport_init_fast_path_txqs(vport); in idpf_vport_queues_alloc()
1626 idpf_vport_queues_rel(vport); in idpf_vport_queues_alloc()
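
idpf_vport_queues_alloc() strings the stages together in a fixed order (groups, tx rings, rx rings, fast-path array) and, as at line 1626, unwinds every partial-failure state through the single release path idpf_vport_queues_rel(). The orchestration reduces to the classic goto-unwind shape; stage names here are invented stubs:

```c
#include <errno.h>

/* Stubs standing in for the real allocation stages; each returns
 * 0 on success or a negative errno. */
static int model_grp_alloc_all(void)  { return 0; }
static int model_tx_desc_alloc(void)  { return 0; }
static int model_rx_desc_alloc(void)  { return 0; }
static int model_fast_path_init(void) { return 0; }
static void model_queues_rel(void)    { /* frees everything */ }

static int model_queues_alloc(void)
{
	int err;

	err = model_grp_alloc_all();
	if (err)
		goto err_out;

	err = model_tx_desc_alloc();
	if (err)
		goto err_out;

	err = model_rx_desc_alloc();
	if (err)
		goto err_out;

	err = model_fast_path_init();
	if (err)
		goto err_out;

	return 0;

err_out:
	/* one release path handles every partial-failure state */
	model_queues_rel();
	return err;
}
```
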
1638 struct idpf_vport *vport = priv->vport; in idpf_tx_handle_sw_marker() local
1645 for (i = 0; i < vport->num_txq; i++) in idpf_tx_handle_sw_marker()
1649 if (idpf_queue_has(SW_MARKER, vport->txqs[i])) in idpf_tx_handle_sw_marker()
1653 set_bit(IDPF_VPORT_SW_MARKER, vport->flags); in idpf_tx_handle_sw_marker()
1654 wake_up(&vport->sw_marker_wq); in idpf_tx_handle_sw_marker()
2838 struct idpf_vport *vport = idpf_netdev_to_vport(netdev); in idpf_tx_start() local
2841 if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { in idpf_tx_start()
2847 tx_q = vport->txqs[skb_get_queue_mapping(skb)]; in idpf_tx_start()
2858 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_tx_start()
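
idpf_tx_start() is where the flat vport->txqs[] array pays off: the skb's queue mapping indexes it directly after a bounds check against num_txq (line 2841); an out-of-range mapping means the skb cannot be queued. A stand-in version of that lookup-or-drop decision:

```c
#include <stddef.h>

struct model_txq { int idx; };
struct model_skb { unsigned int queue_mapping; };

/* Returns the queue to transmit on, or NULL for a bogus mapping
 * (the driver drops the skb in that case). */
static struct model_txq *model_pick_txq(struct model_txq **txqs,
					unsigned int num_txq,
					const struct model_skb *skb)
{
	if (skb->queue_mapping >= num_txq)
		return NULL;	/* out of range: caller drops */

	return txqs[skb->queue_mapping];
}
```
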
3512 * idpf_vport_intr_napi_del_all - Unregister napi for all q_vectors in vport
3513 * @vport: virtual port structure
3516 static void idpf_vport_intr_napi_del_all(struct idpf_vport *vport) in idpf_vport_intr_napi_del_all() argument
3520 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) in idpf_vport_intr_napi_del_all()
3521 netif_napi_del(&vport->q_vectors[v_idx].napi); in idpf_vport_intr_napi_del_all()
3525 * idpf_vport_intr_napi_dis_all - Disable NAPI for all q_vectors in the vport
3526 * @vport: main vport structure
3528 static void idpf_vport_intr_napi_dis_all(struct idpf_vport *vport) in idpf_vport_intr_napi_dis_all() argument
3532 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) in idpf_vport_intr_napi_dis_all()
3533 napi_disable(&vport->q_vectors[v_idx].napi); in idpf_vport_intr_napi_dis_all()
3538 * @vport: virtual port
3540 * Free the memory allocated for interrupt vectors associated to a vport
3542 void idpf_vport_intr_rel(struct idpf_vport *vport) in idpf_vport_intr_rel() argument
3544 for (u32 v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_rel()
3545 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_rel()
3559 kfree(vport->q_vectors); in idpf_vport_intr_rel()
3560 vport->q_vectors = NULL; in idpf_vport_intr_rel()
3565 * @vport: main vport structure
3567 static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) in idpf_vport_intr_rel_irq() argument
3569 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_rel_irq()
3572 for (vector = 0; vector < vport->num_q_vectors; vector++) { in idpf_vport_intr_rel_irq()
3573 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; in idpf_vport_intr_rel_irq()
3580 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_rel_irq()
3591 * @vport: main vport structure
3593 static void idpf_vport_intr_dis_irq_all(struct idpf_vport *vport) in idpf_vport_intr_dis_irq_all() argument
3595 struct idpf_q_vector *q_vector = vport->q_vectors; in idpf_vport_intr_dis_irq_all()
3598 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) in idpf_vport_intr_dis_irq_all()
3734 * idpf_vport_intr_req_irq - get MSI-X vectors from the OS for the vport
3735 * @vport: main vport structure
3737 static int idpf_vport_intr_req_irq(struct idpf_vport *vport) in idpf_vport_intr_req_irq() argument
3739 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_req_irq()
3744 if_name = netdev_name(vport->netdev); in idpf_vport_intr_req_irq()
3746 for (vector = 0; vector < vport->num_q_vectors; vector++) { in idpf_vport_intr_req_irq()
3747 struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; in idpf_vport_intr_req_irq()
3750 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_req_irq()
3768 netdev_err(vport->netdev, in idpf_vport_intr_req_irq()
3780 vidx = vport->q_vector_idxs[vector]; in idpf_vport_intr_req_irq()
3782 kfree(free_irq(irq_num, &vport->q_vectors[vector])); in idpf_vport_intr_req_irq()
3809 * idpf_vport_intr_ena_irq_all - Enable IRQ for the given vport
3810 * @vport: main vport structure
3812 static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport) in idpf_vport_intr_ena_irq_all() argument
3818 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { in idpf_vport_intr_ena_irq_all()
3819 struct idpf_q_vector *qv = &vport->q_vectors[q_idx]; in idpf_vport_intr_ena_irq_all()
3824 itr = vport->tx_itr_profile[qv->tx_dim.profile_ix]; in idpf_vport_intr_ena_irq_all()
3832 itr = vport->rx_itr_profile[qv->rx_dim.profile_ix]; in idpf_vport_intr_ena_irq_all()
3844 * idpf_vport_intr_deinit - Release all vector associations for the vport
3845 * @vport: main vport structure
3847 void idpf_vport_intr_deinit(struct idpf_vport *vport) in idpf_vport_intr_deinit() argument
3849 idpf_vport_intr_dis_irq_all(vport); in idpf_vport_intr_deinit()
3850 idpf_vport_intr_napi_dis_all(vport); in idpf_vport_intr_deinit()
3851 idpf_vport_intr_napi_del_all(vport); in idpf_vport_intr_deinit()
3852 idpf_vport_intr_rel_irq(vport); in idpf_vport_intr_deinit()
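
idpf_vport_intr_deinit() encodes a deliberate teardown order: mask interrupts at the device, disable NAPI so no poller is mid-run, delete the NAPI instances, and only then free the IRQs. Reversing the order would risk an interrupt landing in a dismantled NAPI context. The skeleton, with stub stages:

```c
/* Stubs for the four teardown stages; the ordering is the point. */
static void model_mask_hw_irqs(void) { }
static void model_napi_disable(void) { }
static void model_napi_delete(void)  { }
static void model_free_irqs(void)    { }

static void model_intr_deinit(void)
{
	model_mask_hw_irqs();	/* 1: stop new interrupts at the device */
	model_napi_disable();	/* 2: wait out any in-flight pollers */
	model_napi_delete();	/* 3: unregister NAPI instances */
	model_free_irqs();	/* 4: now safe to release the vectors */
}
```
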
3862 struct idpf_vport *vport; in idpf_tx_dim_work() local
3868 vport = q_vector->vport; in idpf_tx_dim_work()
3870 if (dim->profile_ix >= ARRAY_SIZE(vport->tx_itr_profile)) in idpf_tx_dim_work()
3871 dim->profile_ix = ARRAY_SIZE(vport->tx_itr_profile) - 1; in idpf_tx_dim_work()
3874 itr = vport->tx_itr_profile[dim->profile_ix]; in idpf_tx_dim_work()
3888 struct idpf_vport *vport; in idpf_rx_dim_work() local
3894 vport = q_vector->vport; in idpf_rx_dim_work()
3896 if (dim->profile_ix >= ARRAY_SIZE(vport->rx_itr_profile)) in idpf_rx_dim_work()
3897 dim->profile_ix = ARRAY_SIZE(vport->rx_itr_profile) - 1; in idpf_rx_dim_work()
3900 itr = vport->rx_itr_profile[dim->profile_ix]; in idpf_rx_dim_work()
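
Both DIM work handlers clamp the profile index chosen by the net_dim library to the vport's ITR table before the lookup (lines 3870-3871 and 3896-3897), since the library's profile space may exceed the table. The clamp-and-lookup, with an illustrative table in place of the driver's per-vport tx/rx profiles:

```c
#include <stddef.h>

#define MODEL_ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

/* Illustrative ITR values in microseconds; the driver keeps separate
 * tx_itr_profile[] and rx_itr_profile[] tables per vport. */
static const unsigned short model_itr_profile[] = { 2, 8, 32, 96, 128 };

static unsigned short model_dim_to_itr(size_t profile_ix)
{
	/* net_dim may suggest an index past our table: clamp it */
	if (profile_ix >= MODEL_ARRAY_SIZE(model_itr_profile))
		profile_ix = MODEL_ARRAY_SIZE(model_itr_profile) - 1;

	return model_itr_profile[profile_ix];
}
```
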
3923 * idpf_vport_intr_napi_ena_all - Enable NAPI for all q_vectors in the vport
3924 * @vport: main vport structure
3926 static void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport) in idpf_vport_intr_napi_ena_all() argument
3930 for (q_idx = 0; q_idx < vport->num_q_vectors; q_idx++) { in idpf_vport_intr_napi_ena_all()
3931 struct idpf_q_vector *q_vector = &vport->q_vectors[q_idx]; in idpf_vport_intr_napi_ena_all()
4057 * @vport: virtual port
4061 static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) in idpf_vport_intr_map_vector_to_qs() argument
4063 bool split = idpf_is_queue_model_split(vport->rxq_model); in idpf_vport_intr_map_vector_to_qs()
4064 u16 num_txq_grp = vport->num_txq_grp; in idpf_vport_intr_map_vector_to_qs()
4069 for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { in idpf_vport_intr_map_vector_to_qs()
4072 if (qv_idx >= vport->num_q_vectors) in idpf_vport_intr_map_vector_to_qs()
4075 rx_qgrp = &vport->rxq_grps[i]; in idpf_vport_intr_map_vector_to_qs()
4088 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4098 for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { in idpf_vport_intr_map_vector_to_qs()
4102 bufq->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4112 split = idpf_is_queue_model_split(vport->txq_model); in idpf_vport_intr_map_vector_to_qs()
4117 if (qv_idx >= vport->num_q_vectors) in idpf_vport_intr_map_vector_to_qs()
4120 tx_qgrp = &vport->txq_grps[i]; in idpf_vport_intr_map_vector_to_qs()
4127 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4134 q->q_vector = &vport->q_vectors[qv_idx]; in idpf_vport_intr_map_vector_to_qs()
4144 * @vport: virtual port
4148 static int idpf_vport_intr_init_vec_idx(struct idpf_vport *vport) in idpf_vport_intr_init_vec_idx() argument
4150 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_intr_init_vec_idx()
4157 for (i = 0; i < vport->num_q_vectors; i++) in idpf_vport_intr_init_vec_idx()
4158 vport->q_vectors[i].v_idx = vport->q_vector_idxs[i]; in idpf_vport_intr_init_vec_idx()
4170 for (i = 0; i < vport->num_q_vectors; i++) in idpf_vport_intr_init_vec_idx()
4171 vport->q_vectors[i].v_idx = vecids[vport->q_vector_idxs[i]]; in idpf_vport_intr_init_vec_idx()
4180 * @vport: virtual port structure
4182 static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) in idpf_vport_intr_napi_add_all() argument
4187 if (idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_intr_napi_add_all()
4192 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_napi_add_all()
4193 struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_napi_add_all()
4195 netif_napi_add(vport->netdev, &q_vector->napi, napi_poll); in idpf_vport_intr_napi_add_all()
4205 * @vport: virtual port
4210 int idpf_vport_intr_alloc(struct idpf_vport *vport) in idpf_vport_intr_alloc() argument
4216 vport->q_vectors = kcalloc(vport->num_q_vectors, in idpf_vport_intr_alloc()
4218 if (!vport->q_vectors) in idpf_vport_intr_alloc()
4221 txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, in idpf_vport_intr_alloc()
4222 vport->num_q_vectors); in idpf_vport_intr_alloc()
4223 rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp, in idpf_vport_intr_alloc()
4224 vport->num_q_vectors); in idpf_vport_intr_alloc()
4225 bufqs_per_vector = vport->num_bufqs_per_qgrp * in idpf_vport_intr_alloc()
4226 DIV_ROUND_UP(vport->num_rxq_grp, in idpf_vport_intr_alloc()
4227 vport->num_q_vectors); in idpf_vport_intr_alloc()
4228 complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, in idpf_vport_intr_alloc()
4229 vport->num_q_vectors); in idpf_vport_intr_alloc()
4231 for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { in idpf_vport_intr_alloc()
4232 q_vector = &vport->q_vectors[v_idx]; in idpf_vport_intr_alloc()
4233 q_vector->vport = vport; in idpf_vport_intr_alloc()
4256 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_intr_alloc()
4275 idpf_vport_intr_rel(vport); in idpf_vport_intr_alloc()
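
idpf_vport_intr_alloc() sizes each vector's queue arrays by spreading the group counts across the vectors with round-up division (lines 4221-4229); the buffer-queue figure scales by num_bufqs_per_qgrp because each splitq rx group carries its own buffer queues. The arithmetic:

```c
/* Round-up integer division, as the kernel's DIV_ROUND_UP() does. */
#define MODEL_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

struct model_vec_sizing {
	unsigned int txqs_per_vector;
	unsigned int rxqs_per_vector;
	unsigned int bufqs_per_vector;
};

static struct model_vec_sizing
model_size_vectors(unsigned int num_txq_grp, unsigned int num_rxq_grp,
		   unsigned int num_bufqs_per_qgrp,
		   unsigned int num_q_vectors)
{
	struct model_vec_sizing s;

	s.txqs_per_vector = MODEL_DIV_ROUND_UP(num_txq_grp, num_q_vectors);
	s.rxqs_per_vector = MODEL_DIV_ROUND_UP(num_rxq_grp, num_q_vectors);
	/* every splitq rx group carries its own buffer queues */
	s.bufqs_per_vector = num_bufqs_per_qgrp *
			     MODEL_DIV_ROUND_UP(num_rxq_grp, num_q_vectors);

	return s;
}
```

For example, with 10 rx groups, 4 vectors, and 2 bufqs per group, each vector reserves DIV_ROUND_UP(10, 4) = 3 rx-queue slots and 2 * 3 = 6 bufq slots.
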
4281 * idpf_vport_intr_init - Setup all vectors for the given vport
4282 * @vport: virtual port
4286 int idpf_vport_intr_init(struct idpf_vport *vport) in idpf_vport_intr_init() argument
4290 err = idpf_vport_intr_init_vec_idx(vport); in idpf_vport_intr_init()
4294 idpf_vport_intr_map_vector_to_qs(vport); in idpf_vport_intr_init()
4295 idpf_vport_intr_napi_add_all(vport); in idpf_vport_intr_init()
4297 err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport); in idpf_vport_intr_init()
4301 err = idpf_vport_intr_req_irq(vport); in idpf_vport_intr_init()
4308 idpf_vport_intr_napi_del_all(vport); in idpf_vport_intr_init()
4313 void idpf_vport_intr_ena(struct idpf_vport *vport) in idpf_vport_intr_ena() argument
4315 idpf_vport_intr_napi_ena_all(vport); in idpf_vport_intr_ena()
4316 idpf_vport_intr_ena_irq_all(vport); in idpf_vport_intr_ena()
4321 * @vport: virtual port
4325 int idpf_config_rss(struct idpf_vport *vport) in idpf_config_rss() argument
4329 err = idpf_send_get_set_rss_key_msg(vport, false); in idpf_config_rss()
4333 return idpf_send_get_set_rss_lut_msg(vport, false); in idpf_config_rss()
4338 * @vport: virtual port structure
4340 static void idpf_fill_dflt_rss_lut(struct idpf_vport *vport) in idpf_fill_dflt_rss_lut() argument
4342 struct idpf_adapter *adapter = vport->adapter; in idpf_fill_dflt_rss_lut()
4343 u16 num_active_rxq = vport->num_rxq; in idpf_fill_dflt_rss_lut()
4347 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_fill_dflt_rss_lut()
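
The listing shows idpf_fill_dflt_rss_lut() reading the active rx queue count (line 4343) but not the fill loop itself; the helper's name and that count suggest the usual round-robin spread of LUT entries over the active queues. A sketch of that assumption:

```c
#include <stdint.h>

/* Assumed behavior: spread LUT entries round-robin over the active
 * rx queues, so traffic hashes evenly across them. */
static void model_fill_dflt_rss_lut(uint32_t *lut, int lut_size,
				    uint16_t num_active_rxq)
{
	for (int i = 0; i < lut_size; i++)
		lut[i] = i % num_active_rxq;
}
```
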
4357 * @vport: virtual port
4361 int idpf_init_rss(struct idpf_vport *vport) in idpf_init_rss() argument
4363 struct idpf_adapter *adapter = vport->adapter; in idpf_init_rss()
4367 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_init_rss()
4383 idpf_fill_dflt_rss_lut(vport); in idpf_init_rss()
4385 return idpf_config_rss(vport); in idpf_init_rss()
4390 * @vport: virtual port
4392 void idpf_deinit_rss(struct idpf_vport *vport) in idpf_deinit_rss() argument
4394 struct idpf_adapter *adapter = vport->adapter; in idpf_deinit_rss()
4397 rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_deinit_rss()