Lines Matching full:vport

106  * idpf_vid_to_vport - Translate vport id to vport pointer
108 * @v_id: vport id to translate
110 * Returns vport matching v_id, NULL if not found.
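The kdoc fragment above describes idpf_vid_to_vport(), which maps a virtchnl vport id onto the driver's vport pointer and returns NULL when no vport matches. A minimal self-contained sketch of that lookup pattern, using hypothetical demo_* types rather than the real driver structures:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for struct idpf_vport / struct idpf_adapter. */
struct demo_vport {
	uint32_t vport_id;
};

struct demo_adapter {
	struct demo_vport *vports[4];
	size_t max_vports;
};

/* Walk the adapter's vport table and return the entry whose id matches
 * v_id, or NULL when no such vport exists. */
static struct demo_vport *demo_vid_to_vport(struct demo_adapter *adapter,
					     uint32_t v_id)
{
	for (size_t i = 0; i < adapter->max_vports; i++)
		if (adapter->vports[i] && adapter->vports[i]->vport_id == v_id)
			return adapter->vports[i];

	return NULL;
}

int main(void)
{
	struct demo_vport v0 = { .vport_id = 42 };
	struct demo_adapter adapter = { .vports = { &v0 }, .max_vports = 4 };

	printf("lookup 42: %s\n", demo_vid_to_vport(&adapter, 42) ? "found" : "missing");
	printf("lookup  7: %s\n", demo_vid_to_vport(&adapter, 7) ? "found" : "missing");
	return 0;
}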
134 struct idpf_vport *vport; in idpf_handle_event_link() local
136 vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id)); in idpf_handle_event_link()
137 if (!vport) { in idpf_handle_event_link()
142 np = netdev_priv(vport->netdev); in idpf_handle_event_link()
146 if (vport->link_up == v2e->link_status) in idpf_handle_event_link()
149 vport->link_up = v2e->link_status; in idpf_handle_event_link()
154 if (vport->link_up) { in idpf_handle_event_link()
155 netif_tx_start_all_queues(vport->netdev); in idpf_handle_event_link()
156 netif_carrier_on(vport->netdev); in idpf_handle_event_link()
158 netif_tx_stop_all_queues(vport->netdev); in idpf_handle_event_link()
159 netif_carrier_off(vport->netdev); in idpf_handle_event_link()
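The idpf_handle_event_link() fragments above resolve the vport from the event's vport_id, drop the event when the reported state already matches the cached vport->link_up, and otherwise update the cache and start or stop the TX queues and carrier. A minimal self-contained sketch of that control flow, with stub functions standing in for the netif_* calls:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the link-state bookkeeping on the vport. */
struct demo_vport {
	bool link_up;
};

static void demo_carrier_on(void)  { puts("queues started, carrier on"); }
static void demo_carrier_off(void) { puts("queues stopped, carrier off"); }

/* Mirrors the control flow in the fragments: no-op on a redundant event,
 * otherwise cache the new state and toggle carrier/queues to match. */
static void demo_handle_link_event(struct demo_vport *vport, bool link_status)
{
	if (vport->link_up == link_status)
		return;

	vport->link_up = link_status;

	if (vport->link_up)
		demo_carrier_on();
	else
		demo_carrier_off();
}

int main(void)
{
	struct demo_vport v = { .link_up = false };

	demo_handle_link_event(&v, true);	/* transitions to up */
	demo_handle_link_event(&v, true);	/* redundant, ignored */
	demo_handle_link_event(&v, false);	/* transitions to down */
	return 0;
}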
749 * @vport: virtual port data structure
753 static int idpf_wait_for_marker_event(struct idpf_vport *vport) in idpf_wait_for_marker_event() argument
758 for (i = 0; i < vport->num_txq; i++) in idpf_wait_for_marker_event()
759 idpf_queue_set(SW_MARKER, vport->txqs[i]); in idpf_wait_for_marker_event()
761 event = wait_event_timeout(vport->sw_marker_wq, in idpf_wait_for_marker_event()
763 vport->flags), in idpf_wait_for_marker_event()
766 for (i = 0; i < vport->num_txq; i++) in idpf_wait_for_marker_event()
767 idpf_queue_clear(POLL_MODE, vport->txqs[i]); in idpf_wait_for_marker_event()
772 dev_warn(&vport->adapter->pdev->dev, "Failed to receive marker packets\n"); in idpf_wait_for_marker_event()
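In the fragments above, idpf_wait_for_marker_event() flags every TX queue with SW_MARKER, sleeps on vport->sw_marker_wq via wait_event_timeout(), clears POLL_MODE on the queues afterwards, and warns if the marker packets never arrive. A rough userspace analogue of that bounded wait (pthreads, hypothetical names; the driver itself uses the kernel wait-queue API, not this code):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t done_cv = PTHREAD_COND_INITIALIZER;
static bool markers_done;

/* Stand-in for the TX completion path that sees the last marker, sets the
 * flag, and wakes the waiter (analogous to waking sw_marker_wq). */
static void *demo_completion_path(void *arg)
{
	(void)arg;
	usleep(100 * 1000);
	pthread_mutex_lock(&lock);
	markers_done = true;
	pthread_cond_signal(&done_cv);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	struct timespec deadline;
	pthread_t thr;
	int ret = 0;

	pthread_create(&thr, NULL, demo_completion_path, NULL);

	/* Bounded wait, analogous to wait_event_timeout() in the driver. */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 2;

	pthread_mutex_lock(&lock);
	while (!markers_done && ret == 0)
		ret = pthread_cond_timedwait(&done_cv, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	if (!markers_done)
		fprintf(stderr, "Failed to receive marker packets\n");

	pthread_join(thr, NULL);
	return markers_done ? 0 : 1;
}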
922 * idpf_vport_alloc_max_qs - Allocate max queues for a vport
924 * @max_q: vport max queue structure
968 * idpf_vport_dealloc_max_qs - Deallocate max queues of a vport
970 * @max_q: vport max queue structure
1005 * @vport: virtual port structure
1010 int idpf_get_reg_intr_vecs(struct idpf_vport *vport, in idpf_get_reg_intr_vecs() argument
1018 chunks = &vport->adapter->req_vec_chunks->vchunks; in idpf_get_reg_intr_vecs()
1051 * idpf_vport_get_q_reg - Get the queue registers for the vport
1090 * @vport: virtual port structure
1097 static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, in __idpf_queue_reg_init() argument
1100 struct idpf_adapter *adapter = vport->adapter; in __idpf_queue_reg_init()
1105 for (i = 0; i < vport->num_txq_grp; i++) { in __idpf_queue_reg_init()
1106 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_queue_reg_init()
1114 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_queue_reg_init()
1115 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_queue_reg_init()
1128 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_queue_reg_init()
1129 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_queue_reg_init()
1130 u8 num_bufqs = vport->num_bufqs_per_qgrp; in __idpf_queue_reg_init()
1150 * @vport: virtual port structure
1154 int idpf_queue_reg_init(struct idpf_vport *vport) in idpf_queue_reg_init() argument
1159 u16 vport_idx = vport->idx; in idpf_queue_reg_init()
1168 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_queue_reg_init()
1174 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_queue_reg_init()
1182 if (num_regs < vport->num_txq) { in idpf_queue_reg_init()
1187 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, in idpf_queue_reg_init()
1189 if (num_regs < vport->num_txq) { in idpf_queue_reg_init()
1197 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_queue_reg_init()
1201 if (num_regs < vport->num_bufq) { in idpf_queue_reg_init()
1206 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, in idpf_queue_reg_init()
1208 if (num_regs < vport->num_bufq) { in idpf_queue_reg_init()
1216 if (num_regs < vport->num_rxq) { in idpf_queue_reg_init()
1221 num_regs = __idpf_queue_reg_init(vport, reg_vals, num_regs, in idpf_queue_reg_init()
1223 if (num_regs < vport->num_rxq) { in idpf_queue_reg_init()
1236 * idpf_send_create_vport_msg - Send virtchnl create vport message
1238 * @max_q: vport max queue info
1240 * Send virtchnl create vport message
1316 * @vport: virtual port structure
1320 int idpf_check_supported_desc_ids(struct idpf_vport *vport) in idpf_check_supported_desc_ids() argument
1322 struct idpf_adapter *adapter = vport->adapter; in idpf_check_supported_desc_ids()
1326 vport_msg = adapter->vport_params_recvd[vport->idx]; in idpf_check_supported_desc_ids()
1338 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_check_supported_desc_ids()
1345 vport->base_rxd = true; in idpf_check_supported_desc_ids()
1348 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_check_supported_desc_ids()
1360 * idpf_send_destroy_vport_msg - Send virtchnl destroy vport message
1361 * @vport: virtual port data structure
1363 * Send virtchnl destroy vport message. Returns 0 on success, negative on
1366 int idpf_send_destroy_vport_msg(struct idpf_vport *vport) in idpf_send_destroy_vport_msg() argument
1372 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_destroy_vport_msg()
1378 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_destroy_vport_msg()
1384 * idpf_send_enable_vport_msg - Send virtchnl enable vport message
1385 * @vport: virtual port data structure
1387 * Send enable vport virtchnl message. Returns 0 on success, negative on
1390 int idpf_send_enable_vport_msg(struct idpf_vport *vport) in idpf_send_enable_vport_msg() argument
1396 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_enable_vport_msg()
1402 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_enable_vport_msg()
1408 * idpf_send_disable_vport_msg - Send virtchnl disable vport message
1409 * @vport: virtual port data structure
1411 * Send disable vport virtchnl message. Returns 0 on success, negative on
1414 int idpf_send_disable_vport_msg(struct idpf_vport *vport) in idpf_send_disable_vport_msg() argument
1420 v_id.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_disable_vport_msg()
1426 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_disable_vport_msg()
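The destroy/enable/disable vport senders above share one shape: fill a virtchnl2_vport with the little-endian vport_id, execute the transaction through idpf_vc_xn_exec(), and treat a negative reply size as the error code. A self-contained sketch of that shape, with hypothetical demo_* names standing in for the virtchnl structures and the transaction layer:

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

/* Hypothetical stand-in for struct virtchnl2_vport. */
struct demo_vport_msg {
	uint32_t vport_id;	/* the driver stores cpu_to_le32(vport->vport_id) */
};

/* Stand-in transport: returns the reply size, or a negative error code. */
static ssize_t demo_xn_exec(const void *buf, size_t len)
{
	(void)buf;
	return (ssize_t)len;	/* pretend the control plane replied */
}

/* Shared shape of the destroy/enable/disable senders: fill the id, run the
 * transaction, map a negative reply size to an error return, else 0. */
static int demo_send_vport_msg(uint32_t vport_id)
{
	struct demo_vport_msg msg = { .vport_id = vport_id };
	ssize_t reply_sz = demo_xn_exec(&msg, sizeof(msg));

	return reply_sz < 0 ? (int)reply_sz : 0;
}

int main(void)
{
	printf("send returned %d\n", demo_send_vport_msg(7));
	return 0;
}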
1433 * @vport: virtual port data structure
1438 static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) in idpf_send_config_tx_queues_msg() argument
1448 totqs = vport->num_txq + vport->num_complq; in idpf_send_config_tx_queues_msg()
1454 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_send_config_tx_queues_msg()
1455 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_config_tx_queues_msg()
1462 cpu_to_le16(vport->txq_model); in idpf_send_config_tx_queues_msg()
1469 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_send_config_tx_queues_msg()
1488 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_send_config_tx_queues_msg()
1492 qi[k].model = cpu_to_le16(vport->txq_model); in idpf_send_config_tx_queues_msg()
1530 ctq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_config_tx_queues_msg()
1536 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_config_tx_queues_msg()
1552 * @vport: virtual port data structure
1557 static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) in idpf_send_config_rx_queues_msg() argument
1567 totqs = vport->num_rxq + vport->num_bufq; in idpf_send_config_rx_queues_msg()
1573 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_config_rx_queues_msg()
1574 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_config_rx_queues_msg()
1578 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_config_rx_queues_msg()
1581 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { in idpf_send_config_rx_queues_msg()
1586 qi[k].model = cpu_to_le16(vport->rxq_model); in idpf_send_config_rx_queues_msg()
1596 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) in idpf_send_config_rx_queues_msg()
1601 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_config_rx_queues_msg()
1610 if (!idpf_is_queue_model_split(vport->rxq_model)) { in idpf_send_config_rx_queues_msg()
1625 if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { in idpf_send_config_rx_queues_msg()
1632 if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) in idpf_send_config_rx_queues_msg()
1646 qi[k].model = cpu_to_le16(vport->rxq_model); in idpf_send_config_rx_queues_msg()
1682 crq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_config_rx_queues_msg()
1688 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_config_rx_queues_msg()
1705 * @vport: virtual port data structure
1711 static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) in idpf_send_ena_dis_queues_msg() argument
1722 num_txq = vport->num_txq + vport->num_complq; in idpf_send_ena_dis_queues_msg()
1723 num_rxq = vport->num_rxq + vport->num_bufq; in idpf_send_ena_dis_queues_msg()
1730 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_send_ena_dis_queues_msg()
1731 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_ena_dis_queues_msg()
1739 if (vport->num_txq != k) in idpf_send_ena_dis_queues_msg()
1742 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_send_ena_dis_queues_msg()
1745 for (i = 0; i < vport->num_txq_grp; i++, k++) { in idpf_send_ena_dis_queues_msg()
1746 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_ena_dis_queues_msg()
1752 if (vport->num_complq != (k - vport->num_txq)) in idpf_send_ena_dis_queues_msg()
1756 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_ena_dis_queues_msg()
1757 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_ena_dis_queues_msg()
1759 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_ena_dis_queues_msg()
1765 if (idpf_is_queue_model_split(vport->rxq_model)) { in idpf_send_ena_dis_queues_msg()
1779 if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) in idpf_send_ena_dis_queues_msg()
1782 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_ena_dis_queues_msg()
1785 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_ena_dis_queues_msg()
1786 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_ena_dis_queues_msg()
1788 for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { in idpf_send_ena_dis_queues_msg()
1798 if (vport->num_bufq != k - (vport->num_txq + in idpf_send_ena_dis_queues_msg()
1799 vport->num_complq + in idpf_send_ena_dis_queues_msg()
1800 vport->num_rxq)) in idpf_send_ena_dis_queues_msg()
1827 eq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_ena_dis_queues_msg()
1834 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_ena_dis_queues_msg()
1851 * @vport: virtual port data structure
1857 int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) in idpf_send_map_unmap_queue_vector_msg() argument
1867 num_q = vport->num_txq + vport->num_rxq; in idpf_send_map_unmap_queue_vector_msg()
1874 for (i = 0; i < vport->num_txq_grp; i++) { in idpf_send_map_unmap_queue_vector_msg()
1875 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in idpf_send_map_unmap_queue_vector_msg()
1882 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_send_map_unmap_queue_vector_msg()
1896 if (vport->num_txq != k) in idpf_send_map_unmap_queue_vector_msg()
1899 for (i = 0; i < vport->num_rxq_grp; i++) { in idpf_send_map_unmap_queue_vector_msg()
1900 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in idpf_send_map_unmap_queue_vector_msg()
1903 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_map_unmap_queue_vector_msg()
1911 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_map_unmap_queue_vector_msg()
1924 if (idpf_is_queue_model_split(vport->txq_model)) { in idpf_send_map_unmap_queue_vector_msg()
1925 if (vport->num_rxq != k - vport->num_complq) in idpf_send_map_unmap_queue_vector_msg()
1928 if (vport->num_rxq != k - vport->num_txq) in idpf_send_map_unmap_queue_vector_msg()
1957 vqvm->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_map_unmap_queue_vector_msg()
1961 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_map_unmap_queue_vector_msg()
1977 * @vport: Virtual port private data structure
1982 int idpf_send_enable_queues_msg(struct idpf_vport *vport) in idpf_send_enable_queues_msg() argument
1984 return idpf_send_ena_dis_queues_msg(vport, true); in idpf_send_enable_queues_msg()
1989 * @vport: Virtual port private data structure
1994 int idpf_send_disable_queues_msg(struct idpf_vport *vport) in idpf_send_disable_queues_msg() argument
1998 err = idpf_send_ena_dis_queues_msg(vport, false); in idpf_send_disable_queues_msg()
2005 for (i = 0; i < vport->num_txq; i++) in idpf_send_disable_queues_msg()
2006 idpf_queue_set(POLL_MODE, vport->txqs[i]); in idpf_send_disable_queues_msg()
2010 for (i = 0; i < vport->num_q_vectors; i++) in idpf_send_disable_queues_msg()
2011 napi_schedule(&vport->q_vectors[i].napi); in idpf_send_disable_queues_msg()
2014 return idpf_wait_for_marker_event(vport); in idpf_send_disable_queues_msg()
2039 * @vport: Virtual port private data structure
2044 int idpf_send_delete_queues_msg(struct idpf_vport *vport) in idpf_send_delete_queues_msg() argument
2051 u16 vport_idx = vport->idx; in idpf_send_delete_queues_msg()
2056 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_send_delete_queues_msg()
2060 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_send_delete_queues_msg()
2071 eq->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_delete_queues_msg()
2081 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_delete_queues_msg()
2088 * @vport: Virtual port private data structure
2093 int idpf_send_config_queues_msg(struct idpf_vport *vport) in idpf_send_config_queues_msg() argument
2097 err = idpf_send_config_tx_queues_msg(vport); in idpf_send_config_queues_msg()
2101 return idpf_send_config_rx_queues_msg(vport); in idpf_send_config_queues_msg()
2106 * @vport: Virtual port private data structure
2112 * Returns 0 on success, negative on failure. vport _MUST_ be const here as
2113 * we should not change any fields within vport itself in this function.
2115 int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q, in idpf_send_add_queues_msg() argument
2122 u16 vport_idx = vport->idx; in idpf_send_add_queues_msg()
2130 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_send_add_queues_msg()
2134 aq.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_add_queues_msg()
2146 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_add_queues_msg()
2150 /* compare vc_msg num queues with vport num queues */ in idpf_send_add_queues_msg()
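The comment above idpf_send_add_queues_msg() stresses that the vport parameter must stay const because the function should not modify any vport fields. A tiny illustration of what that const contract buys, with a hypothetical demo_vport type: writes through the pointer simply fail to compile.

#include <stdint.h>

/* Hypothetical vport stand-in. */
struct demo_vport {
	uint32_t vport_id;
	uint16_t idx;
};

/* Pointer-to-const parameter: the compiler rejects any assignment through
 * it, enforcing the "read-only in this function" rule noted above. */
static uint32_t demo_read_vport_id(const struct demo_vport *vport)
{
	/* vport->idx = 0;   <-- would be a compile error */
	return vport->vport_id;
}

int main(void)
{
	const struct demo_vport v = { .vport_id = 3, .idx = 1 };

	return demo_read_vport_id(&v) == 3 ? 0 : 1;
}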
2289 * @vport: vport to get stats for
2293 int idpf_send_get_stats_msg(struct idpf_vport *vport) in idpf_send_get_stats_msg() argument
2295 struct idpf_netdev_priv *np = netdev_priv(vport->netdev); in idpf_send_get_stats_msg()
2306 stats_msg.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_stats_msg()
2314 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_stats_msg()
2335 vport->port_stats.vport_stats = stats_msg; in idpf_send_get_stats_msg()
2344 * @vport: virtual port data structure
2349 int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get) in idpf_send_get_set_rss_lut_msg() argument
2360 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_send_get_set_rss_lut_msg()
2366 rl->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_set_rss_lut_msg()
2386 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_set_rss_lut_msg()
2419 * @vport: virtual port data structure
2424 int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get) in idpf_send_get_set_rss_key_msg() argument
2435 &vport->adapter->vport_config[vport->idx]->user_config.rss_data; in idpf_send_get_set_rss_key_msg()
2441 rk->vport_id = cpu_to_le32(vport->vport_id); in idpf_send_get_set_rss_key_msg()
2461 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_get_set_rss_key_msg()
2548 * @vport: virtual port data structure
2552 int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) in idpf_send_get_rx_ptype_msg() argument
2558 struct idpf_adapter *adapter = vport->adapter; in idpf_send_get_rx_ptype_msg()
2564 if (vport->rx_ptype_lkup) in idpf_send_get_rx_ptype_msg()
2567 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_get_rx_ptype_msg()
2631 if (idpf_is_queue_model_split(vport->rxq_model)) in idpf_send_get_rx_ptype_msg()
2757 vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); in idpf_send_get_rx_ptype_msg()
2765 * @vport: virtual port data structure
2769 int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport) in idpf_send_ena_dis_loopback_msg() argument
2775 loopback.vport_id = cpu_to_le32(vport->vport_id); in idpf_send_ena_dis_loopback_msg()
2776 loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK); in idpf_send_ena_dis_loopback_msg()
2782 reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params); in idpf_send_ena_dis_loopback_msg()
2872  * Will release the memory holding the vport parameters received over the mailbox
2888  * Will allocate memory to hold the vport parameters received over the mailbox
2934 * initialized, allocate memory to store vport specific information and also
3012 dev_err(&adapter->pdev->dev, "Failed to alloc vport params buffer: %d\n", in idpf_vc_core_init()
3113 * @vport: virtual port data struct
3115 * This function requests the vector information required for the vport and
3117 * in the vport's queue vectors array.
3121 int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport) in idpf_vport_alloc_vec_indexes() argument
3126 vec_info.num_curr_vecs = vport->num_q_vectors; in idpf_vport_alloc_vec_indexes()
3127 vec_info.num_req_vecs = max(vport->num_txq, vport->num_rxq); in idpf_vport_alloc_vec_indexes()
3128 vec_info.default_vport = vport->default_vport; in idpf_vport_alloc_vec_indexes()
3129 vec_info.index = vport->idx; in idpf_vport_alloc_vec_indexes()
3131 num_alloc_vecs = idpf_req_rel_vector_indexes(vport->adapter, in idpf_vport_alloc_vec_indexes()
3132 vport->q_vector_idxs, in idpf_vport_alloc_vec_indexes()
3135 dev_err(&vport->adapter->pdev->dev, "Vector distribution failed: %d\n", in idpf_vport_alloc_vec_indexes()
3140 vport->num_q_vectors = num_alloc_vecs; in idpf_vport_alloc_vec_indexes()
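The idpf_vport_alloc_vec_indexes() fragments above request max(num_txq, num_rxq) interrupt vectors for the vport, fail if the adapter cannot distribute any, and otherwise record however many vectors were actually granted in num_q_vectors. A small self-contained sketch of that request/grant pattern (demo_* names are hypothetical, not the driver's API):

#include <stdint.h>
#include <stdio.h>

#define DEMO_AVAILABLE_VECS 4

/* Stand-in for the adapter-level distribution helper: grants at most what
 * is available and returns the number of vectors actually assigned. */
static int demo_req_vectors(uint16_t num_req)
{
	return num_req < DEMO_AVAILABLE_VECS ? num_req : DEMO_AVAILABLE_VECS;
}

int main(void)
{
	uint16_t num_txq = 8, num_rxq = 6;
	/* Request the larger of the TX and RX queue counts, as in the fragment. */
	uint16_t num_req = num_txq > num_rxq ? num_txq : num_rxq;
	int granted = demo_req_vectors(num_req);

	if (granted <= 0) {
		fprintf(stderr, "Vector distribution failed: %d\n", granted);
		return 1;
	}

	/* Record the grant, as the driver does with vport->num_q_vectors. */
	printf("requested %u vectors, running with %d\n", (unsigned int)num_req, granted);
	return 0;
}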
3147 * @vport: virtual port to be initialized
3148 * @max_q: vport max queue info
3150 * Will initialize vport with the info received through MB earlier
3152 void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) in idpf_vport_init() argument
3154 struct idpf_adapter *adapter = vport->adapter; in idpf_vport_init()
3160 u16 idx = vport->idx; in idpf_vport_init()
3171 vport->txq_model = le16_to_cpu(vport_msg->txq_model); in idpf_vport_init()
3172 vport->rxq_model = le16_to_cpu(vport_msg->rxq_model); in idpf_vport_init()
3173 vport->vport_type = le16_to_cpu(vport_msg->vport_type); in idpf_vport_init()
3174 vport->vport_id = le32_to_cpu(vport_msg->vport_id); in idpf_vport_init()
3180 ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); in idpf_vport_init()
3181 vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; in idpf_vport_init()
3184 memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); in idpf_vport_init()
3185 memcpy(vport->tx_itr_profile, tx_itr, IDPF_DIM_PROFILE_SLOTS); in idpf_vport_init()
3187 idpf_vport_set_hsplit(vport, ETHTOOL_TCP_DATA_SPLIT_ENABLED); in idpf_vport_init()
3189 idpf_vport_init_num_qs(vport, vport_msg); in idpf_vport_init()
3190 idpf_vport_calc_num_q_desc(vport); in idpf_vport_init()
3191 idpf_vport_calc_num_q_groups(vport); in idpf_vport_init()
3192 idpf_vport_alloc_vec_indexes(vport); in idpf_vport_init()
3194 vport->crc_enable = adapter->crc_enable; in idpf_vport_init()
3285 * @vport: virtual port for which the queues ids are initialized
3293 static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, in __idpf_vport_queue_ids_init() argument
3302 for (i = 0; i < vport->num_txq_grp; i++) { in __idpf_vport_queue_ids_init()
3303 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_vport_queue_ids_init()
3310 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_vport_queue_ids_init()
3311 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_vport_queue_ids_init()
3314 if (idpf_is_queue_model_split(vport->rxq_model)) in __idpf_vport_queue_ids_init()
3322 if (idpf_is_queue_model_split(vport->rxq_model)) in __idpf_vport_queue_ids_init()
3331 for (i = 0; i < vport->num_txq_grp && k < num_qids; i++, k++) { in __idpf_vport_queue_ids_init()
3332 struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; in __idpf_vport_queue_ids_init()
3338 for (i = 0; i < vport->num_rxq_grp; i++) { in __idpf_vport_queue_ids_init()
3339 struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; in __idpf_vport_queue_ids_init()
3340 u8 num_bufqs = vport->num_bufqs_per_qgrp; in __idpf_vport_queue_ids_init()
3359 * @vport: virtual port for which the queues ids are initialized
3364 int idpf_vport_queue_ids_init(struct idpf_vport *vport) in idpf_vport_queue_ids_init() argument
3369 u16 vport_idx = vport->idx; in idpf_vport_queue_ids_init()
3374 vport_config = vport->adapter->vport_config[vport_idx]; in idpf_vport_queue_ids_init()
3380 vport_params = vport->adapter->vport_params_recvd[vport_idx]; in idpf_vport_queue_ids_init()
3391 if (num_ids < vport->num_txq) { in idpf_vport_queue_ids_init()
3395 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, in idpf_vport_queue_ids_init()
3397 if (num_ids < vport->num_txq) { in idpf_vport_queue_ids_init()
3405 if (num_ids < vport->num_rxq) { in idpf_vport_queue_ids_init()
3409 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, in idpf_vport_queue_ids_init()
3411 if (num_ids < vport->num_rxq) { in idpf_vport_queue_ids_init()
3416 if (!idpf_is_queue_model_split(vport->txq_model)) in idpf_vport_queue_ids_init()
3421 if (num_ids < vport->num_complq) { in idpf_vport_queue_ids_init()
3425 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); in idpf_vport_queue_ids_init()
3426 if (num_ids < vport->num_complq) { in idpf_vport_queue_ids_init()
3432 if (!idpf_is_queue_model_split(vport->rxq_model)) in idpf_vport_queue_ids_init()
3437 if (num_ids < vport->num_bufq) { in idpf_vport_queue_ids_init()
3441 num_ids = __idpf_vport_queue_ids_init(vport, qids, num_ids, q_type); in idpf_vport_queue_ids_init()
3442 if (num_ids < vport->num_bufq) in idpf_vport_queue_ids_init()
3453 * @vport: virtual port data struct
3457 int idpf_vport_adjust_qs(struct idpf_vport *vport) in idpf_vport_adjust_qs() argument
3462 vport_msg.txq_model = cpu_to_le16(vport->txq_model); in idpf_vport_adjust_qs()
3463 vport_msg.rxq_model = cpu_to_le16(vport->rxq_model); in idpf_vport_adjust_qs()
3464 err = idpf_vport_calc_total_qs(vport->adapter, vport->idx, &vport_msg, in idpf_vport_adjust_qs()
3469 idpf_vport_init_num_qs(vport, &vport_msg); in idpf_vport_adjust_qs()
3470 idpf_vport_calc_num_q_groups(vport); in idpf_vport_adjust_qs()
3505 * idpf_get_vport_id: Get vport id
3506 * @vport: virtual port structure
3508 * Return vport id from the adapter persistent data
3510 u32 idpf_get_vport_id(struct idpf_vport *vport) in idpf_get_vport_id() argument
3514 vport_msg = vport->adapter->vport_params_recvd[vport->idx]; in idpf_get_vport_id()
3540 struct idpf_vport *vport; in idpf_mac_filter_async_handler() local
3559 vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id)); in idpf_mac_filter_async_handler()
3560 if (!vport) in idpf_mac_filter_async_handler()
3590 * @vport: Virtual port data structure
3597 int idpf_add_del_mac_filters(struct idpf_vport *vport, in idpf_add_del_mac_filters() argument
3706 * @config_data: Vport specific config data
3707 * @vport_id: Vport identifier
3709 * Request to enable promiscuous mode for the vport. Message is sent