Lines Matching +full:1 +full:ac
125 int hsg = 1; /* num of SGEs of linear part */ in mana_map_skb()
126 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_map_skb()
153 sg_i = 1; in mana_map_skb()
179 for (i = sg_i - 1; i >= hsg; i--) in mana_map_skb()
183 for (i = hsg - 1; i >= 0; i--) in mana_map_skb()
200 int num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_fix_skb_head()
242 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_start_xmit()
276 pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1; in mana_start_xmit()
300 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags; in mana_start_xmit()
331 pkg.tx_oob.s_oob.comp_iphdr_csum = 1; in mana_start_xmit()
332 pkg.tx_oob.s_oob.comp_tcp_csum = 1; in mana_start_xmit()
362 pkg.tx_oob.s_oob.comp_tcp_csum = 1; in mana_start_xmit()
369 pkg.tx_oob.s_oob.comp_udp_csum = 1; in mana_start_xmit()
500 txq = apc->indir_table[hash & (apc->indir_table_sz - 1)]; in mana_get_tx_queue()
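Note: the hit at line 500 reduces the packet hash to an indirection-table slot with a mask rather than a modulo, which only works because the table size is a power of two. A minimal sketch of that pattern, with illustrative names that are not the driver's:

#include <linux/types.h>

/* Power-of-two indirection lookup, as in the line-500 hit above.
 * Illustrative only: table and return-value names are hypothetical.
 */
static u32 pick_txq(u32 hash, const u32 *indir_table, u32 table_sz)
{
	/* table_sz must be a power of two for the mask to equal a modulo */
	return indir_table[hash & (table_sz - 1)];
}
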
514 if (ndev->real_num_tx_queues == 1) in mana_select_queue()
535 dev = mpc->ac->gdma_dev->gdma_context->dev; in mana_pre_dealloc_rxbufs()
639 dev = mpc->ac->gdma_dev->gdma_context->dev; in mana_pre_alloc_rxbufs()
670 mpc->rxbpre_total = i + 1; in mana_pre_alloc_rxbufs()
751 static int mana_send_request(struct mana_context *ac, void *in_buf, in mana_send_request() argument
754 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_send_request()
807 req.attached_gfid = 1; in mana_pf_register_hw_vport()
808 req.is_pf_default_vport = 1; in mana_pf_register_hw_vport()
809 req.allow_all_ether_types = 1; in mana_pf_register_hw_vport()
811 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_hw_vport()
840 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_hw_vport()
867 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_register_filter()
896 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_pf_deregister_filter()
912 static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver, in mana_query_device_cfg() argument
916 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_query_device_cfg()
931 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp)); in mana_query_device_cfg()
971 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_vport_cfg()
1051 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_cfg_vport()
1122 err = mana_send_request(apc->ac, req, req_buf_size, &resp, in mana_cfg_vport_steering()
1171 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_create_wq_obj()
1217 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_destroy_wq_obj()
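Note: the configuration hits above (lines 811 through 1217) all follow the request/response convention spelled out in full at line 931: build a request on the stack, then pass mana_send_request() the context plus both buffers and their sizes. A schematic sketch of that calling convention, as it would sit inside mana_en.c itself (mana_send_request() is static there); demo_req and demo_resp are made-up stand-ins, not the driver's real structures:

/* Hypothetical request/response pair; the driver's real ones are larger
 * and carry a GDMA header.
 */
struct demo_req  { u64 vport; u32 flags; };
struct demo_resp { u32 status; };

static int demo_config_call(struct mana_port_context *apc, u64 vport)
{
	struct demo_resp resp = {};
	struct demo_req req = {};

	req.vport = vport;

	/* Same calling convention as the hits above:
	 * context, in-buffer, in-size, out-buffer, out-size.
	 */
	return mana_send_request(apc->ac, &req, sizeof(req), &resp,
				 sizeof(resp));
}
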
1232 static void mana_destroy_eq(struct mana_context *ac) in mana_destroy_eq() argument
1234 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_destroy_eq()
1238 if (!ac->eqs) in mana_destroy_eq()
1241 debugfs_remove_recursive(ac->mana_eqs_debugfs); in mana_destroy_eq()
1242 ac->mana_eqs_debugfs = NULL; in mana_destroy_eq()
1245 eq = ac->eqs[i].eq; in mana_destroy_eq()
1252 kfree(ac->eqs); in mana_destroy_eq()
1253 ac->eqs = NULL; in mana_destroy_eq()
1256 static void mana_create_eq_debugfs(struct mana_context *ac, int i) in mana_create_eq_debugfs() argument
1258 struct mana_eq eq = ac->eqs[i]; in mana_create_eq_debugfs()
1262 eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs); in mana_create_eq_debugfs()
1268 static int mana_create_eq(struct mana_context *ac) in mana_create_eq() argument
1270 struct gdma_dev *gd = ac->gdma_dev; in mana_create_eq()
1276 ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), in mana_create_eq()
1278 if (!ac->eqs) in mana_create_eq()
1285 spec.eq.context = ac->eqs; in mana_create_eq()
1288 ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs); in mana_create_eq()
1291 spec.eq.msix_index = (i + 1) % gc->num_msix_usable; in mana_create_eq()
1292 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); in mana_create_eq()
1295 mana_create_eq_debugfs(ac, i); in mana_create_eq()
1300 mana_destroy_eq(ac); in mana_create_eq()
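Note: in the mana_create_eq() hits, each queue's EQ takes MSI-X vector (i + 1) % gc->num_msix_usable (line 1291), a round-robin spread over the usable vectors offset by one so queue 0 lands on vector 1. A sketch of just that assignment rule, with illustrative names:

/* Round-robin vector choice as in the line-1291 hit: queue i maps to
 * vector (i + 1) % usable, rotating through 1, 2, ..., usable-1, 0, 1, ...
 * Illustrative only.
 */
static unsigned int eq_msix_index(unsigned int i, unsigned int usable)
{
	return (i + 1) % usable;
}
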
1316 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_fence_rq()
1377 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_unmap_skb()
1382 hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1; in mana_unmap_skb()
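Note: line 1382 recomputes how many SGEs the skb's linear area was mapped with: two for a GSO skb whose linear length exceeds the first recorded SGE size, otherwise one. The same rule restated as a standalone helper, where first_sge_size is a hypothetical stand-in for ash->size[0]:

#include <linux/skbuff.h>

/* Head-SGE count rule from the line-1382 hit; purely illustrative. */
static int head_sge_count(struct sk_buff *skb, u32 first_sge_size)
{
	return (skb_is_gso(skb) && skb_headlen(skb) > first_sge_size) ? 2 : 1;
}
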
1416 if (comp_read < 1) in mana_poll_tx_cq()
1502 WARN_ON_ONCE(1); in mana_poll_tx_cq()
1524 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_post_pkt_rxq()
1758 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); in mana_process_rx_cqe()
1860 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_cq()
1870 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_deinit_txq()
1936 struct mana_context *ac = apc->ac; in mana_create_txq() local
1937 struct gdma_dev *gd = ac->gdma_dev; in mana_create_txq()
2001 spec.cq.parent_eq = ac->eqs[i].eq; in mana_create_txq()
2063 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_destroy_rxq()
2153 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; in mana_alloc_rx_wqe()
2168 rx_oob->num_sge = 1; in mana_alloc_rx_wqe()
2170 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq, in mana_alloc_rx_wqe()
2235 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_rxq()
2334 netif_napi_add_weight(ndev, &cq->napi, mana_poll, 1); in mana_create_rxq()
2383 struct mana_context *ac = apc->ac; in mana_add_rx_queues() local
2389 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); in mana_add_rx_queues()
2409 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_destroy_vport()
2432 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_create_vport()
2542 err = mana_send_request(apc->ac, &req, sizeof(req), &resp, in mana_query_gf_stats()
2593 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_init_port()
2634 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_alloc_queues()
2649 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE; in mana_alloc_queues()
2712 struct gdma_dev *gd = apc->ac->gdma_dev; in mana_dealloc_queues()
2745 tsleep <<= 1; in mana_dealloc_queues()
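Note: the tsleep <<= 1 hit at line 2745 is an exponential backoff: mana_dealloc_queues() doubles its sleep each round while it waits for outstanding work to finish. A self-contained sketch of the pattern under assumed names (drained() is a hypothetical predicate, not a driver function):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll with exponential backoff: double the sleep each round, cap it,
 * and give up after timeout_ms.  Illustrative only.
 */
static int wait_until_drained(bool (*drained)(void), unsigned int timeout_ms)
{
	unsigned int slept_ms = 0, tsleep = 1;

	while (!drained()) {
		if (slept_ms >= timeout_ms)
			return -ETIMEDOUT;
		msleep(tsleep);
		slept_ms += tsleep;
		tsleep <<= 1;		/* exponential backoff */
		if (tsleep > 100)
			tsleep = 100;	/* cap a single sleep at 100 ms */
	}
	return 0;
}
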
2812 static int mana_probe_port(struct mana_context *ac, int port_idx, in mana_probe_port() argument
2815 struct gdma_context *gc = ac->gdma_dev->gdma_context; in mana_probe_port()
2828 apc->ac = ac; in mana_probe_port()
2962 struct mana_context *ac = gd->driver_data; in mana_probe() local
2977 ac = kzalloc(sizeof(*ac), GFP_KERNEL); in mana_probe()
2978 if (!ac) in mana_probe()
2981 ac->gdma_dev = gd; in mana_probe()
2982 gd->driver_data = ac; in mana_probe()
2985 err = mana_create_eq(ac); in mana_probe()
2989 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, in mana_probe()
2995 ac->num_ports = num_ports; in mana_probe()
2997 if (ac->num_ports != num_ports) { in mana_probe()
2999 ac->num_ports, num_ports); in mana_probe()
3005 if (ac->num_ports == 0) in mana_probe()
3008 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV) in mana_probe()
3009 ac->num_ports = MAX_PORTS_IN_MANA_DEV; in mana_probe()
3012 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
3013 err = mana_probe_port(ac, i, &ac->ports[i]); in mana_probe()
3025 for (i = 0; i < ac->num_ports; i++) { in mana_probe()
3027 err = mana_attach(ac->ports[i]); in mana_probe()
3052 struct mana_context *ac = gd->driver_data; in mana_remove() local
3063 for (i = 0; i < ac->num_ports; i++) { in mana_remove()
3064 ndev = ac->ports[i]; in mana_remove()
3096 mana_destroy_eq(ac); in mana_remove()
3105 kfree(ac); in mana_remove()
3108 struct net_device *mana_get_primary_netdev_rcu(struct mana_context *ac, u32 port_index) in mana_get_primary_netdev_rcu() argument
3114 if (port_index >= ac->num_ports) in mana_get_primary_netdev_rcu()
3118 if (ac->ports[port_index]->flags & IFF_SLAVE) in mana_get_primary_netdev_rcu()
3119 ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]); in mana_get_primary_netdev_rcu()
3121 ndev = ac->ports[port_index]; in mana_get_primary_netdev_rcu()