Lines Matching +full:data +full:- +full:mapping
1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2016-2017 Broadcom Limited
27 dma_addr_t mapping, u32 len, in bnxt_xmit_bd() argument
40 num_frags = sinfo->nr_frags; in bnxt_xmit_bd()
44 prod = txr->tx_prod; in bnxt_xmit_bd()
45 tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_xmit_bd()
46 tx_buf->nr_frags = num_frags; in bnxt_xmit_bd()
48 tx_buf->page = virt_to_head_page(xdp->data); in bnxt_xmit_bd()
50 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_xmit_bd()
53 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); in bnxt_xmit_bd()
54 txbd->tx_bd_opaque = SET_TX_OPAQUE(bp, txr, prod, 1 + num_frags); in bnxt_xmit_bd()
55 txbd->tx_bd_haddr = cpu_to_le64(mapping); in bnxt_xmit_bd()
59 skb_frag_t *frag = &sinfo->frags[i]; in bnxt_xmit_bd()
65 WRITE_ONCE(txr->tx_prod, prod); in bnxt_xmit_bd()
68 frag_tx_buf = &txr->tx_buf_ring[RING_TX(bp, prod)]; in bnxt_xmit_bd()
69 frag_tx_buf->page = skb_frag_page(frag); in bnxt_xmit_bd()
71 txbd = &txr->tx_desc_ring[TX_RING(bp, prod)][TX_IDX(prod)]; in bnxt_xmit_bd()
75 txbd->tx_bd_len_flags_type = cpu_to_le32(flags); in bnxt_xmit_bd()
78 txbd->tx_bd_haddr = cpu_to_le64(frag_mapping); in bnxt_xmit_bd()
84 txbd->tx_bd_len_flags_type = cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags | in bnxt_xmit_bd()
89 WRITE_ONCE(txr->tx_prod, prod); in bnxt_xmit_bd()
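The hits above are from bnxt_xmit_bd(), which appears to live in the bnxt_en driver's XDP file (bnxt_xdp.c): it writes one TX buffer descriptor per buffer (head plus any frags), encodes the descriptor count into tx_bd_opaque, and publishes the new producer index with WRITE_ONCE() so the completion path sees a consistent value. Below is a condensed, single-buffer sketch of that fill-and-publish pattern; sketch_tx_bd, sketch_tx_ring and SKETCH_LEN_SHIFT are hypothetical stand-ins, not the driver's real structures.

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>

/* Hypothetical, simplified stand-ins for the hardware TX descriptor and
 * the software ring state referenced in the lines above.
 */
struct sketch_tx_bd {
        __le32 len_flags_type;
        __le32 opaque;
        __le64 haddr;
};

struct sketch_tx_ring {
        struct sketch_tx_bd *desc;
        u16 prod;
        u16 mask;                       /* ring size - 1, power of two */
};

#define SKETCH_LEN_SHIFT        16      /* plays the role of TX_BD_LEN_SHIFT */

static void sketch_xmit_bd(struct sketch_tx_ring *txr, dma_addr_t mapping,
                           u32 len, u32 flags)
{
        u16 prod = txr->prod;
        struct sketch_tx_bd *txbd = &txr->desc[prod & txr->mask];

        txbd->len_flags_type = cpu_to_le32((len << SKETCH_LEN_SHIFT) | flags);
        txbd->opaque = cpu_to_le32(prod);       /* the real code also packs ring and BD count */
        txbd->haddr = cpu_to_le64(mapping);

        prod++;                                 /* NEXT_TX()-style advance */
        WRITE_ONCE(txr->prod, prod);            /* publish to the completion side */
}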
95 dma_addr_t mapping, u32 len, u16 rx_prod, in __bnxt_xmit_xdp() argument
100 tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, xdp); in __bnxt_xmit_xdp()
101 tx_buf->rx_prod = rx_prod; in __bnxt_xmit_xdp()
102 tx_buf->action = XDP_TX; in __bnxt_xmit_xdp()
108 dma_addr_t mapping, u32 len, in __bnxt_xmit_xdp_redirect() argument
113 tx_buf = bnxt_xmit_bd(bp, txr, mapping, len, NULL); in __bnxt_xmit_xdp_redirect()
114 tx_buf->action = XDP_REDIRECT; in __bnxt_xmit_xdp_redirect()
115 tx_buf->xdpf = xdpf; in __bnxt_xmit_xdp_redirect()
116 dma_unmap_addr_set(tx_buf, mapping, mapping); in __bnxt_xmit_xdp_redirect()
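The __bnxt_xmit_xdp() and __bnxt_xmit_xdp_redirect() hits show the per-slot bookkeeping: an XDP_TX buffer records the RX producer to replenish on completion, while an XDP_REDIRECT buffer keeps the xdp_frame pointer and saves the DMA address with dma_unmap_addr_set() for the later unmap. A minimal sketch of that bookkeeping, with a hypothetical sketch_tx_buf type standing in for the driver's software TX buffer:

#include <linux/bpf.h>
#include <linux/dma-mapping.h>
#include <net/xdp.h>

/* Hypothetical software-side state kept per TX slot. */
struct sketch_tx_buf {
        u8 action;                      /* XDP_TX or XDP_REDIRECT */
        u16 rx_prod;                    /* RX slot to replenish (XDP_TX only) */
        struct xdp_frame *xdpf;         /* frame to return (XDP_REDIRECT only) */
        DEFINE_DMA_UNMAP_ADDR(mapping); /* saved DMA address for the later unmap */
};

static void sketch_mark_xdp_tx(struct sketch_tx_buf *tx_buf, u16 rx_prod)
{
        tx_buf->rx_prod = rx_prod;
        tx_buf->action = XDP_TX;
}

static void sketch_mark_xdp_redirect(struct sketch_tx_buf *tx_buf,
                                     struct xdp_frame *xdpf, dma_addr_t mapping)
{
        tx_buf->action = XDP_REDIRECT;
        tx_buf->xdpf = xdpf;
        dma_unmap_addr_set(tx_buf, mapping, mapping);
}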
122 struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; in bnxt_tx_int_xdp()
123 struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; in bnxt_tx_int_xdp()
124 u16 tx_hw_cons = txr->tx_hw_cons; in bnxt_tx_int_xdp()
127 u16 tx_cons = txr->tx_cons; in bnxt_tx_int_xdp()
135 tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)]; in bnxt_tx_int_xdp()
137 if (tx_buf->action == XDP_REDIRECT) { in bnxt_tx_int_xdp()
138 struct pci_dev *pdev = bp->pdev; in bnxt_tx_int_xdp()
140 dma_unmap_single(&pdev->dev, in bnxt_tx_int_xdp()
141 dma_unmap_addr(tx_buf, mapping), in bnxt_tx_int_xdp()
144 xdp_return_frame(tx_buf->xdpf); in bnxt_tx_int_xdp()
145 tx_buf->action = 0; in bnxt_tx_int_xdp()
146 tx_buf->xdpf = NULL; in bnxt_tx_int_xdp()
147 } else if (tx_buf->action == XDP_TX) { in bnxt_tx_int_xdp()
148 tx_buf->action = 0; in bnxt_tx_int_xdp()
152 frags = tx_buf->nr_frags; in bnxt_tx_int_xdp()
155 tx_buf = &txr->tx_buf_ring[RING_TX(bp, tx_cons)]; in bnxt_tx_int_xdp()
156 page_pool_recycle_direct(rxr->page_pool, tx_buf->page); in bnxt_tx_int_xdp()
165 bnapi->events &= ~BNXT_TX_CMP_EVENT; in bnxt_tx_int_xdp()
166 WRITE_ONCE(txr->tx_cons, tx_cons); in bnxt_tx_int_xdp()
168 tx_buf = &txr->tx_buf_ring[RING_TX(bp, last_tx_cons)]; in bnxt_tx_int_xdp()
169 bnxt_db_write(bp, &rxr->rx_db, tx_buf->rx_prod); in bnxt_tx_int_xdp()
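bnxt_tx_int_xdp() undoes that setup at completion time: XDP_REDIRECT buffers are DMA-unmapped and returned to the stack with xdp_return_frame(), while XDP_TX buffers (and their fragments) have their pages recycled straight back into the RX page_pool before the consumer index is published and the RX doorbell is rung. A reduced sketch of those two branches (header names assume a recent kernel with net/page_pool/helpers.h):

#include <linux/bpf.h>
#include <linux/dma-mapping.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

/* Release one completed XDP TX slot. Hypothetical helper mirroring the
 * two branches in bnxt_tx_int_xdp() above.
 */
static void sketch_complete_one(struct device *dev, struct page_pool *pool,
                                u8 action, struct xdp_frame *xdpf,
                                dma_addr_t mapping, u32 len, struct page *page)
{
        if (action == XDP_REDIRECT) {
                /* The frame memory belongs to the stack: unmap it and hand
                 * it back with xdp_return_frame().
                 */
                dma_unmap_single(dev, mapping, len, DMA_TO_DEVICE);
                xdp_return_frame(xdpf);
        } else if (action == XDP_TX) {
                /* The page came from the RX page_pool: recycle it in place
                 * so the RX ring can be replenished cheaply.
                 */
                page_pool_recycle_direct(pool, page);
        }
}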
176 struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); in bnxt_xdp_attached()
188 dma_addr_t mapping; in bnxt_xdp_buff_init() local
191 pdev = bp->pdev; in bnxt_xdp_buff_init()
192 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_xdp_buff_init()
193 offset = bp->rx_offset; in bnxt_xdp_buff_init()
195 mapping = rx_buf->mapping - bp->rx_dma_offset; in bnxt_xdp_buff_init()
196 dma_sync_single_for_cpu(&pdev->dev, mapping + offset, len, bp->rx_dir); in bnxt_xdp_buff_init()
198 xdp_init_buff(xdp, buflen, &rxr->xdp_rxq); in bnxt_xdp_buff_init()
199 xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true); in bnxt_xdp_buff_init()
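bnxt_xdp_buff_init() prepares a received buffer for the BPF program: it syncs the DMA region for CPU access and then describes the data area to the XDP core. A minimal sketch of the same sequence using the generic helpers; note the real driver syncs with its stored rx_dir, which is bidirectional when XDP_TX is possible, whereas DMA_FROM_DEVICE is used below for simplicity:

#include <linux/dma-mapping.h>
#include <net/xdp.h>

/* Hypothetical helper: make @len bytes at @mapping + @offset visible to
 * the CPU, then wrap them in an xdp_buff so a BPF program can run on them.
 */
static void sketch_xdp_buff_init(struct device *dev, struct xdp_rxq_info *rxq,
                                 dma_addr_t mapping, unsigned char *data_ptr,
                                 unsigned int offset, unsigned int len,
                                 u32 buflen, struct xdp_buff *xdp)
{
        dma_sync_single_for_cpu(dev, mapping + offset, len, DMA_FROM_DEVICE);

        xdp_init_buff(xdp, buflen, rxq);
        /* hard_start is the data pointer minus the headroom; the final
         * argument marks XDP metadata as supported.
         */
        xdp_prepare_buff(xdp, data_ptr - offset, offset, len, true);
}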
211 for (i = 0; i < shinfo->nr_frags; i++) { in bnxt_xdp_buff_frags_free()
212 struct page *page = skb_frag_page(&shinfo->frags[i]); in bnxt_xdp_buff_frags_free()
214 page_pool_recycle_direct(rxr->page_pool, page); in bnxt_xdp_buff_frags_free()
216 shinfo->nr_frags = 0; in bnxt_xdp_buff_frags_free()
220 * true - packet consumed by XDP and new buffer is allocated.
221 * false - packet should be passed to the stack.
227 struct bpf_prog *xdp_prog = READ_ONCE(rxr->xdp_prog); in bnxt_rx_xdp()
231 dma_addr_t mapping; in bnxt_rx_xdp() local
241 pdev = bp->pdev; in bnxt_rx_xdp()
242 offset = bp->rx_offset; in bnxt_rx_xdp()
244 txr = rxr->bnapi->tx_ring[0]; in bnxt_rx_xdp()
246 orig_data = xdp->data; in bnxt_rx_xdp()
254 if (tx_avail != bp->tx_ring_size) in bnxt_rx_xdp()
257 *len = xdp->data_end - xdp->data; in bnxt_rx_xdp()
258 if (orig_data != xdp->data) { in bnxt_rx_xdp()
259 offset = xdp->data - xdp->data_hard_start; in bnxt_rx_xdp()
260 *data_ptr = xdp->data_hard_start + offset; in bnxt_rx_xdp()
268 rx_buf = &rxr->rx_buf_ring[cons]; in bnxt_rx_xdp()
269 mapping = rx_buf->mapping - bp->rx_dma_offset; in bnxt_rx_xdp()
275 tx_needed += sinfo->nr_frags; in bnxt_rx_xdp()
280 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
286 dma_sync_single_for_device(&pdev->dev, mapping + offset, *len, in bnxt_rx_xdp()
287 bp->rx_dir); in bnxt_rx_xdp()
290 __bnxt_xmit_xdp(bp, txr, mapping + offset, *len, in bnxt_rx_xdp()
291 NEXT_RX(rxr->rx_prod), xdp); in bnxt_rx_xdp()
301 if (bnxt_alloc_rx_data(bp, rxr, rxr->rx_prod, GFP_ATOMIC)) { in bnxt_rx_xdp()
302 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
308 if (xdp_do_redirect(bp->dev, xdp, xdp_prog)) { in bnxt_rx_xdp()
309 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
310 page_pool_recycle_direct(rxr->page_pool, page); in bnxt_rx_xdp()
317 bpf_warn_invalid_xdp_action(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
320 trace_xdp_exception(bp->dev, xdp_prog, act); in bnxt_rx_xdp()
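bnxt_rx_xdp() runs the attached program and acts on the verdict: XDP_PASS falls through to the stack, XDP_TX syncs the (possibly rewritten) buffer back to the device and re-posts it on the XDP TX ring, XDP_REDIRECT hands the buffer to xdp_do_redirect(), and anything else is dropped with a trace_xdp_exception(). A stripped-down sketch of that verdict switch; the bnxt-specific ring bookkeeping (tx_avail checks, buffer re-allocation) is omitted:

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/dma-mapping.h>
#include <linux/filter.h>
#include <net/page_pool/helpers.h>
#include <net/xdp.h>

/* Returns true when XDP consumed the packet, false to pass it to the stack
 * (the same contract documented above bnxt_rx_xdp()).
 */
static bool sketch_rx_xdp(struct net_device *netdev, struct bpf_prog *prog,
                          struct xdp_buff *xdp, struct device *dev,
                          dma_addr_t mapping, unsigned int offset,
                          struct page_pool *pool, struct page *page)
{
        u32 act = bpf_prog_run_xdp(prog, xdp);
        u32 len = xdp->data_end - xdp->data;

        switch (act) {
        case XDP_PASS:
                return false;           /* let the networking stack have it */
        case XDP_TX:
                /* Hand the (possibly rewritten) data back to the device ... */
                dma_sync_single_for_device(dev, mapping + offset, len,
                                           DMA_BIDIRECTIONAL);
                /* ... then queue (mapping + offset, len) on the XDP TX ring;
                 * that step is driver-specific and omitted here.
                 */
                return true;
        case XDP_REDIRECT:
                if (xdp_do_redirect(netdev, xdp, prog)) {
                        trace_xdp_exception(netdev, prog, act);
                        page_pool_recycle_direct(pool, page);
                }
                return true;
        default:
                bpf_warn_invalid_xdp_action(netdev, prog, act);
                fallthrough;
        case XDP_ABORTED:
                trace_xdp_exception(netdev, prog, act);
                fallthrough;
        case XDP_DROP:
                page_pool_recycle_direct(pool, page);
                return true;
        }
}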
334 struct bpf_prog *xdp_prog = READ_ONCE(bp->xdp_prog); in bnxt_xdp_xmit()
335 struct pci_dev *pdev = bp->pdev; in bnxt_xdp_xmit()
337 dma_addr_t mapping; in bnxt_xdp_xmit() local
342 if (!test_bit(BNXT_STATE_OPEN, &bp->state) || in bnxt_xdp_xmit()
343 !bp->tx_nr_rings_xdp || in bnxt_xdp_xmit()
345 return -EINVAL; in bnxt_xdp_xmit()
347 ring = smp_processor_id() % bp->tx_nr_rings_xdp; in bnxt_xdp_xmit()
348 txr = &bp->tx_ring[ring]; in bnxt_xdp_xmit()
350 if (READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING) in bnxt_xdp_xmit()
351 return -EINVAL; in bnxt_xdp_xmit()
354 spin_lock(&txr->xdp_tx_lock); in bnxt_xdp_xmit()
362 mapping = dma_map_single(&pdev->dev, xdp->data, xdp->len, in bnxt_xdp_xmit()
365 if (dma_mapping_error(&pdev->dev, mapping)) in bnxt_xdp_xmit()
368 __bnxt_xmit_xdp_redirect(bp, txr, mapping, xdp->len, xdp); in bnxt_xdp_xmit()
373 /* Sync BD data before updating doorbell */ in bnxt_xdp_xmit()
375 bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); in bnxt_xdp_xmit()
379 spin_unlock(&txr->xdp_tx_lock); in bnxt_xdp_xmit()
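bnxt_xdp_xmit() is the ndo_xdp_xmit hook used when another device redirects frames here: it picks an XDP TX ring by CPU, takes that ring's xdp_tx_lock, DMA-maps each frame, and writes the doorbell once per batch when XDP_XMIT_FLUSH is set. A condensed sketch of the per-frame mapping step; ring selection and the doorbell write are driver-specific and only described in comments:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <net/xdp.h>

/* Map one redirected frame for transmission; returns 0 on success.
 * Hypothetical helper mirroring the dma_map_single()/dma_mapping_error()
 * pattern in the bnxt_xdp_xmit() lines above.
 */
static int sketch_map_xdp_frame(struct device *dev, struct xdp_frame *xdpf,
                                dma_addr_t *mapping)
{
        *mapping = dma_map_single(dev, xdpf->data, xdpf->len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *mapping))
                return -ENOMEM;

        /* The caller queues (*mapping, xdpf->len) on the ring chosen by
         * smp_processor_id() % nr_xdp_rings, under that ring's spinlock,
         * and rings the TX doorbell once per batch when XDP_XMIT_FLUSH
         * is set in the flags argument.
         */
        return 0;
}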
387 struct net_device *dev = bp->dev; in bnxt_xdp_set()
391 if (prog && !prog->aux->xdp_has_frags && in bnxt_xdp_set()
392 bp->dev->mtu > BNXT_MAX_PAGE_MODE_MTU) { in bnxt_xdp_set()
394 bp->dev->mtu, BNXT_MAX_PAGE_MODE_MTU); in bnxt_xdp_set()
395 return -EOPNOTSUPP; in bnxt_xdp_set()
397 if (prog && bp->flags & BNXT_FLAG_HDS) { in bnxt_xdp_set()
399 return -EOPNOTSUPP; in bnxt_xdp_set()
401 if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) { in bnxt_xdp_set()
403 return -EOPNOTSUPP; in bnxt_xdp_set()
406 tx_xdp = bp->rx_nr_rings; in bnxt_xdp_set()
408 tc = bp->num_tc; in bnxt_xdp_set()
411 rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, in bnxt_xdp_set()
420 old = xchg(&bp->xdp_prog, prog); in bnxt_xdp_set()
431 bp->tx_nr_rings_xdp = tx_xdp; in bnxt_xdp_set()
432 bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp; in bnxt_xdp_set()
433 tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings); in bnxt_xdp_set()
434 bp->cp_nr_rings = max_t(int, tx_cp, bp->rx_nr_rings); in bnxt_xdp_set()
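Attaching a program in bnxt_xdp_set() reshapes the ring layout: every RX ring gets a dedicated XDP TX ring (tx_xdp = rx_nr_rings), the total TX count becomes tx_nr_rings_per_tc * tc + tx_xdp, and the completion-ring count is the maximum of the TX completion rings and the RX rings. A small, runnable worked example of that arithmetic; it assumes bnxt_num_tx_to_cp() maps TX rings to completion rings one-to-one, which is a simplification that does not hold on every chip generation:

#include <stdio.h>

int main(void)
{
        int rx_nr_rings = 8, tx_nr_rings_per_tc = 8, tc = 1;

        int tx_xdp = rx_nr_rings;                       /* one XDP TX ring per RX ring */
        int tx_nr_rings = tx_nr_rings_per_tc * tc + tx_xdp;
        int tx_cp = tx_nr_rings;                        /* simplified bnxt_num_tx_to_cp() */
        int cp_nr_rings = tx_cp > rx_nr_rings ? tx_cp : rx_nr_rings;

        printf("tx_xdp=%d tx_nr_rings=%d cp_nr_rings=%d\n",
               tx_xdp, tx_nr_rings, cp_nr_rings);       /* prints 8, 16, 16 */
        return 0;
}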
449 switch (xdp->command) { in bnxt_xdp()
451 rc = bnxt_xdp_set(bp, xdp->prog); in bnxt_xdp()
454 rc = -EINVAL; in bnxt_xdp()
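bnxt_xdp() is the driver's ndo_bpf dispatcher: only XDP_SETUP_PROG is handled and every other command is rejected with -EINVAL. A minimal sketch of that dispatch shape; sketch_set_prog() is a hypothetical stand-in for bnxt_xdp_set():

#include <linux/errno.h>
#include <linux/netdevice.h>

static int sketch_set_prog(struct net_device *dev, struct bpf_prog *prog)
{
        /* Stand-in for bnxt_xdp_set(): reshape the rings and swap the program. */
        return 0;
}

/* ndo_bpf-style dispatcher: only program setup is supported. */
static int sketch_ndo_bpf(struct net_device *dev, struct netdev_bpf *xdp)
{
        switch (xdp->command) {
        case XDP_SETUP_PROG:
                return sketch_set_prog(dev, xdp->prog);
        default:
                return -EINVAL;
        }
}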
470 sinfo->xdp_frags_size, in bnxt_xdp_build_skb()