Lines matching +full:prefetch +full:-dma — search hits in the ixgbe AF_XDP zero-copy code (ixgbe_xsk.c)

1 // SPDX-License-Identifier: GPL-2.0
14 bool xdp_on = READ_ONCE(adapter->xdp_prog); in ixgbe_xsk_pool()
15 int qid = ring->ring_idx; in ixgbe_xsk_pool()
17 if (!xdp_on || !test_bit(qid, adapter->af_xdp_zc_qps)) in ixgbe_xsk_pool()
20 return xsk_get_pool_from_qid(adapter->netdev, qid); in ixgbe_xsk_pool()
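
ixgbe_xsk_pool() is the gate for the zero-copy path: it returns the queue's buffer pool only when an XDP program is attached and the queue's bit is set in af_xdp_zc_qps. A minimal userspace sketch of that gating logic, assuming a fictitious fake_adapter type and a plain bool array in place of the bitmap:

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_QUEUES 64

    struct fake_adapter {
            void *xdp_prog;            /* non-NULL once an XDP prog is attached */
            bool  zc_qps[MAX_QUEUES];  /* stands in for the af_xdp_zc_qps bitmap */
            void *pools[MAX_QUEUES];   /* per-queue pool handles */
    };

    static void *get_xsk_pool(const struct fake_adapter *a, int qid)
    {
            if (!a->xdp_prog || !a->zc_qps[qid])
                    return NULL;       /* not a zero-copy queue */
            return a->pools[qid];      /* like xsk_get_pool_from_qid() */
    }
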
27 struct net_device *netdev = adapter->netdev; in ixgbe_xsk_pool_enable()
31 if (qid >= adapter->num_rx_queues) in ixgbe_xsk_pool_enable()
32 return -EINVAL; in ixgbe_xsk_pool_enable()
34 if (qid >= netdev->real_num_rx_queues || in ixgbe_xsk_pool_enable()
35 qid >= netdev->real_num_tx_queues) in ixgbe_xsk_pool_enable()
36 return -EINVAL; in ixgbe_xsk_pool_enable()
38 err = xsk_pool_dma_map(pool, &adapter->pdev->dev, IXGBE_RX_DMA_ATTR); in ixgbe_xsk_pool_enable()
42 if_running = netif_running(adapter->netdev) && in ixgbe_xsk_pool_enable()
48 set_bit(qid, adapter->af_xdp_zc_qps); in ixgbe_xsk_pool_enable()
54 err = ixgbe_xsk_wakeup(adapter->netdev, qid, XDP_WAKEUP_RX); in ixgbe_xsk_pool_enable()
56 clear_bit(qid, adapter->af_xdp_zc_qps); in ixgbe_xsk_pool_enable()
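
ixgbe_xsk_pool_enable() validates the queue id against the real Rx/Tx ring counts, DMA-maps the pool, marks the queue for zero-copy, and kicks Rx via ixgbe_xsk_wakeup(); on failure it unwinds in reverse order (the disable path below runs the same steps the other way around). An illustrative sketch of that unwind pattern, with map_pool()/kick_rx() as placeholder stubs rather than driver functions:

    #include <errno.h>
    #include <stdbool.h>

    static bool zc_bit[64];

    static int map_pool(int qid)    { (void)qid; return 0; }  /* like xsk_pool_dma_map() */
    static void unmap_pool(int qid) { (void)qid; }
    static int kick_rx(int qid)     { (void)qid; return 0; }  /* like ixgbe_xsk_wakeup() */

    static int enable_queue(int qid, int num_queues)
    {
            int err;

            if (qid >= num_queues)
                    return -EINVAL;    /* reject out-of-range queue ids */

            err = map_pool(qid);
            if (err)
                    return err;

            zc_bit[qid] = true;        /* like set_bit(qid, af_xdp_zc_qps) */

            err = kick_rx(qid);
            if (err) {
                    zc_bit[qid] = false;  /* roll back in reverse order */
                    unmap_pool(qid);
                    return err;
            }
            return 0;
    }
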
70 pool = xsk_get_pool_from_qid(adapter->netdev, qid); in ixgbe_xsk_pool_disable()
72 return -EINVAL; in ixgbe_xsk_pool_disable()
74 if_running = netif_running(adapter->netdev) && in ixgbe_xsk_pool_disable()
80 clear_bit(qid, adapter->af_xdp_zc_qps); in ixgbe_xsk_pool_disable()
107 xdp_prog = READ_ONCE(rx_ring->xdp_prog); in ixgbe_run_xdp_zc()
111 err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog); in ixgbe_run_xdp_zc()
114 if (xsk_uses_need_wakeup(rx_ring->xsk_pool) && err == -ENOBUFS) in ixgbe_run_xdp_zc()
130 spin_lock(&ring->tx_lock); in ixgbe_run_xdp_zc()
133 spin_unlock(&ring->tx_lock); in ixgbe_run_xdp_zc()
141 bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp_zc()
146 trace_xdp_exception(rx_ring->netdev, xdp_prog, act); in ixgbe_run_xdp_zc()
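
ixgbe_run_xdp_zc() dispatches on the program's verdict: XDP_REDIRECT goes through xdp_do_redirect() (an -ENOBUFS there with need_wakeup enabled is treated as a retryable failure), XDP_TX queues the frame on the XDP Tx ring under tx_lock, and unknown actions are warned about and dropped. A sketch of that dispatch shape; the result codes are illustrative stand-ins for the driver's PASS/CONSUMED/FAILED values:

    #include <stdio.h>

    enum act { ACT_PASS, ACT_TX, ACT_REDIRECT, ACT_ABORTED, ACT_DROP };
    enum res { RES_PASS, RES_CONSUMED, RES_FAILED };

    static enum res run_verdict(int act)
    {
            switch (act) {
            case ACT_PASS:
                    return RES_PASS;      /* copy into an skb, hand to the stack */
            case ACT_REDIRECT:
                    return RES_CONSUMED;  /* like a successful xdp_do_redirect() */
            case ACT_TX:
                    return RES_CONSUMED;  /* queue on the XDP Tx ring under tx_lock */
            default:
                    fprintf(stderr, "invalid XDP action %d\n", act);
                    /* fall through, like bpf_warn_invalid_xdp_action() */
            case ACT_ABORTED:             /* like trace_xdp_exception(); fall through */
            case ACT_DROP:
                    return RES_FAILED;    /* buffer is recycled by the caller */
            }
    }
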
155 u16 i = rx_ring->next_to_use; in ixgbe_alloc_rx_buffers_zc()
156 dma_addr_t dma; in ixgbe_alloc_rx_buffers_zc() local
164 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_alloc_rx_buffers_zc()
165 i -= rx_ring->count; in ixgbe_alloc_rx_buffers_zc()
168 bi->xdp = xsk_buff_alloc(rx_ring->xsk_pool); in ixgbe_alloc_rx_buffers_zc()
169 if (!bi->xdp) { in ixgbe_alloc_rx_buffers_zc()
174 dma = xsk_buff_xdp_get_dma(bi->xdp); in ixgbe_alloc_rx_buffers_zc()
177 * because each write-back erases this info. in ixgbe_alloc_rx_buffers_zc()
179 rx_desc->read.pkt_addr = cpu_to_le64(dma); in ixgbe_alloc_rx_buffers_zc()
186 bi = rx_ring->rx_buffer_info; in ixgbe_alloc_rx_buffers_zc()
187 i -= rx_ring->count; in ixgbe_alloc_rx_buffers_zc()
191 rx_desc->wb.upper.length = 0; in ixgbe_alloc_rx_buffers_zc()
193 count--; in ixgbe_alloc_rx_buffers_zc()
196 i += rx_ring->count; in ixgbe_alloc_rx_buffers_zc()
198 if (rx_ring->next_to_use != i) { in ixgbe_alloc_rx_buffers_zc()
199 rx_ring->next_to_use = i; in ixgbe_alloc_rx_buffers_zc()
203 * applicable for weak-ordered memory model archs, in ixgbe_alloc_rx_buffers_zc()
204 * such as IA-64). in ixgbe_alloc_rx_buffers_zc()
207 writel(i, rx_ring->tail); in ixgbe_alloc_rx_buffers_zc()
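
ixgbe_alloc_rx_buffers_zc() fills descriptors from the pool, rewriting pkt_addr every time because each write-back erases it, zeroing wb.upper.length for the final descriptor, and only then bumping the tail register; the writel() is ordered after the descriptor writes (the weak-ordering/IA-64 comment). The loop uses a negative-index trick so the wrap test is a cheap if (!i); a userspace demo of that arithmetic, with RING_COUNT and the starting slot as arbitrary illustrative values:

    #include <stdio.h>

    #define RING_COUNT 8

    int main(void)
    {
            int ring[RING_COUNT] = {0};
            unsigned int next_to_use = 5;
            int fill = 6;                           /* buffers to place */
            int i = (int)next_to_use - RING_COUNT;  /* negative offset from ring end */
            int *bi = &ring[next_to_use];

            do {
                    *bi++ = 1;                      /* stand-in for writing pkt_addr */
                    i++;
                    if (!i) {                       /* walked off the end of the ring */
                            bi = ring;
                            i -= RING_COUNT;
                    }
            } while (--fill);

            i += RING_COUNT;                        /* back to a real ring index */
            printf("tail would be bumped to %d\n", i);  /* like writel(i, tail) */
            return 0;
    }
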
216 unsigned int totalsize = xdp->data_end - xdp->data_meta; in ixgbe_construct_skb_zc()
217 unsigned int metasize = xdp->data - xdp->data_meta; in ixgbe_construct_skb_zc()
220 net_prefetch(xdp->data_meta); in ixgbe_construct_skb_zc()
223 skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); in ixgbe_construct_skb_zc()
227 memcpy(__skb_put(skb, totalsize), xdp->data_meta, in ixgbe_construct_skb_zc()
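
On XDP_PASS, ixgbe_construct_skb_zc() copies metadata plus payload (data_meta through data_end) into a fresh NAPI-allocated skb in one memcpy, so the zero-copy buffer can be recycled immediately. A hedged userspace analogue, where make_skb() and its plain malloc() are illustrative rather than kernel API:

    #include <stdlib.h>
    #include <string.h>

    struct fake_xdp { unsigned char *data_meta, *data, *data_end; };

    static unsigned char *make_skb(const struct fake_xdp *x, size_t *metasize)
    {
            size_t total = (size_t)(x->data_end - x->data_meta);
            unsigned char *skb = malloc(total);     /* like napi_alloc_skb() */

            if (!skb)
                    return NULL;
            memcpy(skb, x->data_meta, total);       /* one copy covers meta + payload */
            *metasize = (size_t)(x->data - x->data_meta);
            return skb;
    }
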
240 u32 ntc = rx_ring->next_to_clean + 1; in ixgbe_inc_ntc()
242 ntc = (ntc < rx_ring->count) ? ntc : 0; in ixgbe_inc_ntc()
243 rx_ring->next_to_clean = ntc; in ixgbe_inc_ntc()
244 prefetch(IXGBE_RX_DESC(rx_ring, ntc)); in ixgbe_inc_ntc()
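
ixgbe_inc_ntc() advances next_to_clean with a compare instead of a modulo and prefetches the descriptor the new index points at. The same advance-and-wrap in isolation:

    /* Mirror of the advance-and-wrap in ixgbe_inc_ntc(); the conditional
     * avoids a modulo on the hot path. */
    static unsigned int inc_ntc(unsigned int ntc, unsigned int ring_count)
    {
            ntc++;
            return ntc < ring_count ? ntc : 0;
    }
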
252 struct ixgbe_adapter *adapter = q_vector->adapter; in ixgbe_clean_rx_irq_zc()
271 rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); in ixgbe_clean_rx_irq_zc()
272 size = le16_to_cpu(rx_desc->wb.upper.length); in ixgbe_clean_rx_irq_zc()
282 bi = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_clean_rx_irq_zc()
288 xsk_buff_free(bi->xdp); in ixgbe_clean_rx_irq_zc()
289 bi->xdp = NULL; in ixgbe_clean_rx_irq_zc()
292 &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; in ixgbe_clean_rx_irq_zc()
293 next_bi->discard = true; in ixgbe_clean_rx_irq_zc()
297 if (unlikely(bi->discard)) { in ixgbe_clean_rx_irq_zc()
298 xsk_buff_free(bi->xdp); in ixgbe_clean_rx_irq_zc()
299 bi->xdp = NULL; in ixgbe_clean_rx_irq_zc()
300 bi->discard = false; in ixgbe_clean_rx_irq_zc()
305 bi->xdp->data_end = bi->xdp->data + size; in ixgbe_clean_rx_irq_zc()
306 xsk_buff_dma_sync_for_cpu(bi->xdp); in ixgbe_clean_rx_irq_zc()
307 xdp_res = ixgbe_run_xdp_zc(adapter, rx_ring, bi->xdp); in ixgbe_clean_rx_irq_zc()
315 xsk_buff_free(bi->xdp); in ixgbe_clean_rx_irq_zc()
320 bi->xdp = NULL; in ixgbe_clean_rx_irq_zc()
330 skb = ixgbe_construct_skb_zc(rx_ring, bi->xdp); in ixgbe_clean_rx_irq_zc()
332 rx_ring->rx_stats.alloc_rx_buff_failed++; in ixgbe_clean_rx_irq_zc()
336 xsk_buff_free(bi->xdp); in ixgbe_clean_rx_irq_zc()
337 bi->xdp = NULL; in ixgbe_clean_rx_irq_zc()
345 total_rx_bytes += skb->len; in ixgbe_clean_rx_irq_zc()
364 if (xsk_uses_need_wakeup(rx_ring->xsk_pool)) { in ixgbe_clean_rx_irq_zc()
365 if (failure || rx_ring->next_to_clean == rx_ring->next_to_use) in ixgbe_clean_rx_irq_zc()
366 xsk_set_rx_need_wakeup(rx_ring->xsk_pool); in ixgbe_clean_rx_irq_zc()
368 xsk_clear_rx_need_wakeup(rx_ring->xsk_pool); in ixgbe_clean_rx_irq_zc()
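
The Rx epilogue arms or clears the pool's need_wakeup flag: if allocation failed or the ring ran dry (next_to_clean caught up with next_to_use), user space is told to kick the driver; otherwise the flag is cleared so the fast path can skip the syscall. A sketch with plain bools standing in for the xsk_{set,clear}_rx_need_wakeup() helpers:

    #include <stdbool.h>

    static void update_rx_wakeup(bool uses_need_wakeup, bool failure,
                                 unsigned int ntc, unsigned int ntu,
                                 bool *need_wakeup)
    {
            if (!uses_need_wakeup)
                    return;
            if (failure || ntc == ntu)     /* ring empty or out of buffers */
                    *need_wakeup = true;   /* like xsk_set_rx_need_wakeup() */
            else
                    *need_wakeup = false;  /* like xsk_clear_rx_need_wakeup() */
    }
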
380 for (i = 0; i < rx_ring->count; i++) { in ixgbe_xsk_clean_rx_ring()
381 bi = &rx_ring->rx_buffer_info[i]; in ixgbe_xsk_clean_rx_ring()
383 if (!bi->xdp) in ixgbe_xsk_clean_rx_ring()
386 xsk_buff_free(bi->xdp); in ixgbe_xsk_clean_rx_ring()
387 bi->xdp = NULL; in ixgbe_xsk_clean_rx_ring()
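
ixgbe_xsk_clean_rx_ring() is a straight teardown sweep: every slot still holding a buffer is freed and NULLed so a repeated pass is a no-op. A minimal analogue:

    #include <stdlib.h>

    static void clean_ring(void **slots, unsigned int count)
    {
            for (unsigned int i = 0; i < count; i++) {
                    if (!slots[i])
                            continue;
                    free(slots[i]);        /* like xsk_buff_free() */
                    slots[i] = NULL;
            }
    }
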
393 struct xsk_buff_pool *pool = xdp_ring->xsk_pool; in ixgbe_xmit_zc()
398 dma_addr_t dma; in ixgbe_xmit_zc() local
401 while (budget-- > 0) { in ixgbe_xmit_zc()
407 if (!netif_carrier_ok(xdp_ring->netdev)) in ixgbe_xmit_zc()
413 dma = xsk_buff_raw_get_dma(pool, desc.addr); in ixgbe_xmit_zc()
414 xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len); in ixgbe_xmit_zc()
416 tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use]; in ixgbe_xmit_zc()
417 tx_bi->bytecount = desc.len; in ixgbe_xmit_zc()
418 tx_bi->xdpf = NULL; in ixgbe_xmit_zc()
419 tx_bi->gso_segs = 1; in ixgbe_xmit_zc()
421 tx_desc = IXGBE_TX_DESC(xdp_ring, xdp_ring->next_to_use); in ixgbe_xmit_zc()
422 tx_desc->read.buffer_addr = cpu_to_le64(dma); in ixgbe_xmit_zc()
429 tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type); in ixgbe_xmit_zc()
430 tx_desc->read.olinfo_status = in ixgbe_xmit_zc()
433 xdp_ring->next_to_use++; in ixgbe_xmit_zc()
434 if (xdp_ring->next_to_use == xdp_ring->count) in ixgbe_xmit_zc()
435 xdp_ring->next_to_use = 0; in ixgbe_xmit_zc()
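
ixgbe_xmit_zc() is a budgeted producer loop: it stops early when the carrier is down or the pool has no more descriptors, syncs each buffer's DMA for the device, fills the hardware descriptor, and wraps next_to_use manually. A skeleton of the control flow, with peek_desc()/fill_desc() as placeholders, not driver functions:

    #include <stdbool.h>

    struct txq { unsigned int next_to_use, count; };

    static bool peek_desc(void)           { return true; }  /* like xsk_tx_peek_desc() */
    static void fill_desc(unsigned int i) { (void)i; }      /* write addr/len into the desc */

    static bool xmit_budget(struct txq *q, int budget, bool link_up)
    {
            while (budget-- > 0) {
                    if (!link_up || !peek_desc())
                            break;             /* link down or nothing to send */

                    fill_desc(q->next_to_use);

                    q->next_to_use++;
                    if (q->next_to_use == q->count)
                            q->next_to_use = 0;  /* manual wrap, as in the driver */
            }
            return budget >= 0;                  /* true: work ran out before budget */
    }
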
449 xdp_return_frame(tx_bi->xdpf); in ixgbe_clean_xdp_tx_buffer()
450 dma_unmap_single(tx_ring->dev, in ixgbe_clean_xdp_tx_buffer()
451 dma_unmap_addr(tx_bi, dma), in ixgbe_clean_xdp_tx_buffer()
459 u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; in ixgbe_clean_xdp_tx_irq()
461 struct xsk_buff_pool *pool = tx_ring->xsk_pool; in ixgbe_clean_xdp_tx_irq()
466 tx_bi = &tx_ring->tx_buffer_info[ntc]; in ixgbe_clean_xdp_tx_irq()
470 if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD))) in ixgbe_clean_xdp_tx_irq()
473 total_bytes += tx_bi->bytecount; in ixgbe_clean_xdp_tx_irq()
474 total_packets += tx_bi->gso_segs; in ixgbe_clean_xdp_tx_irq()
476 if (tx_bi->xdpf) in ixgbe_clean_xdp_tx_irq()
481 tx_bi->xdpf = NULL; in ixgbe_clean_xdp_tx_irq()
486 if (unlikely(ntc == tx_ring->count)) { in ixgbe_clean_xdp_tx_irq()
488 tx_bi = tx_ring->tx_buffer_info; in ixgbe_clean_xdp_tx_irq()
492 /* issue prefetch for next Tx descriptor */ in ixgbe_clean_xdp_tx_irq()
493 prefetch(tx_desc); in ixgbe_clean_xdp_tx_irq()
496 tx_ring->next_to_clean = ntc; in ixgbe_clean_xdp_tx_irq()
506 return ixgbe_xmit_zc(tx_ring, q_vector->tx.work_limit); in ixgbe_clean_xdp_tx_irq()
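
ixgbe_clean_xdp_tx_irq() sweeps completions from next_to_clean until it reaches a descriptor whose DD (descriptor done) status bit the hardware has not yet set, accumulating byte and packet counters as it goes; redirected frames (tx_bi->xdpf set) are unmapped and returned via ixgbe_clean_xdp_tx_buffer(), while native XSK frames are completed back to the pool in a batch. A simplified sweep, where DD_BIT and the descriptor layout stand in for the ixgbe write-back format:

    #include <stdint.h>

    #define DD_BIT 0x1u

    struct fake_desc { uint32_t status; };
    struct fake_buf  { unsigned int bytecount; };

    static unsigned int sweep_completions(const struct fake_desc *desc,
                                          const struct fake_buf *buf,
                                          unsigned int ntc, unsigned int ntu,
                                          unsigned int count,
                                          unsigned long long *total_bytes)
    {
            while (ntc != ntu) {
                    if (!(desc[ntc].status & DD_BIT))
                            break;             /* hardware hasn't finished this one */

                    *total_bytes += buf[ntc].bytecount;

                    ntc++;
                    if (ntc == count)
                            ntc = 0;           /* wrap, as the driver does */
            }
            return ntc;                        /* becomes tx_ring->next_to_clean */
    }
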
514 if (test_bit(__IXGBE_DOWN, &adapter->state)) in ixgbe_xsk_wakeup()
515 return -ENETDOWN; in ixgbe_xsk_wakeup()
517 if (!READ_ONCE(adapter->xdp_prog)) in ixgbe_xsk_wakeup()
518 return -EINVAL; in ixgbe_xsk_wakeup()
520 if (qid >= adapter->num_xdp_queues) in ixgbe_xsk_wakeup()
521 return -EINVAL; in ixgbe_xsk_wakeup()
523 ring = adapter->xdp_ring[qid]; in ixgbe_xsk_wakeup()
525 if (test_bit(__IXGBE_TX_DISABLED, &ring->state)) in ixgbe_xsk_wakeup()
526 return -ENETDOWN; in ixgbe_xsk_wakeup()
528 if (!ring->xsk_pool) in ixgbe_xsk_wakeup()
529 return -EINVAL; in ixgbe_xsk_wakeup()
531 if (!napi_if_scheduled_mark_missed(&ring->q_vector->napi)) { in ixgbe_xsk_wakeup()
532 u64 eics = BIT_ULL(ring->q_vector->v_idx); in ixgbe_xsk_wakeup()
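
ixgbe_xsk_wakeup() is a ladder of cheap validity checks (device up, XDP program attached, qid in range, Tx not disabled, pool present), and it only raises a software interrupt, via an EICS write of BIT_ULL(v_idx), when NAPI is not already scheduled. A condensed sketch; irq_rearm() and napi_mark_missed() are placeholders for the register write and for napi_if_scheduled_mark_missed():

    #include <errno.h>
    #include <stdbool.h>
    #include <stdint.h>

    static bool napi_mark_missed(void)   { return false; }
    static void irq_rearm(uint64_t eics) { (void)eics; }

    static int xsk_wakeup(bool down, bool has_prog, int qid, int num_queues,
                          bool has_pool, unsigned int v_idx)
    {
            if (down)
                    return -ENETDOWN;
            if (!has_prog || qid >= num_queues || !has_pool)
                    return -EINVAL;

            if (!napi_mark_missed()) {
                    uint64_t eics = 1ULL << v_idx;  /* like BIT_ULL(v_idx) */
                    irq_rearm(eics);                /* trigger the queue's interrupt */
            }
            return 0;
    }
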
542 u16 ntc = tx_ring->next_to_clean, ntu = tx_ring->next_to_use; in ixgbe_xsk_clean_tx_ring()
543 struct xsk_buff_pool *pool = tx_ring->xsk_pool; in ixgbe_xsk_clean_tx_ring()
548 tx_bi = &tx_ring->tx_buffer_info[ntc]; in ixgbe_xsk_clean_tx_ring()
550 if (tx_bi->xdpf) in ixgbe_xsk_clean_tx_ring()
555 tx_bi->xdpf = NULL; in ixgbe_xsk_clean_tx_ring()
558 if (ntc == tx_ring->count) in ixgbe_xsk_clean_tx_ring()
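
ixgbe_xsk_clean_tx_ring() drains outstanding Tx work in two cases: slots carrying an xdpf pointer came from XDP_REDIRECT and are unmapped and freed (ixgbe_clean_xdp_tx_buffer() above), while native XSK descriptors are only counted and then completed back to the pool in one batch. A sketch with illustrative types; release_frame() is a placeholder:

    #include <stddef.h>

    struct tx_slot { void *xdpf; };

    static void release_frame(void *f) { (void)f; }  /* unmap + xdp_return_frame() */

    static unsigned int drain_tx(struct tx_slot *slots, unsigned int ntc,
                                 unsigned int ntu, unsigned int count)
    {
            unsigned int xsk_frames = 0;

            while (ntc != ntu) {
                    if (slots[ntc].xdpf) {
                            release_frame(slots[ntc].xdpf);  /* redirected frame */
                            slots[ntc].xdpf = NULL;
                    } else {
                            xsk_frames++;      /* native XSK desc: complete in bulk */
                    }
                    ntc++;
                    if (ntc == count)
                            ntc = 0;
            }
            return xsk_frames;                 /* handed back to the pool in a batch */
    }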