// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2020, Intel Corporation. */
3
4 #include <linux/if_vlan.h>
5 #include <net/xdp_sock_drv.h>
6
7 #include "igc.h"
8 #include "igc_xdp.h"
9
igc_xdp_set_prog(struct igc_adapter * adapter,struct bpf_prog * prog,struct netlink_ext_ack * extack)10 int igc_xdp_set_prog(struct igc_adapter *adapter, struct bpf_prog *prog,
11 struct netlink_ext_ack *extack)
12 {
13 struct net_device *dev = adapter->netdev;
14 bool if_running = netif_running(dev);
15 struct bpf_prog *old_prog;
16 bool need_update;
17
18 if (dev->mtu > ETH_DATA_LEN) {
19 /* For now, the driver doesn't support XDP functionality with
20 * jumbo frames so we return error.
21 */
22 NL_SET_ERR_MSG_MOD(extack, "Jumbo frames not supported");
23 return -EOPNOTSUPP;
24 }
25
26 need_update = !!adapter->xdp_prog != !!prog;
27 if (if_running && need_update)
28 igc_close(dev);
29
30 old_prog = xchg(&adapter->xdp_prog, prog);
31 if (old_prog)
32 bpf_prog_put(old_prog);
33
34 if (prog)
35 xdp_features_set_redirect_target(dev, true);
36 else
37 xdp_features_clear_redirect_target(dev);
38
39 if (if_running && need_update)
40 igc_open(dev);
41
42 return 0;
43 }
44
/* Attach an AF_XDP buffer pool to the Rx/Tx queue pair @queue_id.
 * Returns 0 on success or a negative errno.
 */
static int igc_xdp_enable_pool(struct igc_adapter *adapter,
			       struct xsk_buff_pool *pool, u16 queue_id)
{
	struct net_device *netdev = adapter->netdev;
	struct device *dma_dev = &adapter->pdev->dev;
	struct igc_ring *rxr, *txr;
	struct napi_struct *napi;
	bool reset_queues;
	u32 frame_sz;
	int err;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	/* With XDP enabled, frames spanning multiple buffers are not
	 * supported, so the xsk frame must be large enough to hold a
	 * maximum-sized ethernet frame plus double VLAN tagging.
	 */
	frame_sz = xsk_pool_get_rx_frame_size(pool);
	if (frame_sz < ETH_FRAME_LEN + VLAN_HLEN * 2)
		return -EOPNOTSUPP;

	err = xsk_pool_dma_map(pool, dma_dev, IGC_RX_DMA_ATTR);
	if (err) {
		netdev_err(netdev, "Failed to map xsk pool\n");
		return err;
	}

	reset_queues = netif_running(adapter->netdev) &&
		       igc_xdp_is_enabled(adapter);

	rxr = adapter->rx_ring[queue_id];
	txr = adapter->tx_ring[queue_id];
	/* Both rings are serviced by the same napi context. */
	napi = &rxr->q_vector->napi;

	if (reset_queues) {
		igc_disable_rx_ring(rxr);
		igc_disable_tx_ring(txr);
		napi_disable(napi);
	}

	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &rxr->flags);
	set_bit(IGC_RING_FLAG_AF_XDP_ZC, &txr->flags);

	if (reset_queues) {
		napi_enable(napi);
		igc_enable_rx_ring(rxr);
		igc_enable_tx_ring(txr);

		err = igc_xsk_wakeup(netdev, queue_id, XDP_WAKEUP_RX);
		if (err) {
			xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
			return err;
		}
	}

	return 0;
}
106
/* Detach the AF_XDP buffer pool from the Rx/Tx queue pair @queue_id.
 * Returns 0 on success or a negative errno.
 */
static int igc_xdp_disable_pool(struct igc_adapter *adapter, u16 queue_id)
{
	struct igc_ring *rxr, *txr;
	struct xsk_buff_pool *pool;
	struct napi_struct *napi;
	bool reset_queues;

	if (queue_id >= adapter->num_rx_queues ||
	    queue_id >= adapter->num_tx_queues)
		return -EINVAL;

	pool = xsk_get_pool_from_qid(adapter->netdev, queue_id);
	if (!pool)
		return -EINVAL;

	reset_queues = netif_running(adapter->netdev) &&
		       igc_xdp_is_enabled(adapter);

	rxr = adapter->rx_ring[queue_id];
	txr = adapter->tx_ring[queue_id];
	/* Both rings are serviced by the same napi context. */
	napi = &rxr->q_vector->napi;

	if (reset_queues) {
		igc_disable_rx_ring(rxr);
		igc_disable_tx_ring(txr);
		napi_disable(napi);
	}

	xsk_pool_dma_unmap(pool, IGC_RX_DMA_ATTR);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &rxr->flags);
	clear_bit(IGC_RING_FLAG_AF_XDP_ZC, &txr->flags);

	if (reset_queues) {
		napi_enable(napi);
		igc_enable_rx_ring(rxr);
		igc_enable_tx_ring(txr);
	}

	return 0;
}
147
/* Enable the pool on @queue_id when one is supplied, disable otherwise. */
int igc_xdp_setup_pool(struct igc_adapter *adapter, struct xsk_buff_pool *pool,
		       u16 queue_id)
{
	if (pool)
		return igc_xdp_enable_pool(adapter, pool, queue_id);

	return igc_xdp_disable_pool(adapter, queue_id);
}
154