/* SPDX-License-Identifier: GPL-2.0 */
/* Interface for implementing AF_XDP zero-copy support in drivers.
 * Copyright(c) 2020 Intel Corporation.
 */

#ifndef _LINUX_XDP_SOCK_DRV_H
#define _LINUX_XDP_SOCK_DRV_H

#include <net/xdp_sock.h>
#include <net/xsk_buff_pool.h>

#define XDP_UMEM_MIN_CHUNK_SHIFT 11
#define XDP_UMEM_MIN_CHUNK_SIZE (1 << XDP_UMEM_MIN_CHUNK_SHIFT)

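/**
 * struct xsk_cb_desc - driver-private data to mirror into buffer cb areas
 * @src: source to copy from
 * @off: byte offset within the cb area to copy to
 * @bytes: number of bytes to copy
 *
 * Passed to xsk_pool_fill_cb() to replicate @bytes bytes of driver-private
 * data into the cb field of every buffer in the pool.
 */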
struct xsk_cb_desc {
	void *src;
	u8 off;
	u8 bytes;
};

#ifdef CONFIG_XDP_SOCKETS

void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries);
bool xsk_tx_peek_desc(struct xsk_buff_pool *pool, struct xdp_desc *desc);
u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max);
void xsk_tx_release(struct xsk_buff_pool *pool);
struct xsk_buff_pool *xsk_get_pool_from_qid(struct net_device *dev,
					    u16 queue_id);
void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool);
void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool);
bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool);

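/* Example (illustrative sketch only, not from any particular driver):
 * the need_wakeup flags let the kernel tell user space when it must
 * kick processing with a syscall. A driver's NAPI poll loop might use
 * them roughly like this, where out_of_tx_descs is a hypothetical
 * condition of the driver's own TX ring:
 *
 *	if (xsk_uses_need_wakeup(pool)) {
 *		if (out_of_tx_descs)
 *			xsk_set_tx_need_wakeup(pool);
 *		else
 *			xsk_clear_tx_need_wakeup(pool);
 *	}
 */
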
static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return XDP_PACKET_HEADROOM + pool->headroom;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return pool->chunk_size;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return xsk_pool_get_chunk_size(pool) - xsk_pool_get_headroom(pool);
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
	xp_set_rxq_info(pool, rxq);
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
	xp_fill_cb(pool, desc);
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
	xp_dma_unmap(pool, attrs);
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	struct xdp_umem *umem = pool->umem;

	return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs);
}

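/* Example (hypothetical setup/teardown sketch; the error handling and
 * the netdev variable are placeholders): a driver maps the pool once
 * when zero-copy is enabled on a queue and unmaps it on teardown:
 *
 *	err = xsk_pool_dma_map(pool, netdev->dev.parent, 0);
 *	if (err)
 *		return err;
 *	...
 *	xsk_pool_dma_unmap(pool, 0);
 */
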
static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_dma(xskb);
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	return xp_get_frame_dma(xskb);
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return xp_alloc(pool);
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return !xp_mb_desc(desc);
}

/* Returns as many entries as possible up to max. 0 <= N <= max. */
static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return xp_alloc_batch(pool, xdp, max);
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return xp_can_alloc(pool, count);
}

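/* Example (illustrative RX fill sketch; the rx_ring layout and batch
 * size are hypothetical): a driver refilling its RX ring would
 * typically allocate in batches and program the DMA addresses:
 *
 *	struct xdp_buff *bufs[64];
 *	u32 i, n;
 *
 *	n = xsk_buff_alloc_batch(pool, bufs, 64);
 *	for (i = 0; i < n; i++)
 *		rx_ring[i].addr = xsk_buff_xdp_get_dma(bufs[i]);
 */
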
static inline void xsk_buff_free(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
	struct list_head *xskb_list = &xskb->pool->xskb_list;
	struct xdp_buff_xsk *pos, *tmp;

	if (likely(!xdp_buff_has_frags(xdp)))
		goto out;

	list_for_each_entry_safe(pos, tmp, xskb_list, list_node) {
		list_del(&pos->list_node);
		xp_free(pos);
	}

	xdp_get_shared_info_from_buff(xdp)->nr_frags = 0;
out:
	xp_free(xskb);
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	const void *data = xdp->data;
	struct xdp_buff_xsk *frag;

	if (!__xdp_buff_add_frag(head, virt_to_netmem(data),
				 offset_in_page(data), xdp->data_end - data,
				 xdp->frame_sz, false))
		return false;

	frag = container_of(xdp, struct xdp_buff_xsk, xdp);
	list_add_tail(&frag->list_node, &frag->pool->xskb_list);

	return true;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff *ret = NULL;
	struct xdp_buff_xsk *frag;

	frag = list_first_entry_or_null(&xskb->pool->xskb_list,
					struct xdp_buff_xsk, list_node);
	if (frag) {
		list_del(&frag->list_node);
		ret = &frag->xdp;
	}

	return ret;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
	struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);

	list_del(&xskb->list_node);
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
	struct xdp_buff_xsk *frag;

	frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
			       list_node);
	return &frag->xdp;
}

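/* Example (illustrative multi-buffer RX sketch): when building a
 * multi-frag frame, each continuation buffer is chained onto the first
 * one, and must be freed by the driver if chaining fails:
 *
 *	if (!xsk_buff_add_frag(first, xdp)) {
 *		xsk_buff_free(xdp);
 *		... drop the whole frame ...
 *	}
 */
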
static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
	xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
	xdp->data_meta = xdp->data;
	xdp->data_end = xdp->data + size;
	xdp->flags = 0;
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return xp_raw_get_dma(pool, addr);
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return xp_raw_get_data(pool, addr);
}

#define XDP_TXMD_FLAGS_VALID ( \
		XDP_TXMD_FLAGS_TIMESTAMP | \
		XDP_TXMD_FLAGS_CHECKSUM | \
		XDP_TXMD_FLAGS_LAUNCH_TIME | \
	0)

static inline bool
xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return !(meta->flags & ~XDP_TXMD_FLAGS_VALID);
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	struct xsk_tx_metadata *meta;

	if (!pool->tx_metadata_len)
		return NULL;

	meta = xp_raw_get_data(pool, addr) - pool->tx_metadata_len;
	if (unlikely(!xsk_buff_valid_tx_metadata(meta)))
		return NULL; /* no way to signal the error to the user */

	return meta;
}

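/* Example (illustrative sketch): a driver with TX metadata support
 * checks for per-frame requests before posting a descriptor; the
 * timestamping step below is a placeholder for driver-specific work:
 *
 *	meta = xsk_buff_get_metadata(pool, desc.addr);
 *	if (meta && (meta->flags & XDP_TXMD_FLAGS_TIMESTAMP))
 *		... arm hardware completion timestamping for this frame ...
 */
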
static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);

	xp_dma_sync_for_cpu(xskb);
}

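/* Example (illustrative RX completion sketch; prog and len come from
 * the driver's own context): on a received frame, set the size, sync
 * the buffer for the CPU, then run the XDP program:
 *
 *	xsk_buff_set_size(xdp, len);
 *	xsk_buff_dma_sync_for_cpu(xdp);
 *	act = bpf_prog_run_xdp(prog, xdp);
 */
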
static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
	xp_dma_sync_for_device(pool, dma, size);
}

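/* Example (illustrative TX sketch; the hardware posting step is a
 * placeholder): descriptors peeked from the TX ring carry umem
 * addresses that the raw helpers translate:
 *
 *	while (xsk_tx_peek_desc(pool, &desc)) {
 *		dma_addr_t dma = xsk_buff_raw_get_dma(pool, desc.addr);
 *
 *		xsk_buff_raw_dma_sync_for_device(pool, dma, desc.len);
 *		... post dma + desc.len to the hardware TX ring ...
 *	}
 *	xsk_tx_release(pool);
 */
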
#else

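/* Stub versions of the helpers above, used when AF_XDP sockets are
 * compiled out (!CONFIG_XDP_SOCKETS) so that drivers do not need
 * ifdefs at every call site.
 */
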
static inline void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries)
{
}

static inline bool xsk_tx_peek_desc(struct xsk_buff_pool *pool,
				    struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_tx_peek_release_desc_batch(struct xsk_buff_pool *pool, u32 max)
{
	return 0;
}

static inline void xsk_tx_release(struct xsk_buff_pool *pool)
{
}

static inline struct xsk_buff_pool *
xsk_get_pool_from_qid(struct net_device *dev, u16 queue_id)
{
	return NULL;
}

static inline void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_set_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_rx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline void xsk_clear_tx_need_wakeup(struct xsk_buff_pool *pool)
{
}

static inline bool xsk_uses_need_wakeup(struct xsk_buff_pool *pool)
{
	return false;
}

static inline u32 xsk_pool_get_headroom(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_chunk_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline u32 xsk_pool_get_rx_frame_size(struct xsk_buff_pool *pool)
{
	return 0;
}

static inline void xsk_pool_set_rxq_info(struct xsk_buff_pool *pool,
					 struct xdp_rxq_info *rxq)
{
}

static inline void xsk_pool_fill_cb(struct xsk_buff_pool *pool,
				    struct xsk_cb_desc *desc)
{
}

static inline void xsk_pool_dma_unmap(struct xsk_buff_pool *pool,
				      unsigned long attrs)
{
}

static inline int xsk_pool_dma_map(struct xsk_buff_pool *pool,
				   struct device *dev, unsigned long attrs)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline dma_addr_t xsk_buff_xdp_get_frame_dma(struct xdp_buff *xdp)
{
	return 0;
}

static inline struct xdp_buff *xsk_buff_alloc(struct xsk_buff_pool *pool)
{
	return NULL;
}

static inline bool xsk_is_eop_desc(const struct xdp_desc *desc)
{
	return false;
}

static inline u32 xsk_buff_alloc_batch(struct xsk_buff_pool *pool, struct xdp_buff **xdp, u32 max)
{
	return 0;
}

static inline bool xsk_buff_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
	return false;
}

static inline void xsk_buff_free(struct xdp_buff *xdp)
{
}

static inline bool xsk_buff_add_frag(struct xdp_buff *head,
				     struct xdp_buff *xdp)
{
	return false;
}

static inline struct xdp_buff *xsk_buff_get_frag(const struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_del_tail(struct xdp_buff *tail)
{
}

static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
{
	return NULL;
}

static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
{
}

static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
					      u64 addr)
{
	return 0;
}

static inline void *xsk_buff_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline bool xsk_buff_valid_tx_metadata(const struct xsk_tx_metadata *meta)
{
	return false;
}

static inline struct xsk_tx_metadata *xsk_buff_get_metadata(struct xsk_buff_pool *pool, u64 addr)
{
	return NULL;
}

static inline void xsk_buff_dma_sync_for_cpu(struct xdp_buff *xdp)
{
}

static inline void xsk_buff_raw_dma_sync_for_device(struct xsk_buff_pool *pool,
						    dma_addr_t dma,
						    size_t size)
{
}

#endif /* CONFIG_XDP_SOCKETS */

#endif /* _LINUX_XDP_SOCK_DRV_H */