// SPDX-License-Identifier: GPL-2.0
/*
 * tcpdevmem netcat. Works similarly to netcat but does device memory TCP
 * instead of regular TCP. Uses udmabuf to mock a dmabuf provider.
 *
 * Usage:
 *
 *	On server:
 *	ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201
 *
 *	On client:
 *	echo -n "hello\nworld" | nc <server IP> 5201 -p 5201
 *
 * Test data validation:
 *
 *	On server:
 *	ncdevmem -s <server IP> [-c <client IP>] -f eth1 -l -p 5201 -v 7
 *
 *	On client:
 *	yes $(echo -e \\x01\\x02\\x03\\x04\\x05\\x06) | \
 *		tr \\n \\0 | \
 *		head -c 5G | \
 *		nc <server IP> 5201 -p 5201
 *
 * Note this is compatible with regular netcat, i.e. the sender or receiver can
 * be replaced with regular netcat to test the RX or TX path in isolation.
 */
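/*
 * Options (see main()): -l run as listening server, -s <server IP>,
 * -c <client IP>, -p <port>, -v <N> validate the payload against a byte
 * pattern that counts 1..N-1 then 0, -f <ifname> (required), and
 * -t <start queue> / -q <number of queues> to select the RX queues bound to
 * the dmabuf. If neither -s nor -c is given, the binding self-tests in
 * run_devmem_tests() are run instead.
 */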
#define _GNU_SOURCE
#define __EXPORTED_HEADERS__

#include <linux/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <stdbool.h>
#include <string.h>
#include <errno.h>
#define __iovec_defined
#include <fcntl.h>
#include <malloc.h>
#include <error.h>

#include <arpa/inet.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>

#include <linux/memfd.h>
#include <linux/dma-buf.h>
#include <linux/udmabuf.h>
#include <libmnl/libmnl.h>
#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/netdev.h>
#include <linux/ethtool_netlink.h>
#include <time.h>
#include <net/if.h>

#include "netdev-user.h"
#include "ethtool-user.h"
#include <ynl.h>

#define PAGE_SHIFT 12
#define TEST_PREFIX "ncdevmem"
#define NUM_PAGES 16000

#ifndef MSG_SOCK_DEVMEM
#define MSG_SOCK_DEVMEM 0x2000000
#endif

static char *server_ip;
static char *client_ip;
static char *port;
static size_t do_validation;
static int start_queue = -1;
static int num_queues = -1;
static char *ifname;
static unsigned int ifindex;
static unsigned int dmabuf_id;

struct memory_buffer {
	int fd;
	size_t size;

	int devfd;
	int memfd;
	char *buf_mem;
};

struct memory_provider {
	struct memory_buffer *(*alloc)(size_t size);
	void (*free)(struct memory_buffer *ctx);
	void (*memcpy_from_device)(void *dst, struct memory_buffer *src,
				   size_t off, int n);
};

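/* Allocate a dmabuf that stands in for real device memory: back it with a
 * shrink-sealed memfd, turn that into a dmabuf via UDMABUF_CREATE on
 * /dev/udmabuf, and mmap() it so the CPU can read back what the NIC wrote.
 */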
static struct memory_buffer *udmabuf_alloc(size_t size)
{
	struct udmabuf_create create;
	struct memory_buffer *ctx;
	int ret;

	ctx = malloc(sizeof(*ctx));
	if (!ctx)
		error(1, ENOMEM, "malloc failed");

	ctx->size = size;

	ctx->devfd = open("/dev/udmabuf", O_RDWR);
	if (ctx->devfd < 0)
		error(1, errno,
		      "%s: [skip,no-udmabuf: Unable to access DMA buffer device file]\n",
		      TEST_PREFIX);

	ctx->memfd = memfd_create("udmabuf-test", MFD_ALLOW_SEALING);
	if (ctx->memfd < 0)
		error(1, errno, "%s: [skip,no-memfd]\n", TEST_PREFIX);

	ret = fcntl(ctx->memfd, F_ADD_SEALS, F_SEAL_SHRINK);
	if (ret < 0)
		error(1, errno, "%s: [skip,fcntl-add-seals]\n", TEST_PREFIX);

	ret = ftruncate(ctx->memfd, size);
	if (ret == -1)
		error(1, errno, "%s: [FAIL,memfd-truncate]\n", TEST_PREFIX);

	memset(&create, 0, sizeof(create));

	create.memfd = ctx->memfd;
	create.offset = 0;
	create.size = size;
	ctx->fd = ioctl(ctx->devfd, UDMABUF_CREATE, &create);
	if (ctx->fd < 0)
		error(1, errno, "%s: [FAIL, create udmabuf]\n", TEST_PREFIX);

	ctx->buf_mem = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
			    ctx->fd, 0);
	if (ctx->buf_mem == MAP_FAILED)
		error(1, errno, "%s: [FAIL, map udmabuf]\n", TEST_PREFIX);

	return ctx;
}

static void udmabuf_free(struct memory_buffer *ctx)
{
	munmap(ctx->buf_mem, ctx->size);
	close(ctx->fd);
	close(ctx->memfd);
	close(ctx->devfd);
	free(ctx);
}

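/* Copy n bytes out of the dmabuf into ordinary memory, bracketing the access
 * with DMA_BUF_SYNC_START/DMA_BUF_SYNC_END so the CPU view is coherent with
 * device writes.
 */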
static void udmabuf_memcpy_from_device(void *dst, struct memory_buffer *src,
				       size_t off, int n)
{
	struct dma_buf_sync sync = {};

	sync.flags = DMA_BUF_SYNC_START;
	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);

	memcpy(dst, src->buf_mem + off, n);

	sync.flags = DMA_BUF_SYNC_END;
	ioctl(src->fd, DMA_BUF_IOCTL_SYNC, &sync);
}

static struct memory_provider udmabuf_memory_provider = {
	.alloc = udmabuf_alloc,
	.free = udmabuf_free,
	.memcpy_from_device = udmabuf_memcpy_from_device,
};

static struct memory_provider *provider = &udmabuf_memory_provider;

static void print_nonzero_bytes(void *ptr, size_t size)
{
	unsigned char *p = ptr;
	unsigned int i;

	for (i = 0; i < size; i++)
		putchar(p[i]);
}

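/* Check a received chunk against the repeating byte pattern 1..N-1,0 (N is
 * the -v argument), i.e. what the "yes ... | tr \\n \\0" sender from the
 * header comment produces. Gives up after about 20 mismatches.
 */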
void validate_buffer(void *line, size_t size)
{
	static unsigned char seed = 1;
	unsigned char *ptr = line;
	int errors = 0;
	size_t i;

	for (i = 0; i < size; i++) {
		if (ptr[i] != seed) {
			fprintf(stderr,
				"Failed validation: expected=%u, actual=%u, index=%lu\n",
				seed, ptr[i], i);
			errors++;
			if (errors > 20)
				error(1, 0, "validation failed.");
		}
		seed++;
		if (seed == do_validation)
			seed = 0;
	}

	fprintf(stdout, "Validated buffer\n");
}

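/* Query how many RX queues the device has (rx + combined channels) via the
 * ethtool netlink family.
 */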
static int rxq_num(int ifindex)
{
	struct ethtool_channels_get_req *req;
	struct ethtool_channels_get_rsp *rsp;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int num = -1;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = ethtool_channels_get_req_alloc();
	ethtool_channels_get_req_set_header_dev_index(req, ifindex);
	rsp = ethtool_channels_get(ys, req);
	if (rsp)
		num = rsp->rx_count + rsp->combined_count;
	ethtool_channels_get_req_free(req);
	ethtool_channels_get_rsp_free(rsp);

	ynl_sock_destroy(ys);

	return num;
}

#define run_command(cmd, ...)						\
	({								\
		char command[256];					\
		memset(command, 0, sizeof(command));			\
		snprintf(command, sizeof(command), cmd, ##__VA_ARGS__);	\
		fprintf(stderr, "Running: %s\n", command);		\
		system(command);					\
	})

static int reset_flow_steering(void)
{
	/* Depending on the NIC, toggling ntuple off and on might not
	 * be allowed. Additionally, attempting to delete existing filters
	 * will fail if no filters are present. Therefore, do not enforce
	 * the exit status.
	 */

	run_command("sudo ethtool -K %s ntuple off >&2", ifname);
	run_command("sudo ethtool -K %s ntuple on >&2", ifname);
	run_command(
		"sudo ethtool -n %s | grep 'Filter:' | awk '{print $2}' | xargs -n1 ethtool -N %s delete >&2",
		ifname, ifname);
	return 0;
}

static const char *tcp_data_split_str(int val)
{
	switch (val) {
	case 0:
		return "off";
	case 1:
		return "auto";
	case 2:
		return "on";
	default:
		return "?";
	}
}

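/* Toggle tcp-data-split (header split) through ethtool netlink and print the
 * setting read back. Devmem RX needs header split so that protocol headers
 * land in host memory while the payload stays in the dmabuf.
 */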
static int configure_headersplit(bool on)
{
	struct ethtool_rings_get_req *get_req;
	struct ethtool_rings_get_rsp *get_rsp;
	struct ethtool_rings_set_req *req;
	struct ynl_error yerr;
	struct ynl_sock *ys;
	int ret;

	ys = ynl_sock_create(&ynl_ethtool_family, &yerr);
	if (!ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = ethtool_rings_set_req_alloc();
	ethtool_rings_set_req_set_header_dev_index(req, ifindex);
	/* 0 - off, 1 - auto, 2 - on */
	ethtool_rings_set_req_set_tcp_data_split(req, on ? 2 : 0);
	ret = ethtool_rings_set(ys, req);
	if (ret < 0)
		fprintf(stderr, "YNL failed: %s\n", ys->err.msg);
	ethtool_rings_set_req_free(req);

	if (ret == 0) {
		get_req = ethtool_rings_get_req_alloc();
		ethtool_rings_get_req_set_header_dev_index(get_req, ifindex);
		get_rsp = ethtool_rings_get(ys, get_req);
		ethtool_rings_get_req_free(get_req);
		if (get_rsp)
			fprintf(stderr, "TCP header split: %s\n",
				tcp_data_split_str(get_rsp->tcp_data_split));
		ethtool_rings_get_rsp_free(get_rsp);
	}

	ynl_sock_destroy(ys);

	return ret;
}

static int configure_rss(void)
{
	return run_command("sudo ethtool -X %s equal %d >&2", ifname, start_queue);
}

static int configure_channels(unsigned int rx, unsigned int tx)
{
	return run_command("sudo ethtool -L %s rx %u tx %u", ifname, rx, tx);
}

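/* Add an ntuple rule steering the test flow (matched on dst-ip/dst-port, plus
 * src-ip/src-port when a client IP is given) to start_queue.
 */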
static int configure_flow_steering(struct sockaddr_in6 *server_sin)
{
	const char *type = "tcp6";
	const char *server_addr;
	char buf[40];

	inet_ntop(AF_INET6, &server_sin->sin6_addr, buf, sizeof(buf));
	server_addr = buf;

	if (IN6_IS_ADDR_V4MAPPED(&server_sin->sin6_addr)) {
		type = "tcp4";
		server_addr = strrchr(server_addr, ':') + 1;
	}

	return run_command("sudo ethtool -N %s flow-type %s %s %s dst-ip %s %s %s dst-port %s queue %d >&2",
			   ifname,
			   type,
			   client_ip ? "src-ip" : "",
			   client_ip ?: "",
			   server_addr,
			   client_ip ? "src-port" : "",
			   client_ip ? port : "",
			   port, start_queue);
}

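/* Bind the dmabuf to the given RX queues with the netdev netlink bind-rx op.
 * On success the kernel hands back a dmabuf id, which received fragments are
 * later checked against (dmabuf_cmsg->dmabuf_id).
 */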
static int bind_rx_queue(unsigned int ifindex, unsigned int dmabuf_fd,
			 struct netdev_queue_id *queues,
			 unsigned int n_queue_index, struct ynl_sock **ys)
{
	struct netdev_bind_rx_req *req = NULL;
	struct netdev_bind_rx_rsp *rsp = NULL;
	struct ynl_error yerr;

	*ys = ynl_sock_create(&ynl_netdev_family, &yerr);
	if (!*ys) {
		fprintf(stderr, "YNL: %s\n", yerr.msg);
		return -1;
	}

	req = netdev_bind_rx_req_alloc();
	netdev_bind_rx_req_set_ifindex(req, ifindex);
	netdev_bind_rx_req_set_fd(req, dmabuf_fd);
	__netdev_bind_rx_req_set_queues(req, queues, n_queue_index);

	rsp = netdev_bind_rx(*ys, req);
	if (!rsp) {
		perror("netdev_bind_rx");
		goto err_close;
	}

	if (!rsp->_present.id) {
		perror("id not present");
		goto err_close;
	}

	fprintf(stderr, "got dmabuf id=%d\n", rsp->id);
	dmabuf_id = rsp->id;

	netdev_bind_rx_req_free(req);
	netdev_bind_rx_rsp_free(rsp);

	return 0;

err_close:
	fprintf(stderr, "YNL failed: %s\n", (*ys)->err.msg);
	netdev_bind_rx_req_free(req);
	ynl_sock_destroy(*ys);
	return -1;
}

static void enable_reuseaddr(int fd)
{
	int opt = 1;
	int ret;

	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &opt, sizeof(opt));
	if (ret)
		error(1, errno, "%s: [FAIL, SO_REUSEPORT]\n", TEST_PREFIX);

	ret = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &opt, sizeof(opt));
	if (ret)
		error(1, errno, "%s: [FAIL, SO_REUSEADDR]\n", TEST_PREFIX);
}

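/* Parse str into an IPv6 socket address, accepting plain IPv4 addresses as
 * v4-mapped IPv6 (::ffff:a.b.c.d).
 */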
static int parse_address(const char *str, int port, struct sockaddr_in6 *sin6)
{
	int ret;

	sin6->sin6_family = AF_INET6;
	sin6->sin6_port = htons(port);

	ret = inet_pton(sin6->sin6_family, str, &sin6->sin6_addr);
	if (ret != 1) {
		/* fallback to plain IPv4 */
		ret = inet_pton(AF_INET, str, &sin6->sin6_addr.s6_addr32[3]);
		if (ret != 1)
			return -1;

		/* add ::ffff prefix */
		sin6->sin6_addr.s6_addr32[0] = 0;
		sin6->sin6_addr.s6_addr32[1] = 0;
		sin6->sin6_addr.s6_addr16[4] = 0;
		sin6->sin6_addr.s6_addr16[5] = 0xffff;
	}

	return 0;
}

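/* Server side: steer the flow to the dmabuf-bound queues, accept one
 * connection, and loop on recvmsg(MSG_SOCK_DEVMEM). Payload fragments arrive
 * as SCM_DEVMEM_DMABUF cmsgs carrying offsets into the dmabuf; each fragment
 * is copied out, validated or printed, and then handed back to the kernel
 * with SO_DEVMEM_DONTNEED.
 */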
int do_server(struct memory_buffer *mem)
{
	char ctrl_data[sizeof(int) * 20000];
	struct netdev_queue_id *queues;
	size_t non_page_aligned_frags = 0;
	struct sockaddr_in6 client_addr;
	struct sockaddr_in6 server_sin;
	size_t page_aligned_frags = 0;
	size_t total_received = 0;
	socklen_t client_addr_len;
	bool is_devmem = false;
	char *tmp_mem = NULL;
	struct ynl_sock *ys;
	char iobuf[819200];
	char buffer[256];
	int socket_fd;
	int client_fd;
	size_t i = 0;
	int ret;

	ret = parse_address(server_ip, atoi(port), &server_sin);
	if (ret < 0)
		error(1, 0, "parse server address");

	if (reset_flow_steering())
		error(1, 0, "Failed to reset flow steering\n");

	if (configure_headersplit(1))
		error(1, 0, "Failed to enable TCP header split\n");

	/* Configure RSS to divert all traffic from our devmem queues */
	if (configure_rss())
		error(1, 0, "Failed to configure rss\n");

	/* Flow steer our devmem flows to start_queue */
	if (configure_flow_steering(&server_sin))
		error(1, 0, "Failed to configure flow steering\n");

	sleep(1);

	queues = malloc(sizeof(*queues) * num_queues);

	for (i = 0; i < num_queues; i++) {
		queues[i]._present.type = 1;
		queues[i]._present.id = 1;
		queues[i].type = NETDEV_QUEUE_TYPE_RX;
		queues[i].id = start_queue + i;
	}

	if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
		error(1, 0, "Failed to bind\n");

	tmp_mem = malloc(mem->size);
	if (!tmp_mem)
		error(1, ENOMEM, "malloc failed");

	socket_fd = socket(AF_INET6, SOCK_STREAM, 0);
	if (socket_fd < 0)
		error(1, errno, "%s: [FAIL, create socket]\n", TEST_PREFIX);

	enable_reuseaddr(socket_fd);

	fprintf(stderr, "binding to address %s:%d\n", server_ip,
		ntohs(server_sin.sin6_port));

	ret = bind(socket_fd, &server_sin, sizeof(server_sin));
	if (ret)
		error(1, errno, "%s: [FAIL, bind]\n", TEST_PREFIX);

	ret = listen(socket_fd, 1);
	if (ret)
		error(1, errno, "%s: [FAIL, listen]\n", TEST_PREFIX);

	client_addr_len = sizeof(client_addr);

	inet_ntop(AF_INET6, &server_sin.sin6_addr, buffer,
		  sizeof(buffer));
	fprintf(stderr, "Waiting for connection on %s:%d\n", buffer,
		ntohs(server_sin.sin6_port));
	client_fd = accept(socket_fd, &client_addr, &client_addr_len);

	inet_ntop(AF_INET6, &client_addr.sin6_addr, buffer,
		  sizeof(buffer));
	fprintf(stderr, "Got connection from %s:%d\n", buffer,
		ntohs(client_addr.sin6_port));

	while (1) {
		struct iovec iov = { .iov_base = iobuf,
				     .iov_len = sizeof(iobuf) };
		struct dmabuf_cmsg *dmabuf_cmsg = NULL;
		struct cmsghdr *cm = NULL;
		struct msghdr msg = { 0 };
		struct dmabuf_token token;
		ssize_t ret;

		is_devmem = false;

		msg.msg_iov = &iov;
		msg.msg_iovlen = 1;
		msg.msg_control = ctrl_data;
		msg.msg_controllen = sizeof(ctrl_data);
		ret = recvmsg(client_fd, &msg, MSG_SOCK_DEVMEM);
		fprintf(stderr, "recvmsg ret=%ld\n", ret);
		if (ret < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
			continue;
		if (ret < 0) {
			perror("recvmsg");
			continue;
		}
		if (ret == 0) {
			fprintf(stderr, "client exited\n");
			goto cleanup;
		}

		i++;
		for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
			if (cm->cmsg_level != SOL_SOCKET ||
			    (cm->cmsg_type != SCM_DEVMEM_DMABUF &&
			     cm->cmsg_type != SCM_DEVMEM_LINEAR)) {
				fprintf(stderr, "skipping non-devmem cmsg\n");
				continue;
			}

			dmabuf_cmsg = (struct dmabuf_cmsg *)CMSG_DATA(cm);
			is_devmem = true;

			if (cm->cmsg_type == SCM_DEVMEM_LINEAR) {
				/* TODO: process data copied from skb's linear
				 * buffer.
				 */
				fprintf(stderr,
					"SCM_DEVMEM_LINEAR. dmabuf_cmsg->frag_size=%u\n",
					dmabuf_cmsg->frag_size);

				continue;
			}

			token.token_start = dmabuf_cmsg->frag_token;
			token.token_count = 1;

			total_received += dmabuf_cmsg->frag_size;
			fprintf(stderr,
				"received frag_page=%llu, in_page_offset=%llu, frag_offset=%llu, frag_size=%u, token=%u, total_received=%lu, dmabuf_id=%u\n",
				dmabuf_cmsg->frag_offset >> PAGE_SHIFT,
				dmabuf_cmsg->frag_offset % getpagesize(),
				dmabuf_cmsg->frag_offset,
				dmabuf_cmsg->frag_size, dmabuf_cmsg->frag_token,
				total_received, dmabuf_cmsg->dmabuf_id);

			if (dmabuf_cmsg->dmabuf_id != dmabuf_id)
				error(1, 0,
				      "received on wrong dmabuf_id: flow steering error\n");

			if (dmabuf_cmsg->frag_size % getpagesize())
				non_page_aligned_frags++;
			else
				page_aligned_frags++;

			provider->memcpy_from_device(tmp_mem, mem,
						     dmabuf_cmsg->frag_offset,
						     dmabuf_cmsg->frag_size);

			if (do_validation)
				validate_buffer(tmp_mem,
						dmabuf_cmsg->frag_size);
			else
				print_nonzero_bytes(tmp_mem,
						    dmabuf_cmsg->frag_size);

			ret = setsockopt(client_fd, SOL_SOCKET,
					 SO_DEVMEM_DONTNEED, &token,
					 sizeof(token));
			if (ret != 1)
				error(1, 0,
				      "SO_DEVMEM_DONTNEED not enough tokens");
		}
		if (!is_devmem)
			error(1, 0, "flow steering error\n");

		fprintf(stderr, "total_received=%lu\n", total_received);
	}

	fprintf(stderr, "%s: ok\n", TEST_PREFIX);

	fprintf(stderr, "page_aligned_frags=%lu, non_page_aligned_frags=%lu\n",
		page_aligned_frags, non_page_aligned_frags);

cleanup:

	free(tmp_mem);
	close(client_fd);
	close(socket_fd);
	ynl_sock_destroy(ys);

	return 0;
}

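/* Negative/basic tests for the queue binding API: binding an empty (all-zero)
 * queues array or binding with header split off must fail, deactivating a
 * bound queue via a channel count change must fail, and destroying the ynl
 * socket implicitly unbinds the dmabuf.
 */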
void run_devmem_tests(void)
{
	struct netdev_queue_id *queues;
	struct memory_buffer *mem;
	struct ynl_sock *ys;
	size_t i = 0;

	mem = provider->alloc(getpagesize() * NUM_PAGES);

	/* Configure RSS to divert all traffic from our devmem queues */
	if (configure_rss())
		error(1, 0, "rss error\n");

	queues = calloc(num_queues, sizeof(*queues));

	if (configure_headersplit(1))
		error(1, 0, "Failed to configure header split\n");

	if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
		error(1, 0, "Binding empty queues array should have failed\n");

	for (i = 0; i < num_queues; i++) {
		queues[i]._present.type = 1;
		queues[i]._present.id = 1;
		queues[i].type = NETDEV_QUEUE_TYPE_RX;
		queues[i].id = start_queue + i;
	}

	if (configure_headersplit(0))
		error(1, 0, "Failed to configure header split\n");

	if (!bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
		error(1, 0, "Configure dmabuf with header split off should have failed\n");

	if (configure_headersplit(1))
		error(1, 0, "Failed to configure header split\n");

	for (i = 0; i < num_queues; i++) {
		queues[i]._present.type = 1;
		queues[i]._present.id = 1;
		queues[i].type = NETDEV_QUEUE_TYPE_RX;
		queues[i].id = start_queue + i;
	}

	if (bind_rx_queue(ifindex, mem->fd, queues, num_queues, &ys))
		error(1, 0, "Failed to bind\n");

	/* Deactivating a bound queue should not be legal */
	if (!configure_channels(num_queues, num_queues - 1))
		error(1, 0, "Deactivating a bound queue should be illegal.\n");

	/* Closing the netlink socket does an implicit unbind */
	ynl_sock_destroy(ys);

	provider->free(mem);
}

int main(int argc, char *argv[])
{
	struct memory_buffer *mem;
	int is_server = 0, opt;
	int ret;

	while ((opt = getopt(argc, argv, "ls:c:p:v:q:t:f:")) != -1) {
		switch (opt) {
		case 'l':
			is_server = 1;
			break;
		case 's':
			server_ip = optarg;
			break;
		case 'c':
			client_ip = optarg;
			break;
		case 'p':
			port = optarg;
			break;
		case 'v':
			do_validation = atoll(optarg);
			break;
		case 'q':
			num_queues = atoi(optarg);
			break;
		case 't':
			start_queue = atoi(optarg);
			break;
		case 'f':
			ifname = optarg;
			break;
		case '?':
			fprintf(stderr, "unknown option: %c\n", optopt);
			break;
		}
	}

	if (!ifname)
		error(1, 0, "Missing -f argument\n");

	ifindex = if_nametoindex(ifname);

	if (!server_ip && !client_ip) {
		if (start_queue < 0 && num_queues < 0) {
			num_queues = rxq_num(ifindex);
			if (num_queues < 0)
				error(1, 0, "couldn't detect number of queues\n");
			if (num_queues < 2)
				error(1, 0,
				      "number of device queues is too low\n");
			/* make sure can bind to multiple queues */
			start_queue = num_queues / 2;
			num_queues /= 2;
		}

		if (start_queue < 0 || num_queues < 0)
			error(1, 0, "Both -t and -q are required\n");

		run_devmem_tests();
		return 0;
	}

	if (start_queue < 0 && num_queues < 0) {
		num_queues = rxq_num(ifindex);
		if (num_queues < 2)
			error(1, 0, "number of device queues is too low\n");

		num_queues = 1;
		start_queue = rxq_num(ifindex) - num_queues;

		if (start_queue < 0)
			error(1, 0, "couldn't detect number of queues\n");

		fprintf(stderr, "using queues %d..%d\n", start_queue, start_queue + num_queues);
	}

	for (; optind < argc; optind++)
		fprintf(stderr, "extra arguments: %s\n", argv[optind]);

	if (start_queue < 0)
		error(1, 0, "Missing -t argument\n");

	if (num_queues < 0)
		error(1, 0, "Missing -q argument\n");

	if (!server_ip)
		error(1, 0, "Missing -s argument\n");

	if (!port)
		error(1, 0, "Missing -p argument\n");

	mem = provider->alloc(getpagesize() * NUM_PAGES);
	ret = is_server ? do_server(mem) : 1;
	provider->free(mem);

	return ret;
}