// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Various unit tests for the "ntsync" synchronization primitive driver.
 *
 * Copyright (C) 2021-2022 Elizabeth Figura <[email protected]>
 */

#define _GNU_SOURCE
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <time.h>
#include <pthread.h>
#include <linux/ntsync.h>
#include "../../kselftest_harness.h"

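/*
 * Read back a semaphore's state (current count and maximum) via the
 * NTSYNC_IOC_SEM_READ ioctl. The args struct is poisoned with 0xcc first so
 * that any value the kernel fails to write shows up as garbage. The
 * check_*_state() macros below wrap the read helpers with EXPECT_EQ()
 * assertions; analogous helpers follow for mutexes and events.
 */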
static int read_sem_state(int sem, __u32 *count, __u32 *max)
{
	struct ntsync_sem_args args;
	int ret;

	memset(&args, 0xcc, sizeof(args));
	ret = ioctl(sem, NTSYNC_IOC_SEM_READ, &args);
	*count = args.count;
	*max = args.max;
	return ret;
}

#define check_sem_state(sem, count, max) \
	({ \
		__u32 __count, __max; \
		int ret = read_sem_state((sem), &__count, &__max); \
		EXPECT_EQ(0, ret); \
		EXPECT_EQ((count), __count); \
		EXPECT_EQ((max), __max); \
	})

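/*
 * Release a semaphore: *count holds the number of slots to add on entry,
 * and on success receives the count the semaphore held before the release.
 */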
static int release_sem(int sem, __u32 *count)
{
	return ioctl(sem, NTSYNC_IOC_SEM_RELEASE, count);
}

static int read_mutex_state(int mutex, __u32 *count, __u32 *owner)
{
	struct ntsync_mutex_args args;
	int ret;

	memset(&args, 0xcc, sizeof(args));
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &args);
	*count = args.count;
	*owner = args.owner;
	return ret;
}

#define check_mutex_state(mutex, count, owner) \
	({ \
		__u32 __count, __owner; \
		int ret = read_mutex_state((mutex), &__count, &__owner); \
		EXPECT_EQ(0, ret); \
		EXPECT_EQ((count), __count); \
		EXPECT_EQ((owner), __owner); \
	})

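/*
 * Drop one recursion level of a mutex on behalf of the given owner. On
 * success *count receives the recursion count held before the unlock.
 */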
static int unlock_mutex(int mutex, __u32 owner, __u32 *count)
{
	struct ntsync_mutex_args args;
	int ret;

	args.owner = owner;
	args.count = 0xdeadbeef;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_UNLOCK, &args);
	*count = args.count;
	return ret;
}

static int read_event_state(int event, __u32 *signaled, __u32 *manual)
{
	struct ntsync_event_args args;
	int ret;

	memset(&args, 0xcc, sizeof(args));
	ret = ioctl(event, NTSYNC_IOC_EVENT_READ, &args);
	*signaled = args.signaled;
	*manual = args.manual;
	return ret;
}

#define check_event_state(event, signaled, manual) \
	({ \
		__u32 __signaled, __manual; \
		int ret = read_event_state((event), &__signaled, &__manual); \
		EXPECT_EQ(0, ret); \
		EXPECT_EQ((signaled), __signaled); \
		EXPECT_EQ((manual), __manual); \
	})

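/*
 * Issue a wait ioctl with an absolute timeout equal to "now", i.e. a
 * non-blocking poll: the wait either succeeds immediately or fails with
 * ETIMEDOUT. The wrappers below select WAIT_ANY vs. WAIT_ALL and optionally
 * pass an alert event.
 */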
static int wait_objs(int fd, unsigned long request, __u32 count,
		     const int *objs, __u32 owner, int alert, __u32 *index)
{
	struct ntsync_wait_args args = {0};
	struct timespec timeout;
	int ret;

	clock_gettime(CLOCK_MONOTONIC, &timeout);

	args.timeout = timeout.tv_sec * 1000000000 + timeout.tv_nsec;
	args.count = count;
	args.objs = (uintptr_t)objs;
	args.owner = owner;
	args.index = 0xdeadbeef;
	args.alert = alert;
	ret = ioctl(fd, request, &args);
	*index = args.index;
	return ret;
}

static int wait_any(int fd, __u32 count, const int *objs, __u32 owner, __u32 *index)
{
	return wait_objs(fd, NTSYNC_IOC_WAIT_ANY, count, objs, owner, 0, index);
}

static int wait_all(int fd, __u32 count, const int *objs, __u32 owner, __u32 *index)
{
	return wait_objs(fd, NTSYNC_IOC_WAIT_ALL, count, objs, owner, 0, index);
}

static int wait_any_alert(int fd, __u32 count, const int *objs,
			  __u32 owner, int alert, __u32 *index)
{
	return wait_objs(fd, NTSYNC_IOC_WAIT_ANY,
			 count, objs, owner, alert, index);
}

static int wait_all_alert(int fd, __u32 count, const int *objs,
			  __u32 owner, int alert, __u32 *index)
{
	return wait_objs(fd, NTSYNC_IOC_WAIT_ALL,
			 count, objs, owner, alert, index);
}

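/*
 * Exercise semaphore creation and state transitions: rejecting a count above
 * the maximum, releasing, overflow detection, and acquisition via wait.
 */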
TEST(semaphore_state)
{
	struct ntsync_sem_args sem_args;
	struct timespec timeout;
	__u32 count, index;
	int fd, ret, sem;

	clock_gettime(CLOCK_MONOTONIC, &timeout);

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 3;
	sem_args.max = 2;
	sem = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_EQ(-1, sem);
	EXPECT_EQ(EINVAL, errno);

	sem_args.count = 2;
	sem_args.max = 2;
	sem = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, sem);
	check_sem_state(sem, 2, 2);

	count = 0;
	ret = release_sem(sem, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, count);
	check_sem_state(sem, 2, 2);

	count = 1;
	ret = release_sem(sem, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOVERFLOW, errno);
	check_sem_state(sem, 2, 2);

	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(sem, 1, 2);

	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(sem, 0, 2);

	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	count = 3;
	ret = release_sem(sem, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOVERFLOW, errno);
	check_sem_state(sem, 0, 2);

	count = 2;
	ret = release_sem(sem, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);
	check_sem_state(sem, 2, 2);

	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(0, ret);
	ret = wait_any(fd, 1, &sem, 123, &index);
	EXPECT_EQ(0, ret);

	count = 1;
	ret = release_sem(sem, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);
	check_sem_state(sem, 1, 2);

	count = ~0u;
	ret = release_sem(sem, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOVERFLOW, errno);
	check_sem_state(sem, 1, 2);

	close(sem);

	close(fd);
}

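/*
 * Exercise mutex creation and state transitions: invalid owner/count
 * combinations, recursive locking and unlocking, wrong-owner errors, and
 * the "owner dead" (abandoned) path via NTSYNC_IOC_MUTEX_KILL.
 */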
TEST(mutex_state)
{
	struct ntsync_mutex_args mutex_args;
	__u32 owner, count, index;
	struct timespec timeout;
	int fd, ret, mutex;

	clock_gettime(CLOCK_MONOTONIC, &timeout);

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	mutex_args.owner = 123;
	mutex_args.count = 0;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_EQ(-1, mutex);
	EXPECT_EQ(EINVAL, errno);

	mutex_args.owner = 0;
	mutex_args.count = 2;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_EQ(-1, mutex);
	EXPECT_EQ(EINVAL, errno);

	mutex_args.owner = 123;
	mutex_args.count = 2;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, mutex);
	check_mutex_state(mutex, 2, 123);

	ret = unlock_mutex(mutex, 0, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	ret = unlock_mutex(mutex, 456, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EPERM, errno);
	check_mutex_state(mutex, 2, 123);

	ret = unlock_mutex(mutex, 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, count);
	check_mutex_state(mutex, 1, 123);

	ret = unlock_mutex(mutex, 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, count);
	check_mutex_state(mutex, 0, 0);

	ret = unlock_mutex(mutex, 123, &count);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EPERM, errno);

	ret = wait_any(fd, 1, &mutex, 456, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 1, 456);

	ret = wait_any(fd, 1, &mutex, 456, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 2, 456);

	ret = unlock_mutex(mutex, 456, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, count);
	check_mutex_state(mutex, 1, 456);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	owner = 0;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	owner = 123;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EPERM, errno);
	check_mutex_state(mutex, 1, 456);

	owner = 456;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(0, ret);

	memset(&mutex_args, 0xcc, sizeof(mutex_args));
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, mutex_args.count);
	EXPECT_EQ(0, mutex_args.owner);

	memset(&mutex_args, 0xcc, sizeof(mutex_args));
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, mutex_args.count);
	EXPECT_EQ(0, mutex_args.owner);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 1, 123);

	owner = 123;
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(0, ret);

	memset(&mutex_args, 0xcc, sizeof(mutex_args));
	ret = ioctl(mutex, NTSYNC_IOC_MUTEX_READ, &mutex_args);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, mutex_args.count);
	EXPECT_EQ(0, mutex_args.owner);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 1, 123);

	close(mutex);

	mutex_args.owner = 0;
	mutex_args.count = 0;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, mutex);
	check_mutex_state(mutex, 0, 0);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_mutex_state(mutex, 1, 123);

	close(mutex);

	mutex_args.owner = 123;
	mutex_args.count = ~0u;
	mutex = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, mutex);
	check_mutex_state(mutex, ~0u, 123);

	ret = wait_any(fd, 1, &mutex, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	close(mutex);

	close(fd);
}

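/*
 * Exercise a manual-reset event: set, reset, and pulse, and check that a
 * successful wait leaves the event signaled.
 */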
TEST(manual_event_state)
{
	struct ntsync_event_args event_args;
	__u32 index, signaled;
	int fd, event, ret;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	event_args.manual = 1;
	event_args.signaled = 0;
	event = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, event);
	check_event_state(event, 0, 1);

	signaled = 0xdeadbeef;
	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 1, 1);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 1, 1);

	ret = wait_any(fd, 1, &event, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_event_state(event, 1, 1);

	signaled = 0xdeadbeef;
	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 0, 1);

	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 0, 1);

	ret = wait_any(fd, 1, &event, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);

	ret = ioctl(event, NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 0, 1);

	ret = ioctl(event, NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 0, 1);

	close(event);

	close(fd);
}

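/*
 * Exercise an auto-reset event: a successful wait consumes the signal, and
 * set, reset, and pulse report the previous state.
 */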
TEST(auto_event_state)
{
	struct ntsync_event_args event_args;
	__u32 index, signaled;
	int fd, event, ret;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	event_args.manual = 0;
	event_args.signaled = 1;
	event = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, event);

	check_event_state(event, 1, 0);

	signaled = 0xdeadbeef;
	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 1, 0);

	ret = wait_any(fd, 1, &event, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_event_state(event, 0, 0);

	signaled = 0xdeadbeef;
	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 0, 0);

	ret = wait_any(fd, 1, &event, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);

	ret = ioctl(event, NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);
	check_event_state(event, 0, 0);

	ret = ioctl(event, NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(event, 0, 0);

	close(event);

	close(fd);
}

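/*
 * Exercise NTSYNC_IOC_WAIT_ANY over a semaphore and a mutex: which object is
 * acquired, mutex ownership and abandonment, waiting on the same object
 * twice, and the NTSYNC_MAX_WAIT_COUNT limit.
 */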
TEST(test_wait_any)
{
	int objs[NTSYNC_MAX_WAIT_COUNT + 1], fd, ret;
	struct ntsync_mutex_args mutex_args = {0};
	struct ntsync_sem_args sem_args = {0};
	__u32 owner, index, count, i;
	struct timespec timeout;

	clock_gettime(CLOCK_MONOTONIC, &timeout);

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 2;
	sem_args.max = 3;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	mutex_args.owner = 0;
	mutex_args.count = 0;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, objs[1]);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 0, 0);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 0, 0);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 1, 123);

	count = 1;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 1, 123);

	ret = wait_any(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 2, 123);

	ret = wait_any(fd, 2, objs, 456, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	owner = 123;
	ret = ioctl(objs[1], NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(0, ret);

	ret = wait_any(fd, 2, objs, 456, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	EXPECT_EQ(1, index);

	ret = wait_any(fd, 2, objs, 456, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, index);

	close(objs[1]);

	/* test waiting on the same object twice */

	count = 2;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);

	objs[1] = objs[0];
	ret = wait_any(fd, 2, objs, 456, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 1, 3);

	ret = wait_any(fd, 0, NULL, 456, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	for (i = 1; i < NTSYNC_MAX_WAIT_COUNT + 1; ++i)
		objs[i] = objs[0];

	ret = wait_any(fd, NTSYNC_MAX_WAIT_COUNT, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	ret = wait_any(fd, NTSYNC_MAX_WAIT_COUNT + 1, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	ret = wait_any(fd, -1, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	close(objs[0]);

	close(fd);
}

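/*
 * Exercise NTSYNC_IOC_WAIT_ALL: either every object is consumed atomically
 * or none is, abandoned mutexes are reported, and duplicate objects in a
 * single wait are rejected with EINVAL.
 */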
TEST(test_wait_all)
{
	struct ntsync_event_args event_args = {0};
	struct ntsync_mutex_args mutex_args = {0};
	struct ntsync_sem_args sem_args = {0};
	__u32 owner, index, count;
	int objs[2], fd, ret;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 2;
	sem_args.max = 3;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	mutex_args.owner = 0;
	mutex_args.count = 0;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, objs[1]);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 1, 123);

	ret = wait_all(fd, 2, objs, 456, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 1, 123);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 2, 123);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);
	check_sem_state(objs[0], 0, 3);
	check_mutex_state(objs[1], 2, 123);

	count = 3;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 2, 3);
	check_mutex_state(objs[1], 3, 123);

	owner = 123;
	ret = ioctl(objs[1], NTSYNC_IOC_MUTEX_KILL, &owner);
	EXPECT_EQ(0, ret);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EOWNERDEAD, errno);
	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 1, 123);

	close(objs[1]);

	event_args.manual = true;
	event_args.signaled = true;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, objs[1]);

	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);
	check_sem_state(objs[0], 0, 3);
	check_event_state(objs[1], 1, 1);

	close(objs[1]);

	/* test waiting on the same object twice */
	objs[1] = objs[0];
	ret = wait_all(fd, 2, objs, 123, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(EINVAL, errno);

	close(objs[0]);

	close(fd);
}

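/*
 * Helpers for tests that block in a wait ioctl on a second thread:
 * wait_thread() issues the ioctl described by its struct wait_args argument
 * and records the return value and errno for the test to examine.
 */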
struct wake_args {
	int fd;
	int obj;
};

struct wait_args {
	int fd;
	unsigned long request;
	struct ntsync_wait_args *args;
	int ret;
	int err;
};

static void *wait_thread(void *arg)
{
	struct wait_args *args = arg;

	args->ret = ioctl(args->fd, args->request, args->args);
	args->err = errno;
	return NULL;
}

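/* Absolute CLOCK_MONOTONIC deadline, in nanoseconds, "ms" milliseconds from now. */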
static __u64 get_abs_timeout(unsigned int ms)
{
	struct timespec timeout;
	clock_gettime(CLOCK_MONOTONIC, &timeout);
	return (timeout.tv_sec * 1000000000) + timeout.tv_nsec + (ms * 1000000);
}

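/* Join the given thread, giving up after "ms" milliseconds. */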
static int wait_for_thread(pthread_t thread, unsigned int ms)
{
	struct timespec timeout;

	clock_gettime(CLOCK_REALTIME, &timeout);
	timeout.tv_nsec += ms * 1000000;
	timeout.tv_sec += (timeout.tv_nsec / 1000000000);
	timeout.tv_nsec %= 1000000000;
	return pthread_timedjoin_np(thread, NULL, &timeout);
}

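/*
 * Verify that a blocked WAIT_ANY is woken when one of its objects becomes
 * signaled (semaphore release, mutex unlock, event set or pulse), and that
 * closing the objects does not wake the waiter early.
 */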
TEST(wake_any)
{
	struct ntsync_event_args event_args = {0};
	struct ntsync_mutex_args mutex_args = {0};
	struct ntsync_wait_args wait_args = {0};
	struct ntsync_sem_args sem_args = {0};
	struct wait_args thread_args;
	__u32 count, index, signaled;
	int objs[2], fd, ret;
	pthread_t thread;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 0;
	sem_args.max = 3;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	mutex_args.owner = 123;
	mutex_args.count = 1;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, objs[1]);

	/* test waking the semaphore */

	wait_args.timeout = get_abs_timeout(1000);
	wait_args.objs = (uintptr_t)objs;
	wait_args.count = 2;
	wait_args.owner = 456;
	wait_args.index = 0xdeadbeef;
	thread_args.fd = fd;
	thread_args.args = &wait_args;
	thread_args.request = NTSYNC_IOC_WAIT_ANY;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	count = 1;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);
	check_sem_state(objs[0], 0, 3);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(0, wait_args.index);

	/* test waking the mutex */

	/* first grab it again for owner 123 */
	ret = wait_any(fd, 1, &objs[1], 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	wait_args.timeout = get_abs_timeout(1000);
	wait_args.owner = 456;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	ret = unlock_mutex(objs[1], 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, count);

	ret = pthread_tryjoin_np(thread, NULL);
	EXPECT_EQ(EBUSY, ret);

	ret = unlock_mutex(objs[1], 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, count);
	check_mutex_state(objs[1], 1, 456);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(1, wait_args.index);

	close(objs[1]);

	/* test waking events */

	event_args.manual = false;
	event_args.signaled = false;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, objs[1]);

	wait_args.timeout = get_abs_timeout(1000);
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	ret = ioctl(objs[1], NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(objs[1], 0, 0);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(1, wait_args.index);

	wait_args.timeout = get_abs_timeout(1000);
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	ret = ioctl(objs[1], NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(objs[1], 0, 0);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(1, wait_args.index);

	close(objs[1]);

	event_args.manual = true;
	event_args.signaled = false;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, objs[1]);

	wait_args.timeout = get_abs_timeout(1000);
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	ret = ioctl(objs[1], NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(objs[1], 1, 1);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(1, wait_args.index);

	ret = ioctl(objs[1], NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);

	wait_args.timeout = get_abs_timeout(1000);
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	ret = ioctl(objs[1], NTSYNC_IOC_EVENT_PULSE, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);
	check_event_state(objs[1], 0, 1);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(1, wait_args.index);

	/* delete an object while it's being waited on */

	wait_args.timeout = get_abs_timeout(200);
	wait_args.owner = 123;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	close(objs[0]);
	close(objs[1]);

	ret = wait_for_thread(thread, 200);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(-1, thread_args.ret);
	EXPECT_EQ(ETIMEDOUT, thread_args.err);

	close(fd);
}

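/*
 * Verify that a blocked WAIT_ALL only completes once every object is
 * signaled simultaneously, consuming all of them atomically, and that
 * closing the objects does not wake the waiter early.
 */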
TEST(wake_all)
{
	struct ntsync_event_args manual_event_args = {0};
	struct ntsync_event_args auto_event_args = {0};
	struct ntsync_mutex_args mutex_args = {0};
	struct ntsync_wait_args wait_args = {0};
	struct ntsync_sem_args sem_args = {0};
	struct wait_args thread_args;
	__u32 count, index, signaled;
	int objs[4], fd, ret;
	pthread_t thread;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 0;
	sem_args.max = 3;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	mutex_args.owner = 123;
	mutex_args.count = 1;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, objs[1]);

	manual_event_args.manual = true;
	manual_event_args.signaled = true;
	objs[2] = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &manual_event_args);
	EXPECT_LE(0, objs[2]);

	auto_event_args.manual = false;
	auto_event_args.signaled = true;
	objs[3] = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &auto_event_args);
	EXPECT_LE(0, objs[3]);

	wait_args.timeout = get_abs_timeout(1000);
	wait_args.objs = (uintptr_t)objs;
	wait_args.count = 4;
	wait_args.owner = 456;
	thread_args.fd = fd;
	thread_args.args = &wait_args;
	thread_args.request = NTSYNC_IOC_WAIT_ALL;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	count = 1;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);

	ret = pthread_tryjoin_np(thread, NULL);
	EXPECT_EQ(EBUSY, ret);

	check_sem_state(objs[0], 1, 3);

	ret = wait_any(fd, 1, &objs[0], 123, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	ret = unlock_mutex(objs[1], 123, &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, count);

	ret = pthread_tryjoin_np(thread, NULL);
	EXPECT_EQ(EBUSY, ret);

	check_mutex_state(objs[1], 0, 0);

	ret = ioctl(objs[2], NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);

	count = 2;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, count);
	check_sem_state(objs[0], 2, 3);

	ret = ioctl(objs[3], NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, signaled);

	ret = ioctl(objs[2], NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);

	ret = ioctl(objs[3], NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, signaled);

	check_sem_state(objs[0], 1, 3);
	check_mutex_state(objs[1], 1, 456);
	check_event_state(objs[2], 1, 1);
	check_event_state(objs[3], 0, 0);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);

	/* delete an object while it's being waited on */

	wait_args.timeout = get_abs_timeout(200);
	wait_args.owner = 123;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	close(objs[0]);
	close(objs[1]);
	close(objs[2]);
	close(objs[3]);

	ret = wait_for_thread(thread, 200);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(-1, thread_args.ret);
	EXPECT_EQ(ETIMEDOUT, thread_args.err);

	close(fd);
}

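/*
 * Exercise the "alert" argument to WAIT_ANY: when the alert event is
 * signaled the wait completes with an index equal to the object count, and a
 * signaled alert can also wake a blocked waiter.
 */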
TEST(alert_any)
{
	struct ntsync_event_args event_args = {0};
	struct ntsync_wait_args wait_args = {0};
	struct ntsync_sem_args sem_args = {0};
	__u32 index, count, signaled;
	struct wait_args thread_args;
	int objs[2], event, fd, ret;
	pthread_t thread;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 0;
	sem_args.max = 2;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	sem_args.count = 1;
	sem_args.max = 2;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[1]);

	event_args.manual = true;
	event_args.signaled = true;
	event = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, event);

	ret = wait_any_alert(fd, 0, NULL, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);

	ret = wait_any_alert(fd, 0, NULL, 123, event, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);

	ret = wait_any_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(1, index);

	ret = wait_any_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, index);

	/* test wakeup via alert */

	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);

	wait_args.timeout = get_abs_timeout(1000);
	wait_args.objs = (uintptr_t)objs;
	wait_args.count = 2;
	wait_args.owner = 123;
	wait_args.index = 0xdeadbeef;
	wait_args.alert = event;
	thread_args.fd = fd;
	thread_args.args = &wait_args;
	thread_args.request = NTSYNC_IOC_WAIT_ANY;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(2, wait_args.index);

	close(event);

	/* test with an auto-reset event */

	event_args.manual = false;
	event_args.signaled = true;
	event = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, event);

	count = 1;
	ret = release_sem(objs[0], &count);
	EXPECT_EQ(0, ret);

	ret = wait_any_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	ret = wait_any_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, index);

	ret = wait_any_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	close(event);

	close(objs[0]);
	close(objs[1]);

	close(fd);
}

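/*
 * Exercise the "alert" argument to WAIT_ALL: the alert is taken only when
 * the full set of objects cannot be acquired, again reporting an index equal
 * to the object count.
 */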
TEST(alert_all)
{
	struct ntsync_event_args event_args = {0};
	struct ntsync_wait_args wait_args = {0};
	struct ntsync_sem_args sem_args = {0};
	struct wait_args thread_args;
	__u32 index, count, signaled;
	int objs[2], event, fd, ret;
	pthread_t thread;

	fd = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, fd);

	sem_args.count = 2;
	sem_args.max = 2;
	objs[0] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[0]);

	sem_args.count = 1;
	sem_args.max = 2;
	objs[1] = ioctl(fd, NTSYNC_IOC_CREATE_SEM, &sem_args);
	EXPECT_LE(0, objs[1]);

	event_args.manual = true;
	event_args.signaled = true;
	event = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, event);

	ret = wait_all_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	ret = wait_all_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, index);

	/* test wakeup via alert */

	ret = ioctl(event, NTSYNC_IOC_EVENT_RESET, &signaled);
	EXPECT_EQ(0, ret);

	wait_args.timeout = get_abs_timeout(1000);
	wait_args.objs = (uintptr_t)objs;
	wait_args.count = 2;
	wait_args.owner = 123;
	wait_args.index = 0xdeadbeef;
	wait_args.alert = event;
	thread_args.fd = fd;
	thread_args.args = &wait_args;
	thread_args.request = NTSYNC_IOC_WAIT_ALL;
	ret = pthread_create(&thread, NULL, wait_thread, &thread_args);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(ETIMEDOUT, ret);

	ret = ioctl(event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);

	ret = wait_for_thread(thread, 100);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, thread_args.ret);
	EXPECT_EQ(2, wait_args.index);

	close(event);

	/* test with an auto-reset event */

	event_args.manual = false;
	event_args.signaled = true;
	event = ioctl(fd, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, event);

	count = 2;
	ret = release_sem(objs[1], &count);
	EXPECT_EQ(0, ret);

	ret = wait_all_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(0, index);

	ret = wait_all_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(0, ret);
	EXPECT_EQ(2, index);

	ret = wait_all_alert(fd, 2, objs, 123, event, &index);
	EXPECT_EQ(-1, ret);
	EXPECT_EQ(ETIMEDOUT, errno);

	close(event);

	close(objs[0]);
	close(objs[1]);

	close(fd);
}

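/*
 * Stress test: several threads repeatedly acquire the same mutex, bump a
 * shared counter while holding it, and unlock. If the mutex serializes the
 * threads correctly, the counter ends up at STRESS_LOOPS * STRESS_THREADS.
 */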
#define STRESS_LOOPS 10000
#define STRESS_THREADS 4

static unsigned int stress_counter;
static int stress_device, stress_start_event, stress_mutex;

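/*
 * Each worker blocks on the shared start event, then loops acquiring the
 * mutex, incrementing the counter, and unlocking. gettid() provides a
 * distinct owner identity per thread.
 */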
static void *stress_thread(void *arg)
{
	struct ntsync_wait_args wait_args = {0};
	__u32 index, count, i;
	int ret;

	wait_args.timeout = UINT64_MAX;
	wait_args.count = 1;
	wait_args.objs = (uintptr_t)&stress_start_event;
	wait_args.owner = gettid();
	wait_args.index = 0xdeadbeef;

	ioctl(stress_device, NTSYNC_IOC_WAIT_ANY, &wait_args);

	wait_args.objs = (uintptr_t)&stress_mutex;

	for (i = 0; i < STRESS_LOOPS; ++i) {
		ioctl(stress_device, NTSYNC_IOC_WAIT_ANY, &wait_args);

		++stress_counter;

		unlock_mutex(stress_mutex, wait_args.owner, &count);
	}

	return NULL;
}

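/* Launch the workers, release them together via the start event, and check the count. */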
TEST(stress_wait)
{
	struct ntsync_event_args event_args;
	struct ntsync_mutex_args mutex_args;
	pthread_t threads[STRESS_THREADS];
	__u32 signaled, i;
	int ret;

	stress_device = open("/dev/ntsync", O_CLOEXEC | O_RDONLY);
	ASSERT_LE(0, stress_device);

	mutex_args.owner = 0;
	mutex_args.count = 0;
	stress_mutex = ioctl(stress_device, NTSYNC_IOC_CREATE_MUTEX, &mutex_args);
	EXPECT_LE(0, stress_mutex);

	event_args.manual = 1;
	event_args.signaled = 0;
	stress_start_event = ioctl(stress_device, NTSYNC_IOC_CREATE_EVENT, &event_args);
	EXPECT_LE(0, stress_start_event);

	for (i = 0; i < STRESS_THREADS; ++i)
		pthread_create(&threads[i], NULL, stress_thread, NULL);

	ret = ioctl(stress_start_event, NTSYNC_IOC_EVENT_SET, &signaled);
	EXPECT_EQ(0, ret);

	for (i = 0; i < STRESS_THREADS; ++i) {
		ret = pthread_join(threads[i], NULL);
		EXPECT_EQ(0, ret);
	}

	EXPECT_EQ(STRESS_LOOPS * STRESS_THREADS, stress_counter);

	close(stress_start_event);
	close(stress_mutex);
	close(stress_device);
}

TEST_HARNESS_MAIN