/linux-6.14.4/tools/perf/util/ |
D | thread_map.c |
  36 struct perf_thread_map *threads; in thread_map__new_by_pid() local
  47 threads = thread_map__alloc(items); in thread_map__new_by_pid()
  48 if (threads != NULL) { in thread_map__new_by_pid()
  50 perf_thread_map__set_pid(threads, i, atoi(namelist[i]->d_name)); in thread_map__new_by_pid()
  51 threads->nr = items; in thread_map__new_by_pid()
  52 refcount_set(&threads->refcnt, 1); in thread_map__new_by_pid()
  59 return threads; in thread_map__new_by_pid()
  64 struct perf_thread_map *threads = thread_map__alloc(1); in thread_map__new_by_tid() local
  66 if (threads != NULL) { in thread_map__new_by_tid()
  67 perf_thread_map__set_pid(threads, 0, tid); in thread_map__new_by_tid()
  [all …]
|
D | threads.h |
  20 struct threads { struct
  24 void threads__init(struct threads *threads);
  25 void threads__exit(struct threads *threads);
  26 size_t threads__nr(struct threads *threads);
  27 struct thread *threads__find(struct threads *threads, pid_t tid);
  28 struct thread *threads__findnew(struct threads *threads, pid_t pid, pid_t tid, bool *created);
  29 void threads__remove_all_threads(struct threads *threads);
  30 void threads__remove(struct threads *threads, struct thread *thread);
  31 int threads__for_each_thread(struct threads *threads,
|
D | threads.c |
  2 #include "threads.h"
  6 static struct threads_table_entry *threads__table(struct threads *threads, pid_t tid) in threads__table() argument
  9 return &threads->table[(unsigned int)tid % THREADS__TABLE_SIZE]; in threads__table()
  23 void threads__init(struct threads *threads) in threads__init() argument
  26 struct threads_table_entry *table = &threads->table[i]; in threads__init()
  34 void threads__exit(struct threads *threads) in threads__exit() argument
  36 threads__remove_all_threads(threads); in threads__exit()
  38 struct threads_table_entry *table = &threads->table[i]; in threads__exit()
  45 size_t threads__nr(struct threads *threads) in threads__nr() argument
  50 struct threads_table_entry *table = &threads->table[i]; in threads__nr()
  [all …]
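threads.c keys perf's per-process thread table by bucketing each tid with tid % THREADS__TABLE_SIZE, as the threads__table() match above shows. Below is a minimal, self-contained sketch of just that bucket-selection idea; the table size and the bucket struct are illustrative stand-ins, not perf's real threads_table_entry:

    #include <stdio.h>
    #include <sys/types.h>

    /* Illustrative only: perf's real entries carry a hashmap plus locking;
     * this sketch keeps only the bucket selection shown in threads__table(). */
    #define TABLE_SIZE 8    /* stand-in for THREADS__TABLE_SIZE */

    struct bucket {
            int nr;         /* number of tids hashed into this bucket */
    };

    static struct bucket table[TABLE_SIZE];

    static struct bucket *bucket_for_tid(pid_t tid)
    {
            /* Same idea as threads__table(): tid modulo the table size. */
            return &table[(unsigned int)tid % TABLE_SIZE];
    }

    int main(void)
    {
            pid_t tids[] = { 101, 102, 109, 117 };

            for (size_t i = 0; i < sizeof(tids) / sizeof(tids[0]); i++)
                    bucket_for_tid(tids[i])->nr++;

            for (int i = 0; i < TABLE_SIZE; i++)
                    printf("bucket %d: %d tid(s)\n", i, table[i].nr);
            return 0;
    }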
|
/linux-6.14.4/tools/perf/tests/ |
D | thread-map.c |
  69 struct perf_thread_map *threads; in process_event() local
  75 threads = thread_map__new_event(&event->thread_map); in process_event()
  76 TEST_ASSERT_VAL("failed to alloc map", threads); in process_event()
  78 TEST_ASSERT_VAL("wrong nr", threads->nr == 1); in process_event()
  80 perf_thread_map__pid(threads, 0) == getpid()); in process_event()
  82 perf_thread_map__comm(threads, 0) && in process_event()
  83 !strcmp(perf_thread_map__comm(threads, 0), NAME)); in process_event()
  85 refcount_read(&threads->refcnt) == 1); in process_event()
  86 perf_thread_map__put(threads); in process_event()
  92 struct perf_thread_map *threads; in test__thread_map_synthesize() local
  [all …]
|
D | mmap-thread-lookup.c |
  21 #define THREADS 4 macro
  32 static struct thread_data threads[THREADS]; variable
  81 struct thread_data *td = &threads[i]; in thread_create()
  101 struct thread_data *td0 = &threads[0]; in threads_create()
  110 for (i = 1; !err && i < THREADS; i++) in threads_create()
  118 struct thread_data *td0 = &threads[0]; in threads_destroy()
  126 for (i = 1; !err && i < THREADS; i++) in threads_destroy()
  127 err = pthread_join(threads[i].pt, NULL); in threads_destroy()
  162 * The threads_create will not return before all threads in mmap_events()
  168 TEST_ASSERT_VAL("failed to create threads", !threads_create()); in mmap_events()
  [all …]
|
/linux-6.14.4/tools/lib/perf/tests/ |
D | test-evsel.c |
  56 struct perf_thread_map *threads; in test_stat_thread() local
  64 threads = perf_thread_map__new_dummy(); in test_stat_thread()
  65 __T("failed to create threads", threads); in test_stat_thread()
  67 perf_thread_map__set_pid(threads, 0, 0); in test_stat_thread()
  72 err = perf_evsel__open(evsel, NULL, threads); in test_stat_thread()
  81 perf_thread_map__put(threads); in test_stat_thread()
  88 struct perf_thread_map *threads; in test_stat_thread_enable() local
  97 threads = perf_thread_map__new_dummy(); in test_stat_thread_enable()
  98 __T("failed to create threads", threads); in test_stat_thread_enable()
  100 perf_thread_map__set_pid(threads, 0, 0); in test_stat_thread_enable()
  [all …]
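test-evsel.c drives libperf's counting flow: build a one-entry thread map pointing at the calling process, open an evsel on it, read the counter back, then drop the references. A compact userspace sketch along the same lines, assuming the libperf headers are installed and the program links against -lperf (error handling trimmed):

    #include <linux/perf_event.h>
    #include <perf/evsel.h>
    #include <perf/threadmap.h>
    #include <stdio.h>

    int main(void)
    {
            struct perf_event_attr attr = {
                    .type   = PERF_TYPE_SOFTWARE,
                    .config = PERF_COUNT_SW_CPU_CLOCK,
            };
            struct perf_counts_values counts = { .val = 0 };
            struct perf_thread_map *threads;
            struct perf_evsel *evsel;

            /* One-entry map; pid 0 means "the calling process", as in the test. */
            threads = perf_thread_map__new_dummy();
            perf_thread_map__set_pid(threads, 0, 0);

            evsel = perf_evsel__new(&attr);
            if (perf_evsel__open(evsel, NULL, threads))     /* NULL CPU map: count per thread */
                    return 1;

            /* ... the measured workload would run here ... */

            perf_evsel__read(evsel, 0, 0, &counts);
            printf("cpu-clock: %llu\n", (unsigned long long)counts.val);

            perf_evsel__close(evsel);
            perf_evsel__delete(evsel);
            perf_thread_map__put(threads);
            return 0;
    }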
|
D | test-threadmap.c |
  16 struct perf_thread_map *threads; in test_threadmap_array() local
  19 threads = perf_thread_map__new_array(nr, array); in test_threadmap_array()
  20 __T("Failed to allocate new thread map", threads); in test_threadmap_array()
  22 __T("Unexpected number of threads", perf_thread_map__nr(threads) == nr); in test_threadmap_array()
  26 perf_thread_map__pid(threads, i) == (array ? array[i] : -1)); in test_threadmap_array()
  30 perf_thread_map__set_pid(threads, i, i * 100); in test_threadmap_array()
  33 perf_thread_map__pid(threads, 0) == (array ? array[0] : -1)); in test_threadmap_array()
  37 perf_thread_map__pid(threads, i) == i * 100); in test_threadmap_array()
  40 perf_thread_map__put(threads); in test_threadmap_array()
  48 struct perf_thread_map *threads; in test_threadmap() local
  [all …]
|
D | test-evlist.c |
  95 struct perf_thread_map *threads; in test_stat_thread() local
  108 threads = perf_thread_map__new_dummy(); in test_stat_thread()
  109 __T("failed to create threads", threads); in test_stat_thread()
  111 perf_thread_map__set_pid(threads, 0, 0); in test_stat_thread()
  130 perf_evlist__set_maps(evlist, NULL, threads); in test_stat_thread()
  143 perf_thread_map__put(threads); in test_stat_thread()
  150 struct perf_thread_map *threads; in test_stat_thread_enable() local
  165 threads = perf_thread_map__new_dummy(); in test_stat_thread_enable()
  166 __T("failed to create threads", threads); in test_stat_thread_enable()
  168 perf_thread_map__set_pid(threads, 0, 0); in test_stat_thread_enable()
  [all …]
|
/linux-6.14.4/tools/lib/perf/ |
D | threadmap.c |
  47 struct perf_thread_map *threads = thread_map__alloc(nr_threads); in perf_thread_map__new_array() local
  50 if (!threads) in perf_thread_map__new_array()
  54 perf_thread_map__set_pid(threads, i, array ? array[i] : -1); in perf_thread_map__new_array()
  56 threads->nr = nr_threads; in perf_thread_map__new_array()
  57 refcount_set(&threads->refcnt, 1); in perf_thread_map__new_array()
  59 return threads; in perf_thread_map__new_array()
  67 static void perf_thread_map__delete(struct perf_thread_map *threads) in perf_thread_map__delete() argument
  69 if (threads) { in perf_thread_map__delete()
  72 WARN_ONCE(refcount_read(&threads->refcnt) != 0, in perf_thread_map__delete()
  74 for (i = 0; i < threads->nr; i++) in perf_thread_map__delete()
  [all …]
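perf_thread_map__new_array() fills a map from a caller-supplied pid array (or with -1 placeholders when the array is NULL) and sets the initial refcount to one, which is exactly what the matches above show. A small sketch of how a consumer might drive that API; the pids are illustrative, and the program assumes libperf headers plus -lperf:

    #include <perf/threadmap.h>
    #include <stdio.h>

    int main(void)
    {
            pid_t pids[] = { 1234, 1235, 1236 };    /* illustrative pids */
            struct perf_thread_map *threads;

            threads = perf_thread_map__new_array(3, pids);
            if (!threads)
                    return 1;

            for (int i = 0; i < perf_thread_map__nr(threads); i++)
                    printf("entry %d -> pid %d\n", i,
                           (int)perf_thread_map__pid(threads, i));

            /* Drops the reference taken at creation; the map is freed here. */
            perf_thread_map__put(threads);
            return 0;
    }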
|
/linux-6.14.4/tools/perf/bench/ |
D | breakpoint.c |
  33 OPT_UINTEGER('t', "threads", &thread_params.nthreads, "Specify amount of threads"),
  89 pthread_t *threads; in breakpoint_thread() local
  91 threads = calloc(thread_params.nthreads, sizeof(threads[0])); in breakpoint_thread()
  92 if (!threads) in breakpoint_thread()
  98 if (pthread_create(&threads[i], NULL, passive_thread, &done)) in breakpoint_thread()
  104 pthread_join(threads[i], NULL); in breakpoint_thread()
  106 free(threads); in breakpoint_thread()
  111 // then starts nparallel threads which create and join bench_repeat batches of nthreads threads.
  155 printf("# Created/joined %d threads with %d breakpoints and %d parallelism\n", in bench_breakpoint_thread()
  185 OPT_UINTEGER('p', "passive", &enable_params.npassive, "Specify amount of passive threads"),
  [all …]
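The allocate/create/join sequence visible above is the plain POSIX pattern the benchmark repeats in batches. A stand-alone sketch of that pattern; the worker body below is a placeholder, not the benchmark's passive_thread, and the thread count is arbitrary (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *worker(void *arg)
    {
            /* Placeholder body; a real benchmark thread would do work here. */
            (void)arg;
            return NULL;
    }

    int main(void)
    {
            unsigned int nthreads = 8;
            pthread_t *threads;

            threads = calloc(nthreads, sizeof(threads[0]));
            if (!threads)
                    return 1;

            for (unsigned int i = 0; i < nthreads; i++)
                    if (pthread_create(&threads[i], NULL, worker, NULL))
                            return 1;

            for (unsigned int i = 0; i < nthreads; i++)
                    pthread_join(threads[i], NULL);

            printf("created and joined %u threads\n", nthreads);
            free(threads);
            return 0;
    }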
|
D | synthesize.c |
  34 OPT_UINTEGER('m', "min-threads", &min_threads,
  35 "Minimum number of threads in multithreaded bench"),
  36 OPT_UINTEGER('M', "max-threads", &max_threads,
  37 "Maximum number of threads in multithreaded bench"),
  62 struct perf_thread_map *threads, in do_run_single_threaded() argument
  81 target, threads, in do_run_single_threaded()
  116 struct perf_thread_map *threads; in run_single_threaded() local
  125 threads = thread_map__new_by_pid(getpid()); in run_single_threaded()
  126 if (!threads) { in run_single_threaded()
  136 err = do_run_single_threaded(session, threads, &target, false); in run_single_threaded()
  [all …]
|
/linux-6.14.4/Documentation/driver-api/dmaengine/ |
D | dmatest.rst |
  16 test multiple channels at the same time, and it can start multiple threads
  73 (shared) parameters used for all threads will use the new values.
  74 After the channels are specified, each thread is set as pending. All threads
  82 Once started a message like " dmatest: Added 1 threads using dma0chan0" is
  171 dmatest: Added 1 threads using dma0chan2
  179 dmatest: Added 1 threads using dma0chan1
  181 dmatest: Added 1 threads using dma0chan2
  191 dmatest: Added 1 threads using dma0chan0
  192 dmatest: Added 1 threads using dma0chan3
  193 dmatest: Added 1 threads using dma0chan4
  [all …]
|
/linux-6.14.4/Documentation/arch/x86/ |
D | topology.rst |
  24 threads, cores, packages, etc.
  37 - threads
  52 The number of threads in a package.
  97 A core consists of 1 or more threads. It does not matter whether the threads
  98 are SMT- or CMT-type threads.
  103 Threads chapter
  108 AMDs nomenclature for CMT threads is "Compute Unit Core". The kernel always
  115 The cpumask contains all online threads in the package to which a thread
  118 The number of online threads is also printed in /proc/cpuinfo "siblings."
  122 The cpumask contains all online threads in the core to which a thread
  [all …]
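The thread/core/package attributes that topology.rst describes are exported per CPU under /sys/devices/system/cpu/cpuN/topology/. A small sketch that prints a few of the documented files for CPU 0; which attributes are present can vary with kernel version and configuration:

    #include <stdio.h>

    static void print_file(const char *path)
    {
            char buf[256];
            FILE *f = fopen(path, "r");

            if (!f) {
                    perror(path);
                    return;
            }
            if (fgets(buf, sizeof(buf), f))
                    printf("%s: %s", path, buf);    /* sysfs values end in '\n' */
            fclose(f);
    }

    int main(void)
    {
            /* Documented topology attributes for CPU 0; availability may vary. */
            print_file("/sys/devices/system/cpu/cpu0/topology/physical_package_id");
            print_file("/sys/devices/system/cpu/cpu0/topology/core_id");
            print_file("/sys/devices/system/cpu/cpu0/topology/thread_siblings_list");
            return 0;
    }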
|
/linux-6.14.4/tools/testing/selftests/powerpc/dscr/ |
D | dscr_default_test.c |
  6 * it's sysfs interface and then verifies that all threads
  121 struct random_thread_args threads[THREADS]; in dscr_default_random_test() local
  132 FAIL_IF(pthread_barrier_init(&barrier, NULL, THREADS)); in dscr_default_random_test()
  136 for (int i = 0; i < THREADS; i++) { in dscr_default_random_test()
  137 threads[i].expected_system_dscr = &expected_system_dscr; in dscr_default_random_test()
  138 threads[i].rw_lock = &rw_lock; in dscr_default_random_test()
  139 threads[i].barrier = &barrier; in dscr_default_random_test()
  141 FAIL_IF(pthread_create(&threads[i].thread_id, NULL, in dscr_default_random_test()
  142 dscr_default_random_thread, (void *)&threads[i])); in dscr_default_random_test()
  145 for (int i = 0; i < THREADS; i++) in dscr_default_random_test()
  [all …]
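The DSCR selftests use a common pattern: fill a per-thread argument struct, start the workers, and gate them on a pthread_barrier_t so they all enter the racy section together. A self-contained sketch of that pattern; the argument fields are illustrative, not the DSCR test's (build with -pthread):

    #include <pthread.h>
    #include <stdio.h>

    #define NTHREADS 4

    struct worker_args {
            pthread_t thread_id;
            pthread_barrier_t *barrier;
            int index;              /* illustrative per-thread field */
    };

    static void *worker(void *arg)
    {
            struct worker_args *args = arg;

            /* Everyone blocks here until all NTHREADS threads have arrived. */
            pthread_barrier_wait(args->barrier);
            printf("thread %d released\n", args->index);
            return NULL;
    }

    int main(void)
    {
            struct worker_args threads[NTHREADS];
            pthread_barrier_t barrier;

            if (pthread_barrier_init(&barrier, NULL, NTHREADS))
                    return 1;

            for (int i = 0; i < NTHREADS; i++) {
                    threads[i].barrier = &barrier;
                    threads[i].index = i;
                    if (pthread_create(&threads[i].thread_id, NULL, worker, &threads[i]))
                            return 1;
            }

            for (int i = 0; i < NTHREADS; i++)
                    pthread_join(threads[i].thread_id, NULL);

            pthread_barrier_destroy(&barrier);
            return 0;
    }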
|
D | dscr_explicit_test.c |
  135 struct random_thread_args threads[THREADS]; in dscr_explicit_random_test() local
  140 FAIL_IF(pthread_barrier_init(&barrier, NULL, THREADS)); in dscr_explicit_random_test()
  142 for (int i = 0; i < THREADS; i++) { in dscr_explicit_random_test()
  143 threads[i].do_yields = i % 2 == 0; in dscr_explicit_random_test()
  144 threads[i].barrier = &barrier; in dscr_explicit_random_test()
  146 FAIL_IF(pthread_create(&threads[i].thread_id, NULL, in dscr_explicit_random_test()
  147 dscr_explicit_random_thread, (void *)&threads[i])); in dscr_explicit_random_test()
  150 for (int i = 0; i < THREADS; i++) in dscr_explicit_random_test()
  151 FAIL_IF(pthread_join(threads[i].thread_id, NULL)); in dscr_explicit_random_test()
|
/linux-6.14.4/Documentation/power/ |
D | freezing-of-tasks.rst |
  11 kernel threads are controlled during hibernation or system-wide suspend (on some
  20 threads) are regarded as 'freezable' and treated in a special way before the
  31 wakes up all the kernel threads. All freezable tasks must react to that by
  38 tasks are generally frozen before kernel threads.
  45 signal-handling code, but the freezable kernel threads need to call it
  74 kernel threads must call try_to_freeze() somewhere or use one of the
  90 - freezes all tasks (including kernel threads) because we can't freeze
  91 kernel threads without freezing userspace tasks
  94 - thaws only kernel threads; this is particularly useful if we need to do
  95 anything special in between thawing of kernel threads and thawing of
  [all …]
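freezing-of-tasks.rst says that freezable kernel threads must call try_to_freeze() somewhere in their main loop (or use one of the freezer-aware wait helpers). A hedged kernel-side sketch of such a loop; this is illustrative kernel code, not buildable on its own, and the work step is a placeholder:

    #include <linux/freezer.h>
    #include <linux/kthread.h>
    #include <linux/sched.h>

    static int my_freezable_worker(void *data)
    {
            set_freezable();        /* kthreads are not freezable unless they opt in */

            while (!kthread_should_stop()) {
                    /* Blocks here while tasks are being frozen for suspend/hibernate. */
                    try_to_freeze();

                    /* ... the thread's real work would go here ... */

                    schedule_timeout_interruptible(HZ);
            }
            return 0;
    }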
|
/linux-6.14.4/include/uapi/linux/ |
D | membarrier.h |
  34 * @MEMBARRIER_CMD_GLOBAL: Execute a memory barrier on all running threads.
  36 * is ensured that all running threads have passed
  40 * (non-running threads are de facto in such a
  41 * state). This covers threads from all processes
  44 * Execute a memory barrier on all running threads
  48 * is ensured that all running threads have passed
  52 * (non-running threads are de facto in such a
  53 * state). This only covers threads from processes
  70 * threads siblings have passed through a state
  74 * (non-running threads are de facto in such a
  [all …]
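membarrier.h defines the command values for the membarrier(2) system call; there is no glibc wrapper, so it is normally invoked through syscall(). A small sketch that queries the supported commands and then issues a global barrier, assuming a kernel built with membarrier support:

    #define _GNU_SOURCE
    #include <linux/membarrier.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    static int membarrier(int cmd, unsigned int flags, int cpu_id)
    {
            return syscall(__NR_membarrier, cmd, flags, cpu_id);
    }

    int main(void)
    {
            int supported = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);

            if (supported < 0) {
                    perror("membarrier");
                    return 1;
            }
            printf("supported command mask: 0x%x\n", supported);

            if (supported & MEMBARRIER_CMD_GLOBAL) {
                    /* Returns only after all running threads have passed through
                     * a state with full memory ordering, as documented above. */
                    if (membarrier(MEMBARRIER_CMD_GLOBAL, 0, 0))
                            perror("MEMBARRIER_CMD_GLOBAL");
            }
            return 0;
    }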
|
/linux-6.14.4/Documentation/admin-guide/device-mapper/ |
D | vdo.rst |
  106 the number of threads in each group can be configured separately.
  113 The number of threads used to complete bios. Since
  115 outside the vdo volume, threads of this type allow the vdo
  120 The number of threads used to issue bios to the underlying
  121 storage. Threads of this type allow the vdo volume to
  131 The number of threads used to do CPU-intensive work, such
  135 The number of threads used to manage data comparisons for
  140 The number of threads used to manage caching and locking
  145 The number of threads used to manage administration of the
  269 threads: A synonym of 'queues'
  [all …]
|
/linux-6.14.4/tools/testing/selftests/mm/ |
D | migration.c |
  25 pthread_t *threads; in FIXTURE() local
  51 self->threads = malloc(self->nthreads * sizeof(*self->threads)); in FIXTURE_SETUP()
  52 ASSERT_NE(self->threads, NULL); in FIXTURE_SETUP()
  59 free(self->threads); in FIXTURE_TEARDOWN()
  120 * between nodes whilst other threads try and access them triggering the
  129 SKIP(return, "Not enough threads or NUMA nodes available");
  137 if (pthread_create(&self->threads[i], NULL, access_mem, ptr))
  142 ASSERT_EQ(pthread_cancel(self->threads[i]), 0);
  155 SKIP(return, "Not enough threads or NUMA nodes available");
  189 SKIP(return, "Not enough threads or NUMA nodes available");
  [all …]
|
/linux-6.14.4/tools/testing/selftests/vDSO/ |
D | vdso_test_getrandom.c |
  144 enum { TRIALS = 25000000, THREADS = 256 }; enumerator
  202 pthread_t threads[THREADS]; in bench_multi() local
  205 for (size_t i = 0; i < THREADS; ++i) in bench_multi()
  206 ksft_assert(pthread_create(&threads[i], NULL, test_vdso_getrandom, NULL) == 0); in bench_multi()
  207 for (size_t i = 0; i < THREADS; ++i) in bench_multi()
  208 pthread_join(threads[i], NULL); in bench_multi()
  211 …printf(" vdso: %u x %u times in %lu.%09lu seconds\n", TRIALS, THREADS, diff.tv_sec, diff.tv_nsec… in bench_multi()
  214 for (size_t i = 0; i < THREADS; ++i) in bench_multi()
  215 ksft_assert(pthread_create(&threads[i], NULL, test_libc_getrandom, NULL) == 0); in bench_multi()
  216 for (size_t i = 0; i < THREADS; ++i) in bench_multi()
  [all …]
|
/linux-6.14.4/arch/powerpc/kvm/ |
D | book3s_hv_ras.c |
  258 * - On TB error, HMI interrupt is reported on all the threads of the core
  267 * All threads need to co-ordinate before making opal hmi handler.
  268 * All threads will use sibling_subcore_state->in_guest[] (shared by all
  269 * threads in the core) in paca which holds information about whether
  272 * subcore status. Only primary threads from each subcore is responsible
  279 * primary threads to decide who takes up the responsibility.
  286 * - All other threads which are in host will call
  291 * - Once all primary threads clear in_guest[0-3], all of them will invoke
  293 * - Now all threads will wait for TB resync to complete by invoking
  297 * - All other threads will now come out of resync wait loop and proceed
  [all …]
|
/linux-6.14.4/tools/testing/selftests/powerpc/math/ |
D | fpu_signal.c |
  27 * worker threads
  73 int i, j, rc, threads; in test_signal_fpu() local
  77 threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; in test_signal_fpu()
  78 tids = malloc(threads * sizeof(pthread_t)); in test_signal_fpu()
  82 threads_starting = threads; in test_signal_fpu()
  83 for (i = 0; i < threads; i++) { in test_signal_fpu()
  94 printf("\tSending signals to all threads %d times...", ITERATIONS); in test_signal_fpu()
  96 for (j = 0; j < threads; j++) { in test_signal_fpu()
  105 for (i = 0; i < threads; i++) { in test_signal_fpu()
|
D | fpu_preempt.c |
  8 * threads and a long wait. As such, a successful test doesn't mean much but
  28 * worker threads
  53 int i, rc, threads; in test_preempt_fpu() local
  56 threads = sysconf(_SC_NPROCESSORS_ONLN) * THREAD_FACTOR; in test_preempt_fpu()
  57 tids = malloc((threads) * sizeof(pthread_t)); in test_preempt_fpu()
  61 threads_starting = threads; in test_preempt_fpu()
  62 for (i = 0; i < threads; i++) { in test_preempt_fpu()
  84 for (i = 0; i < threads; i++) { in test_preempt_fpu()
|
/linux-6.14.4/samples/pktgen/ |
D | pktgen_sample06_numa_awared_queue_irq_affinity.sh |
  3 # Multiqueue: Using pktgen threads for sending on multiple CPUs
  4 # * adding devices to kernel threads which are in the same NUMA node
  5 # * bound devices queue's irq affinity to the threads, 1:1 mapping
  32 [ $THREADS -gt ${#irq_array[*]} -o $THREADS -gt ${#cpu_array[*]} ] && \
  33 err 1 "Thread number $THREADS exceeds: min (${#irq_array[*]},${#cpu_array[*]})"
  52 # Threads are specified with parameter -t value in $THREADS
  53 for ((i = 0; i < $THREADS; i++)); do
  109 for ((i = 0; i < $THREADS; i++)); do
|
/linux-6.14.4/tools/testing/selftests/dma/ |
D | dma_map_benchmark.c |
  29 int threads = 1, seconds = 20, node = -1; in main() local
  40 threads = atoi(optarg); in main()
  65 if (threads <= 0 || threads > DMA_MAP_MAX_THREADS) { in main()
  66 fprintf(stderr, "invalid number of threads, must be in 1-%d\n", in main()
  108 map.threads = threads; in main()
  120 printf("dma mapping benchmark: threads:%d seconds:%d node:%d dir:%s granule: %d\n", in main()
  121 threads, seconds, node, dir[directions], granule); in main()
|