// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/cpumask.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <linux/numa.h>

/**
 * cpumask_next_wrap - helper to implement for_each_cpu_wrap
 * @n: the cpu prior to the place to search
 * @mask: the cpumask pointer
 * @start: the start point of the iteration
 * @wrap: assume @n crossing @start terminates the iteration
 *
 * Return: >= nr_cpu_ids on completion
 *
 * Note: the @wrap argument is required for the start condition when
 * we cannot assume @start is set in @mask.
 */
unsigned int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
{
	unsigned int next;

again:
	next = cpumask_next(n, mask);

	if (wrap && n < start && next >= start) {
		/* Wrapped past @start: the iteration is complete. */
		return nr_cpumask_bits;

	} else if (next >= nr_cpumask_bits) {
		/* Ran off the end of the mask: wrap and rescan from 0. */
		wrap = true;
		n = -1;
		goto again;
	}

	return next;
}
EXPORT_SYMBOL(cpumask_next_wrap);
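
/*
 * A minimal usage sketch: for_each_cpu_wrap() in <linux/cpumask.h> is
 * built on this helper, roughly along these lines (illustrative, not the
 * verbatim macro):
 *
 *	for (cpu = cpumask_next_wrap(start - 1, mask, start, false);
 *	     cpu < nr_cpumask_bits;
 *	     cpu = cpumask_next_wrap(cpu, mask, start, true))
 *		do_something(cpu);
 */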

/* These are not inline because of header tangles. */
#ifdef CONFIG_CPUMASK_OFFSTACK
/**
 * alloc_cpumask_var_node - allocate a struct cpumask on a given node
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 * @flags: GFP_ flags
 * @node: memory node from which to allocate or %NUMA_NO_NODE
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is a nop
 * returning a constant 1 (in <linux/cpumask.h>).
 *
 * Return: TRUE if memory allocation succeeded, FALSE otherwise.
 *
 * In addition, the mask will be NULL if the allocation fails. Note that
 * gcc is usually smart enough to know that the mask can never be NULL
 * when CONFIG_CPUMASK_OFFSTACK=n, and eliminates the dead NULL checks in
 * that case too.
 */
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
{
	*mask = kmalloc_node(cpumask_size(), flags, node);

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (!*mask) {
		printk(KERN_ERR "=> alloc_cpumask_var: failed!\n");
		dump_stack();
	}
#endif

	return *mask != NULL;
}
EXPORT_SYMBOL(alloc_cpumask_var_node);
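
/*
 * A minimal usage sketch for the allocate/use/free pattern, assuming the
 * caller runs in process context; "policy_mask" is an illustrative name,
 * not something defined in this file:
 *
 *	cpumask_var_t tmp;
 *
 *	if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_and(tmp, cpu_online_mask, policy_mask);
 *	... operate on tmp ...
 *	free_cpumask_var(tmp);
 */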

/**
 * alloc_bootmem_cpumask_var - allocate a struct cpumask from the bootmem arena.
 * @mask: pointer to cpumask_var_t where the cpumask is returned
 *
 * Only defined when CONFIG_CPUMASK_OFFSTACK=y, otherwise it is a nop
 * (in <linux/cpumask.h>).
 * Either returns an allocated (zero-filled) cpumask, or causes the
 * system to panic.
 */
void __init alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
	*mask = memblock_alloc_or_panic(cpumask_size(), SMP_CACHE_BYTES);
}

/**
 * free_cpumask_var - frees memory allocated for a struct cpumask.
 * @mask: cpumask to free
 *
 * This is safe on a NULL mask.
 */
void free_cpumask_var(cpumask_var_t mask)
{
	kfree(mask);
}
EXPORT_SYMBOL(free_cpumask_var);

/**
 * free_bootmem_cpumask_var - frees result of alloc_bootmem_cpumask_var
 * @mask: cpumask to free
 */
void __init free_bootmem_cpumask_var(cpumask_var_t mask)
{
	memblock_free(mask, cpumask_size());
}
#endif
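
/*
 * A minimal boot-time sketch pairing the bootmem variants above; the
 * function and variable names are illustrative, not from this file:
 *
 *	static cpumask_var_t early_mask __initdata;
 *
 *	void __init early_mask_setup(void)
 *	{
 *		alloc_bootmem_cpumask_var(&early_mask);
 *		cpumask_copy(early_mask, cpu_possible_mask);
 *		... consume early_mask during boot ...
 *		free_bootmem_cpumask_var(early_mask);
 *	}
 */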

/**
 * cpumask_local_spread - select the i'th cpu based on NUMA distances
 * @i: index number
 * @node: local numa_node
 *
 * Return: online CPU according to a NUMA-aware policy; local cpus are
 * returned first, followed by non-local ones, then it wraps around.
 *
 * For those who want to enumerate all CPUs based on their NUMA distances,
 * i.e. call this function in a loop, like:
 *
 *	for (i = 0; i < num_online_cpus(); i++) {
 *		cpu = cpumask_local_spread(i, node);
 *		do_something(cpu);
 *	}
 *
 * there's a better alternative based on for_each()-like iterators:
 *
 *	for_each_numa_hop_mask(mask, node) {
 *		for_each_cpu_andnot(cpu, mask, prev)
 *			do_something(cpu);
 *		prev = mask;
 *	}
 *
 * The iterator-based enumeration is more verbose, but asymptotically
 * cheaper: its complexity is O(sched_domains_numa_levels * nr_cpu_ids),
 * while calling cpumask_local_spread() for each cpu costs
 * O(sched_domains_numa_levels * nr_cpu_ids * log(nr_cpu_ids)).
 */
unsigned int cpumask_local_spread(unsigned int i, int node)
{
	unsigned int cpu;

	/* Wrap: we always want a cpu. */
	i %= num_online_cpus();

	cpu = sched_numa_find_nth_cpu(cpu_online_mask, i, node);

	WARN_ON(cpu >= nr_cpu_ids);
	return cpu;
}
EXPORT_SYMBOL(cpumask_local_spread);
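
/*
 * A minimal usage sketch: a driver spreading per-queue interrupt affinity
 * across CPUs close to its device's NUMA node. "nr_queues" and "queue_irq"
 * are illustrative names, not from this file:
 *
 *	for (i = 0; i < nr_queues; i++) {
 *		cpu = cpumask_local_spread(i, dev_to_node(dev));
 *		irq_set_affinity_hint(queue_irq[i], cpumask_of(cpu));
 *	}
 */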

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
 * cpumask_any_and_distribute - Return an arbitrary cpu within src1p & src2p.
 * @src1p: first &cpumask for intersection
 * @src2p: second &cpumask for intersection
 *
 * Iterated calls using the same @src1p and @src2p will be distributed within
 * their intersection.
 *
 * Return: >= nr_cpu_ids if the intersection is empty.
 */
unsigned int cpumask_any_and_distribute(const struct cpumask *src1p,
					const struct cpumask *src2p)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);

	/* Search circularly from prev + 1 so repeated calls rotate. */
	next = find_next_and_bit_wrap(cpumask_bits(src1p), cpumask_bits(src2p),
				      nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
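
/*
 * A minimal usage sketch: picking a target CPU for a work item while
 * rotating through the CPUs allowed by both masks. "allowed_mask", "wq"
 * and "work" are illustrative names, not from this file:
 *
 *	cpu = cpumask_any_and_distribute(allowed_mask, cpu_online_mask);
 *	if (cpu < nr_cpu_ids)
 *		queue_work_on(cpu, wq, &work);
 */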

/**
 * cpumask_any_distribute - Return an arbitrary cpu from srcp
 * @srcp: &cpumask for selection
 *
 * Iterated calls using the same @srcp will be distributed within the mask.
 *
 * Return: >= nr_cpu_ids if @srcp is empty.
 */
unsigned int cpumask_any_distribute(const struct cpumask *srcp)
{
	unsigned int next, prev;

	/* NOTE: our first selection will skip 0. */
	prev = __this_cpu_read(distribute_cpu_mask_prev);
	next = find_next_bit_wrap(cpumask_bits(srcp), nr_cpumask_bits, prev + 1);
	if (next < nr_cpu_ids)
		__this_cpu_write(distribute_cpu_mask_prev, next);

	return next;
}
EXPORT_SYMBOL(cpumask_any_distribute);