// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <[email protected]>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <[email protected]>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static inline void padata_get_pd(struct parallel_data *pd)
{
	refcount_inc(&pd->refcnt);
}

static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
{
	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

static inline void padata_put_pd(struct parallel_data *pd)
{
	padata_put_pd_cnt(pd, 1);
}

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence number to a cpu by taking seq_nr modulo the
	 * number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

/*
 * This function is marked __ref because this function may be optimized in such
 * a way that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with clang
 * LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other work_fn
 * values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock_bh(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock_bh(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock_bh(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock_bh(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	padata_get_pd(pd);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
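
/*
 * Usage sketch (illustrative, not part of this file): a hypothetical caller
 * embeds struct padata_priv in its own request and submits it.  All names
 * other than the padata_* API below are made up for the example.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		// caller-specific fields ...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		do_parallel_part(req);		// hypothetical helper
 *		padata_do_serial(padata);	// mandatory for every object
 *	}
 *
 *	// Submission, with my_parallel/my_serial set in req->padata:
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 */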

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time.  Calculating in which percpu reorder
	 * queue the next object will arrive takes some time.  A spinlock
	 * would be highly contended.  Also it is not clear in which order
	 * the objects arrive at the reorder queues.  So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment.  Therefore we use a trylock and let the holder of the lock
	 * handle all the objects enqueued during the hold time of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial.  Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
		/*
		 * Another context (e.g. padata_serial_worker) may finish the
		 * request.  To avoid a use-after-free, take a pd reference
		 * here and drop it after reorder_work finishes.
		 */
		padata_get_pd(pd);
		queue_work(pinst->serial_wq, &pd->reorder_work);
	}
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
	/* Pairs with putting the reorder_work in the serial_wq */
	padata_put_pd(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	padata_put_pd_cnt(pd, cnt);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		/*
		 * Compare by difference to consider integer wrap around.
		 * For example, a seq_nr of UINT_MAX - 1 sorts before a
		 * seq_nr of 2 because the unsigned difference, read as a
		 * signed int, is negative.
		 */
		if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
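
/*
 * Continuing the sketch above padata_do_parallel(): the paired serial
 * callback (hypothetical name) runs on the requested cb_cpu, in the
 * original submission order, with BHs off.
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		finish_in_submission_order(req);	// hypothetical helper
 *	}
 */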

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks, nid;
	static atomic_t last_used_nid __initdata;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job	       = job;
	ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 * Ensure chunk_size is at least 1 to prevent divide-by-0
	 * panic in padata_mt_helper().
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = max(ps.chunk_size, 1ul);
	ps.chunk_size = roundup(ps.chunk_size, job->align);
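
	/*
	 * Worked example with illustrative numbers: size = 4096, nworks = 4,
	 * min_chunk = 64, align = 1.  4096 / (4 * 4) = 256, which already
	 * satisfies the minimums, so each helper call processes 256 units
	 * and the workers pull 16 chunks in total from the shared job.
	 */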

	list_for_each_entry(pw, &works, pw_list)
		if (job->numa_aware) {
			int old_node = atomic_read(&last_used_nid);

			do {
				nid = next_node_in(old_node, node_states[N_CPU]);
			} while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
			queue_work_node(nid, system_unbound_wq, &pw->pw_work);
		} else {
			queue_work(system_unbound_wq, &pw->pw_work);
		}

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
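
/*
 * Usage sketch (illustrative, __init context only): describe the range and
 * thread function in a struct padata_mt_job and submit it.  init_range and
 * nr_items are hypothetical.
 *
 *	static void __init init_range(unsigned long start, unsigned long end,
 *				      void *arg)
 *	{
 *		// initialize items in [start, end) ...
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn	= init_range,
 *		.fn_arg		= NULL,
 *		.start		= 0,
 *		.size		= nr_items,
 *		.align		= 1,
 *		.min_chunk	= 1024,
 *		.max_threads	= 16,
 *	};
 *
 *	padata_do_multithreaded(&job);
 */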

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		padata_put_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or serial cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
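
/*
 * Usage sketch (illustrative): restricting parallel work to CPU 0 and CPU 1.
 * Error handling is elided.
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(0, mask);
 *		cpumask_set_cpu(1, mask);
 *		err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *		free_cpumask_var(mask);
 *	}
 */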

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	/*
	 * Wait for all _do_serial calls to finish to avoid touching
	 * freed pd's and ps's.
	 */
	synchronize_rcu();

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	padata_put_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);
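
/*
 * Typical lifecycle (illustrative): one instance, one or more shells, jobs
 * submitted through padata_do_parallel(), torn down in reverse order.
 *
 *	struct padata_instance *pinst = padata_alloc("my_inst");
 *	struct padata_shell *ps = pinst ? padata_alloc_shell(pinst) : NULL;
 *
 *	// ... submit jobs with padata_do_parallel(ps, ...) ...
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */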

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}