// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>

#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};

static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
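
/*
 * Worked example (hypothetical values, not from a real run): with
 * pd->cpumask.pcpu = {0, 2, 5} the mask weight is 3, so seq_nr 7 gives
 * cpu_index 7 % 3 = 1, and padata_index_to_cpu() walks the mask to return
 * CPU 2.  Consecutive sequence numbers therefore round-robin across the
 * parallel cpumask.
 */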

static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}

/*
 * This function is marked __ref because it may be optimized in such a way
 * that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with
 * clang LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other
 * work_fn values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}

static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock(&padata_works_lock);

	return i;
}

static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}

static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock(&padata_works_lock);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and,
 *          if none is found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
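
/*
 * Minimal submission sketch (hypothetical, not part of this file): a user
 * embeds struct padata_priv in its own request, sets the parallel/serial
 * callbacks (see the sketch after padata_do_serial() below) and submits the
 * request through a previously allocated shell.
 *
 *	struct my_request {
 *		struct padata_priv	padata;
 *		int			result;
 *	};
 *
 *	static int my_submit(struct padata_shell *ps, struct my_request *req)
 *	{
 *		int cb_cpu = cpumask_first(cpu_online_mask);
 *
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;
 *		return padata_do_parallel(ps, &req->padata, &cb_cpu);
 *	}
 *
 * A return value of -EBUSY means the instance is being reset and the caller
 * may retry; -EINVAL means the instance is not usable or no callback CPU
 * could be found.
 */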

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;
	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * take care of all the objects enqueued while the lock is held.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial.  Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}
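
/*
 * Illustrative trace (hypothetical): three objects get seq_nr 0, 1 and 2 and
 * hash to successive CPUs in pd->cpumask.pcpu.  If the job with seq_nr 1
 * finishes first, its padata_do_serial() enqueues it and enters the loop
 * above, but padata_find_next() sees no object on the queue of pd->cpu (the
 * CPU that seq_nr 0 hashed to) and returns NULL.  Once seq_nr 0 completes,
 * the loop drains 0 and then 1; serial callbacks thus always run in
 * submission order.
 */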

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		if (cur->seq_nr < padata->seq_nr)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
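
/*
 * A minimal callback pair (hypothetical sketch, matching the submission
 * sketch above): the parallel callback does the CPU-intensive work and must
 * hand the object back via padata_do_serial(); the serial callback then runs
 * in submission order on the chosen cb_cpu.
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		req->result = my_expensive_computation(req);
 *		padata_do_serial(padata);
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		my_complete_in_order(req);
 *	}
 */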

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pinst->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}

/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks, nid;
	static atomic_t last_used_nid __initdata;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job	       = job;
	ps.nworks      = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = roundup(ps.chunk_size, job->align);

	list_for_each_entry(pw, &works, pw_list)
		if (job->numa_aware) {
			int old_node = atomic_read(&last_used_nid);

			do {
				nid = next_node_in(old_node, node_states[N_CPU]);
			} while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
			queue_work_node(nid, system_unbound_wq, &pw->pw_work);
		} else {
			queue_work(system_unbound_wq, &pw->pw_work);
		}

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
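
/*
 * Example job description (hypothetical values): to initialize 1M items in
 * parallel at boot with up to 8 threads, a caller could write:
 *
 *	struct padata_mt_job job = {
 *		.thread_fn	= my_init_range,
 *		.fn_arg		= my_ctx,
 *		.start		= 0,
 *		.size		= 1UL << 20,
 *		.align		= 1,
 *		.min_chunk	= 1UL << 10,
 *		.max_threads	= 8,
 *	};
 *
 *	padata_do_multithreaded(&job);
 *
 * With ps.nworks == 8, chunk_size becomes (1 << 20) / (8 * 4) = 32768, well
 * above min_chunk, so each helper grabs 32768 items per call to thread_fn
 * and the resulting 32 chunks are load balanced across the 8 workers.
 */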

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}

/* Allocate and initialize the internal cpumask-dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask identified by @cpumask_type to the
 *                      value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
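
/*
 * Usage sketch (hypothetical): restrict the parallel workers of an instance
 * to CPUs 0-3.  The mask is copied internally, so the caller frees its own
 * temporary cpumask afterwards.
 *
 *	cpumask_var_t mask;
 *	int cpu, err;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	for (cpu = 0; cpu < 4; cpu++)
 *		cpumask_set_cpu(cpu, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */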

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)		\
	static struct padata_sysfs_entry _name##_attr = \
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
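
/*
 * Note: the instance kobject is only initialized in padata_alloc() below; it
 * is the padata user that adds it to sysfs.  For example, pcrypt registers
 * its instances under /sys/kernel/pcrypt/, giving paths like
 * /sys/kernel/pcrypt/pencrypt/parallel_cpumask.
 */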

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
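
/*
 * Setup/teardown sketch (hypothetical module code): an instance is created
 * once, shells are allocated per user context, and teardown goes in reverse
 * order -- all shells must be freed before the instance.
 *
 *	static struct padata_instance *pinst;
 *	static struct padata_shell *ps;
 *
 *	static int __init my_init(void)
 *	{
 *		pinst = padata_alloc("my_inst");
 *		if (!pinst)
 *			return -ENOMEM;
 *		ps = padata_alloc_shell(pinst);
 *		if (!ps) {
 *			padata_free(pinst);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 *
 *	static void __exit my_exit(void)
 *	{
 *		padata_free_shell(ps);
 *		padata_free(pinst);
 *	}
 */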

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	if (refcount_dec_and_test(&pd->refcnt))
		padata_free_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}
1169