xref: /linux/kernel/padata.c (revision bd4af432cc71b5fbfe4833510359a6ad3ada250d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * padata.c - generic interface to process data streams in parallel
4  *
5  * See Documentation/core-api/padata.rst for more information.
6  *
7  * Copyright (C) 2008, 2009 secunet Security Networks AG
8  * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
9  *
10  * This program is free software; you can redistribute it and/or modify it
11  * under the terms and conditions of the GNU General Public License,
12  * version 2, as published by the Free Software Foundation.
13  *
14  * This program is distributed in the hope it will be useful, but WITHOUT
15  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
16  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
17  * more details.
18  *
19  * You should have received a copy of the GNU General Public License along with
20  * this program; if not, write to the Free Software Foundation, Inc.,
21  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
22  */
23 
24 #include <linux/export.h>
25 #include <linux/cpumask.h>
26 #include <linux/err.h>
27 #include <linux/cpu.h>
28 #include <linux/padata.h>
29 #include <linux/mutex.h>
30 #include <linux/sched.h>
31 #include <linux/slab.h>
32 #include <linux/sysfs.h>
33 #include <linux/rcupdate.h>
34 #include <linux/module.h>
35 
36 #define MAX_OBJ_NUM 1000
37 
38 static void padata_free_pd(struct parallel_data *pd);
39 
40 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
41 {
42 	int cpu, target_cpu;
43 
44 	target_cpu = cpumask_first(pd->cpumask.pcpu);
45 	for (cpu = 0; cpu < cpu_index; cpu++)
46 		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);
47 
48 	return target_cpu;
49 }
50 
51 static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
52 {
53 	/*
54 	 * Hash the sequence numbers to the cpus by taking
55 	 * seq_nr modulo the number of cpus in use.
56 	 */
57 	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
58 
59 	return padata_index_to_cpu(pd, cpu_index);
60 }
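/*
 * For example (illustrative numbers): with pd->cpumask.pcpu = {1,3,5} the
 * mask weight is 3, so seq_nr 7 gives cpu_index 7 % 3 = 1 and
 * padata_index_to_cpu() walks the mask to return CPU 3.
 */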
61 
62 static void padata_parallel_worker(struct work_struct *parallel_work)
63 {
64 	struct padata_parallel_queue *pqueue;
65 	LIST_HEAD(local_list);
66 
67 	local_bh_disable();
68 	pqueue = container_of(parallel_work,
69 			      struct padata_parallel_queue, work);
70 
71 	spin_lock(&pqueue->parallel.lock);
72 	list_replace_init(&pqueue->parallel.list, &local_list);
73 	spin_unlock(&pqueue->parallel.lock);
74 
75 	while (!list_empty(&local_list)) {
76 		struct padata_priv *padata;
77 
78 		padata = list_entry(local_list.next,
79 				    struct padata_priv, list);
80 
81 		list_del_init(&padata->list);
82 
83 		padata->parallel(padata);
84 	}
85 
86 	local_bh_enable();
87 }
88 
89 /**
90  * padata_do_parallel - padata parallelization function
91  *
92  * @ps: padata shell
93  * @padata: object to be parallelized
94  * @cb_cpu: pointer to the CPU that the serialization callback function should
95  *          run on.  If it's not in the serial cpumask of @ps's instance
96  *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and, if
97  *          none is found, returns -EINVAL.
98  *
99  * The parallelization callback function will run with BHs off.
100  * Note: Every object which is parallelized by padata_do_parallel
101  * must be seen by padata_do_serial.
102  *
103  * Return: 0 on success or else negative error code.
104  */
105 int padata_do_parallel(struct padata_shell *ps,
106 		       struct padata_priv *padata, int *cb_cpu)
107 {
108 	struct padata_instance *pinst = ps->pinst;
109 	int i, cpu, cpu_index, target_cpu, err;
110 	struct padata_parallel_queue *queue;
111 	struct parallel_data *pd;
112 
113 	rcu_read_lock_bh();
114 
115 	pd = rcu_dereference_bh(ps->pd);
116 
117 	err = -EINVAL;
118 	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
119 		goto out;
120 
121 	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
122 		if (!cpumask_weight(pd->cpumask.cbcpu))
123 			goto out;
124 
125 		/* Select an alternate fallback CPU and notify the caller. */
126 		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
127 
128 		cpu = cpumask_first(pd->cpumask.cbcpu);
129 		for (i = 0; i < cpu_index; i++)
130 			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);
131 
132 		*cb_cpu = cpu;
133 	}
134 
135 	err = -EBUSY;
136 	if (pinst->flags & PADATA_RESET)
137 		goto out;
138 
139 	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
140 		goto out;
141 
142 	err = 0;
143 	atomic_inc(&pd->refcnt);
144 	padata->pd = pd;
145 	padata->cb_cpu = *cb_cpu;
146 
147 	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
148 	target_cpu = padata_cpu_hash(pd, padata->seq_nr);
149 	padata->cpu = target_cpu;
150 	queue = per_cpu_ptr(pd->pqueue, target_cpu);
151 
152 	spin_lock(&queue->parallel.lock);
153 	list_add_tail(&padata->list, &queue->parallel.list);
154 	spin_unlock(&queue->parallel.lock);
155 
156 	queue_work(pinst->parallel_wq, &queue->work);
157 
158 out:
159 	rcu_read_unlock_bh();
160 
161 	return err;
162 }
163 EXPORT_SYMBOL(padata_do_parallel);
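/*
 * Example (illustrative sketch, not part of this file): a minimal caller of
 * padata_do_parallel().  struct my_request, the my_* names and the shell
 * passed to my_submit() are hypothetical; error handling is elided.
 */
struct my_request {
	struct padata_priv padata;	/* embedded padata control structure */
	/* ... caller's payload ... */
};

static void my_parallel(struct padata_priv *padata)
{
	/* Heavy per-object work runs here, in parallel and with BHs off. */

	/* Every parallelized object must be fed to the serializer. */
	padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
	/* Objects arrive here in submission order, on padata->cb_cpu. */
}

static int my_submit(struct padata_shell *my_shell, struct my_request *req)
{
	/* A fallback is chosen if CPU 0 is not in the serial cpumask. */
	int cb_cpu = 0;

	req->padata.parallel = my_parallel;
	req->padata.serial = my_serial;

	return padata_do_parallel(my_shell, &req->padata, &cb_cpu);
}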
164 
165 /*
166  * padata_find_next - Find the next object that needs serialization.
167  *
168  * Return:
169  * * A pointer to the control struct of the next object that needs
170  *   serialization, if present in one of the percpu reorder queues.
171  * * NULL, if the next object that needs serialization is still being
172  *   processed in parallel on another cpu and is not yet present in
173  *   this cpu's reorder queue.
174  */
175 static struct padata_priv *padata_find_next(struct parallel_data *pd,
176 					    bool remove_object)
177 {
178 	struct padata_parallel_queue *next_queue;
179 	struct padata_priv *padata;
180 	struct padata_list *reorder;
181 	int cpu = pd->cpu;
182 
183 	next_queue = per_cpu_ptr(pd->pqueue, cpu);
184 	reorder = &next_queue->reorder;
185 
186 	spin_lock(&reorder->lock);
187 	if (list_empty(&reorder->list)) {
188 		spin_unlock(&reorder->lock);
189 		return NULL;
190 	}
191 
192 	padata = list_entry(reorder->list.next, struct padata_priv, list);
193 
194 	/*
195 	 * Checks the rare case where two or more parallel jobs have hashed to
196 	 * the same CPU and one of the later ones finishes first.
197 	 */
198 	if (padata->seq_nr != pd->processed) {
199 		spin_unlock(&reorder->lock);
200 		return NULL;
201 	}
202 
203 	if (remove_object) {
204 		list_del_init(&padata->list);
205 		++pd->processed;
206 		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
207 	}
208 
209 	spin_unlock(&reorder->lock);
210 	return padata;
211 }
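/*
 * For example (illustrative): with three CPUs in the parallel mask, jobs
 * with seq_nr 0 and 3 hash to the same reorder queue.  If job 3 finishes
 * first it sits at the head of that queue, but pd->processed is still 0,
 * so NULL is returned until job 0 arrives.
 */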
212 
213 static void padata_reorder(struct parallel_data *pd)
214 {
215 	struct padata_instance *pinst = pd->ps->pinst;
216 	int cb_cpu;
217 	struct padata_priv *padata;
218 	struct padata_serial_queue *squeue;
219 	struct padata_parallel_queue *next_queue;
220 
221 	/*
222 	 * We need to ensure that only one cpu can work on dequeueing the
223 	 * reorder queue at a time. Calculating in which percpu reorder
224 	 * queue the next object will arrive takes some time. A spinlock
225 	 * would be highly contended. Also it is not clear in which order
226 	 * the objects arrive at the reorder queues. So a cpu could wait to
227 	 * get the lock just to notice that there is nothing to do at the
228 	 * moment. Therefore we use a trylock and let the holder of the
229 	 * lock take care of all the objects enqueued during its hold time.
230 	 */
231 	if (!spin_trylock_bh(&pd->lock))
232 		return;
233 
234 	while (1) {
235 		padata = padata_find_next(pd, true);
236 
237 		/*
238 		 * If the next object that needs serialization is being
239 		 * processed in parallel on another cpu and is still on its
240 		 * way to the cpu's reorder queue, there is nothing to do for now.
241 		 */
242 		if (!padata)
243 			break;
244 
245 		cb_cpu = padata->cb_cpu;
246 		squeue = per_cpu_ptr(pd->squeue, cb_cpu);
247 
248 		spin_lock(&squeue->serial.lock);
249 		list_add_tail(&padata->list, &squeue->serial.list);
250 		spin_unlock(&squeue->serial.lock);
251 
252 		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
253 	}
254 
255 	spin_unlock_bh(&pd->lock);
256 
257 	/*
258 	 * The next object that needs serialization might have arrived at
259 	 * the reorder queues in the meantime.
260 	 *
261 	 * Ensure reorder queue is read after pd->lock is dropped so we see
262 	 * new objects from another task in padata_do_serial.  Pairs with
263 	 * smp_mb__after_atomic in padata_do_serial.
264 	 */
265 	smp_mb();
266 
267 	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
268 	if (!list_empty(&next_queue->reorder.list) &&
269 	    padata_find_next(pd, false))
270 		queue_work(pinst->serial_wq, &pd->reorder_work);
271 }
272 
273 static void invoke_padata_reorder(struct work_struct *work)
274 {
275 	struct parallel_data *pd;
276 
277 	local_bh_disable();
278 	pd = container_of(work, struct parallel_data, reorder_work);
279 	padata_reorder(pd);
280 	local_bh_enable();
281 }
282 
283 static void padata_serial_worker(struct work_struct *serial_work)
284 {
285 	struct padata_serial_queue *squeue;
286 	struct parallel_data *pd;
287 	LIST_HEAD(local_list);
288 	int cnt;
289 
290 	local_bh_disable();
291 	squeue = container_of(serial_work, struct padata_serial_queue, work);
292 	pd = squeue->pd;
293 
294 	spin_lock(&squeue->serial.lock);
295 	list_replace_init(&squeue->serial.list, &local_list);
296 	spin_unlock(&squeue->serial.lock);
297 
298 	cnt = 0;
299 
300 	while (!list_empty(&local_list)) {
301 		struct padata_priv *padata;
302 
303 		padata = list_entry(local_list.next,
304 				    struct padata_priv, list);
305 
306 		list_del_init(&padata->list);
307 
308 		padata->serial(padata);
309 		cnt++;
310 	}
311 	local_bh_enable();
312 
313 	if (atomic_sub_and_test(cnt, &pd->refcnt))
314 		padata_free_pd(pd);
315 }
316 
317 /**
318  * padata_do_serial - padata serialization function
319  *
320  * @padata: object to be serialized.
321  *
322  * padata_do_serial must be called for every parallelized object.
323  * The serialization callback function will run with BHs off.
324  */
325 void padata_do_serial(struct padata_priv *padata)
326 {
327 	struct parallel_data *pd = padata->pd;
328 	struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
329 							   padata->cpu);
330 	struct padata_priv *cur;
331 
332 	spin_lock(&pqueue->reorder.lock);
333 	/* Sort in ascending order of sequence number. */
334 	list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
335 		if (cur->seq_nr < padata->seq_nr)
336 			break;
337 	list_add(&padata->list, &cur->list);
338 	spin_unlock(&pqueue->reorder.lock);
339 
340 	/*
341 	 * Ensure the addition to the reorder list is ordered correctly
342 	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
343 	 * in padata_reorder.
344 	 */
345 	smp_mb__after_atomic();
346 
347 	padata_reorder(pd);
348 }
349 EXPORT_SYMBOL(padata_do_serial);
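/*
 * For example (illustrative): if the reorder list for padata->cpu already
 * holds seq_nr 2 and 4, inserting seq_nr 3 walks backwards past 4, stops
 * at 2 and links the new object after it, leaving the list as 2, 3, 4.
 */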
350 
351 static int padata_setup_cpumasks(struct padata_instance *pinst)
352 {
353 	struct workqueue_attrs *attrs;
354 	int err;
355 
356 	attrs = alloc_workqueue_attrs();
357 	if (!attrs)
358 		return -ENOMEM;
359 
360 	/* Restrict parallel_wq workers to pinst->cpumask.pcpu. */
361 	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
362 	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
363 	free_workqueue_attrs(attrs);
364 
365 	return err;
366 }
367 
368 static int pd_setup_cpumasks(struct parallel_data *pd,
369 			     const struct cpumask *pcpumask,
370 			     const struct cpumask *cbcpumask)
371 {
372 	int err = -ENOMEM;
373 
374 	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
375 		goto out;
376 	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
377 		goto free_pcpu_mask;
378 
379 	cpumask_copy(pd->cpumask.pcpu, pcpumask);
380 	cpumask_copy(pd->cpumask.cbcpu, cbcpumask);
381 
382 	return 0;
383 
384 free_pcpu_mask:
385 	free_cpumask_var(pd->cpumask.pcpu);
386 out:
387 	return err;
388 }
389 
390 static void __padata_list_init(struct padata_list *pd_list)
391 {
392 	INIT_LIST_HEAD(&pd_list->list);
393 	spin_lock_init(&pd_list->lock);
394 }
395 
396 /* Initialize all percpu queues used by serial workers */
397 static void padata_init_squeues(struct parallel_data *pd)
398 {
399 	int cpu;
400 	struct padata_serial_queue *squeue;
401 
402 	for_each_cpu(cpu, pd->cpumask.cbcpu) {
403 		squeue = per_cpu_ptr(pd->squeue, cpu);
404 		squeue->pd = pd;
405 		__padata_list_init(&squeue->serial);
406 		INIT_WORK(&squeue->work, padata_serial_worker);
407 	}
408 }
409 
410 /* Initialize all percpu queues used by parallel workers */
411 static void padata_init_pqueues(struct parallel_data *pd)
412 {
413 	int cpu;
414 	struct padata_parallel_queue *pqueue;
415 
416 	for_each_cpu(cpu, pd->cpumask.pcpu) {
417 		pqueue = per_cpu_ptr(pd->pqueue, cpu);
418 
419 		__padata_list_init(&pqueue->reorder);
420 		__padata_list_init(&pqueue->parallel);
421 		INIT_WORK(&pqueue->work, padata_parallel_worker);
422 		atomic_set(&pqueue->num_obj, 0);
423 	}
424 }
425 
426 /* Allocate and initialize the internal cpumask-dependent resources. */
427 static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
428 {
429 	struct padata_instance *pinst = ps->pinst;
430 	const struct cpumask *cbcpumask;
431 	const struct cpumask *pcpumask;
432 	struct parallel_data *pd;
433 
434 	cbcpumask = pinst->rcpumask.cbcpu;
435 	pcpumask = pinst->rcpumask.pcpu;
436 
437 	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
438 	if (!pd)
439 		goto err;
440 
441 	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
442 	if (!pd->pqueue)
443 		goto err_free_pd;
444 
445 	pd->squeue = alloc_percpu(struct padata_serial_queue);
446 	if (!pd->squeue)
447 		goto err_free_pqueue;
448 
449 	pd->ps = ps;
450 	if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
451 		goto err_free_squeue;
452 
453 	padata_init_pqueues(pd);
454 	padata_init_squeues(pd);
455 	atomic_set(&pd->seq_nr, -1);
456 	atomic_set(&pd->refcnt, 1);
457 	spin_lock_init(&pd->lock);
458 	pd->cpu = cpumask_first(pd->cpumask.pcpu);
459 	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);
460 
461 	return pd;
462 
463 err_free_squeue:
464 	free_percpu(pd->squeue);
465 err_free_pqueue:
466 	free_percpu(pd->pqueue);
467 err_free_pd:
468 	kfree(pd);
469 err:
470 	return NULL;
471 }
472 
473 static void padata_free_pd(struct parallel_data *pd)
474 {
475 	free_cpumask_var(pd->cpumask.pcpu);
476 	free_cpumask_var(pd->cpumask.cbcpu);
477 	free_percpu(pd->pqueue);
478 	free_percpu(pd->squeue);
479 	kfree(pd);
480 }
481 
482 static void __padata_start(struct padata_instance *pinst)
483 {
484 	pinst->flags |= PADATA_INIT;
485 }
486 
487 static void __padata_stop(struct padata_instance *pinst)
488 {
489 	if (!(pinst->flags & PADATA_INIT))
490 		return;
491 
492 	pinst->flags &= ~PADATA_INIT;
493 
494 	synchronize_rcu();
495 }
496 
497 /* Replace the internal control structure with a new one. */
498 static int padata_replace_one(struct padata_shell *ps)
499 {
500 	struct parallel_data *pd_new;
501 
502 	pd_new = padata_alloc_pd(ps);
503 	if (!pd_new)
504 		return -ENOMEM;
505 
506 	ps->opd = rcu_dereference_protected(ps->pd, 1);
507 	rcu_assign_pointer(ps->pd, pd_new);
508 
509 	return 0;
510 }
511 
512 static int padata_replace(struct padata_instance *pinst)
513 {
514 	struct padata_shell *ps;
515 	int err = 0;
516 
517 	pinst->flags |= PADATA_RESET;
518 
519 	cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
520 		    cpu_online_mask);
521 
522 	cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
523 		    cpu_online_mask);
524 
525 	list_for_each_entry(ps, &pinst->pslist, list) {
526 		err = padata_replace_one(ps);
527 		if (err)
528 			break;
529 	}
530 
531 	synchronize_rcu();
532 
533 	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
534 		if (atomic_dec_and_test(&ps->opd->refcnt))
535 			padata_free_pd(ps->opd);
536 
537 	pinst->flags &= ~PADATA_RESET;
538 
539 	return err;
540 }
541 
542 /* If cpumask contains no active cpu, we mark the instance as invalid. */
543 static bool padata_validate_cpumask(struct padata_instance *pinst,
544 				    const struct cpumask *cpumask)
545 {
546 	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
547 		pinst->flags |= PADATA_INVALID;
548 		return false;
549 	}
550 
551 	pinst->flags &= ~PADATA_INVALID;
552 	return true;
553 }
554 
555 static int __padata_set_cpumasks(struct padata_instance *pinst,
556 				 cpumask_var_t pcpumask,
557 				 cpumask_var_t cbcpumask)
558 {
559 	int valid;
560 	int err;
561 
562 	valid = padata_validate_cpumask(pinst, pcpumask);
563 	if (!valid) {
564 		__padata_stop(pinst);
565 		goto out_replace;
566 	}
567 
568 	valid = padata_validate_cpumask(pinst, cbcpumask);
569 	if (!valid)
570 		__padata_stop(pinst);
571 
572 out_replace:
573 	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
574 	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
575 
576 	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);
577 
578 	if (valid)
579 		__padata_start(pinst);
580 
581 	return err;
582 }
583 
584 /**
585  * padata_set_cpumask - Replace the cpumask selected by @cpumask_type with
586  *                      the contents of @cpumask.
587  * @pinst: padata instance
588  * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
589  *                serial or the parallel cpumask respectively.
590  * @cpumask: the cpumask to use
591  *
592  * Return: 0 on success or negative error code
593  */
594 int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
595 		       cpumask_var_t cpumask)
596 {
597 	struct cpumask *serial_mask, *parallel_mask;
598 	int err = -EINVAL;
599 
600 	get_online_cpus();
601 	mutex_lock(&pinst->lock);
602 
603 	switch (cpumask_type) {
604 	case PADATA_CPU_PARALLEL:
605 		serial_mask = pinst->cpumask.cbcpu;
606 		parallel_mask = cpumask;
607 		break;
608 	case PADATA_CPU_SERIAL:
609 		parallel_mask = pinst->cpumask.pcpu;
610 		serial_mask = cpumask;
611 		break;
612 	default:
613 		goto out;
614 	}
615 
616 	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);
617 
618 out:
619 	mutex_unlock(&pinst->lock);
620 	put_online_cpus();
621 
622 	return err;
623 }
624 EXPORT_SYMBOL(padata_set_cpumask);
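/*
 * Example (illustrative sketch, not part of this file): restricting the
 * parallel workers of a padata instance to CPUs 0-3.  my_restrict_parallel()
 * is a hypothetical helper.
 */
static int my_restrict_parallel(struct padata_instance *pinst)
{
	cpumask_var_t mask;
	int err;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	cpumask_set_cpu(0, mask);
	cpumask_set_cpu(1, mask);
	cpumask_set_cpu(2, mask);
	cpumask_set_cpu(3, mask);

	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return err;
}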
625 
626 /**
627  * padata_start - start the parallel processing
628  *
629  * @pinst: padata instance to start
630  *
631  * Return: 0 on success or negative error code
632  */
633 int padata_start(struct padata_instance *pinst)
634 {
635 	int err = 0;
636 
637 	mutex_lock(&pinst->lock);
638 
639 	if (pinst->flags & PADATA_INVALID)
640 		err = -EINVAL;
641 
642 	__padata_start(pinst);
643 
644 	mutex_unlock(&pinst->lock);
645 
646 	return err;
647 }
648 EXPORT_SYMBOL(padata_start);
649 
650 /**
651  * padata_stop - stop the parallel processing
652  *
653  * @pinst: padata instance to stop
654  */
655 void padata_stop(struct padata_instance *pinst)
656 {
657 	mutex_lock(&pinst->lock);
658 	__padata_stop(pinst);
659 	mutex_unlock(&pinst->lock);
660 }
661 EXPORT_SYMBOL(padata_stop);
662 
663 #ifdef CONFIG_HOTPLUG_CPU
664 
665 static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
666 {
667 	int err = 0;
668 
669 	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
670 		err = padata_replace(pinst);
671 
672 		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
673 		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
674 			__padata_start(pinst);
675 	}
676 
677 	return err;
678 }
679 
680 static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
681 {
682 	int err = 0;
683 
684 	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
685 		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
686 		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
687 			__padata_stop(pinst);
688 
689 		err = padata_replace(pinst);
690 	}
691 
692 	return err;
693 }
694 
695 static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
696 {
697 	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
698 		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
699 }
700 
701 static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
702 {
703 	struct padata_instance *pinst;
704 	int ret;
705 
706 	pinst = hlist_entry_safe(node, struct padata_instance, node);
707 	if (!pinst_has_cpu(pinst, cpu))
708 		return 0;
709 
710 	mutex_lock(&pinst->lock);
711 	ret = __padata_add_cpu(pinst, cpu);
712 	mutex_unlock(&pinst->lock);
713 	return ret;
714 }
715 
716 static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
717 {
718 	struct padata_instance *pinst;
719 	int ret;
720 
721 	pinst = hlist_entry_safe(node, struct padata_instance, node);
722 	if (!pinst_has_cpu(pinst, cpu))
723 		return 0;
724 
725 	mutex_lock(&pinst->lock);
726 	ret = __padata_remove_cpu(pinst, cpu);
727 	mutex_unlock(&pinst->lock);
728 	return ret;
729 }
730 
731 static enum cpuhp_state hp_online;
732 #endif
733 
734 static void __padata_free(struct padata_instance *pinst)
735 {
736 #ifdef CONFIG_HOTPLUG_CPU
737 	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node);
738 	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
739 #endif
740 
741 	WARN_ON(!list_empty(&pinst->pslist));
742 
743 	padata_stop(pinst);
744 	free_cpumask_var(pinst->rcpumask.cbcpu);
745 	free_cpumask_var(pinst->rcpumask.pcpu);
746 	free_cpumask_var(pinst->cpumask.pcpu);
747 	free_cpumask_var(pinst->cpumask.cbcpu);
748 	destroy_workqueue(pinst->serial_wq);
749 	destroy_workqueue(pinst->parallel_wq);
750 	kfree(pinst);
751 }
752 
753 #define kobj2pinst(_kobj)					\
754 	container_of(_kobj, struct padata_instance, kobj)
755 #define attr2pentry(_attr)					\
756 	container_of(_attr, struct padata_sysfs_entry, attr)
757 
758 static void padata_sysfs_release(struct kobject *kobj)
759 {
760 	struct padata_instance *pinst = kobj2pinst(kobj);
761 	__padata_free(pinst);
762 }
763 
764 struct padata_sysfs_entry {
765 	struct attribute attr;
766 	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
767 	ssize_t (*store)(struct padata_instance *, struct attribute *,
768 			 const char *, size_t);
769 };
770 
771 static ssize_t show_cpumask(struct padata_instance *pinst,
772 			    struct attribute *attr,  char *buf)
773 {
774 	struct cpumask *cpumask;
775 	ssize_t len;
776 
777 	mutex_lock(&pinst->lock);
778 	if (!strcmp(attr->name, "serial_cpumask"))
779 		cpumask = pinst->cpumask.cbcpu;
780 	else
781 		cpumask = pinst->cpumask.pcpu;
782 
783 	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
784 		       nr_cpu_ids, cpumask_bits(cpumask));
785 	mutex_unlock(&pinst->lock);
786 	return len < PAGE_SIZE ? len : -EINVAL;
787 }
788 
789 static ssize_t store_cpumask(struct padata_instance *pinst,
790 			     struct attribute *attr,
791 			     const char *buf, size_t count)
792 {
793 	cpumask_var_t new_cpumask;
794 	ssize_t ret;
795 	int mask_type;
796 
797 	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
798 		return -ENOMEM;
799 
800 	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
801 			   nr_cpumask_bits);
802 	if (ret < 0)
803 		goto out;
804 
805 	mask_type = !strcmp(attr->name, "serial_cpumask") ?
806 		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
807 	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
808 	if (!ret)
809 		ret = count;
810 
811 out:
812 	free_cpumask_var(new_cpumask);
813 	return ret;
814 }
815 
816 #define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
817 	static struct padata_sysfs_entry _name##_attr =		\
818 		__ATTR(_name, 0644, _show_name, _store_name)
819 #define PADATA_ATTR_RO(_name, _show_name)		\
820 	static struct padata_sysfs_entry _name##_attr = \
821 		__ATTR(_name, 0400, _show_name, NULL)
822 
823 PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
824 PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
825 
826 /*
827  * Padata sysfs provides the following objects:
828  * serial_cpumask   [RW] - cpumask for serial workers
829  * parallel_cpumask [RW] - cpumask for parallel workers
830  */
831 static struct attribute *padata_default_attrs[] = {
832 	&serial_cpumask_attr.attr,
833 	&parallel_cpumask_attr.attr,
834 	NULL,
835 };
836 ATTRIBUTE_GROUPS(padata_default);
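/*
 * Where these attributes appear in sysfs depends on where the caller adds
 * the instance kobject; pcrypt, for example, places its instances under
 * /sys/kernel/pcrypt/, so (illustrative paths):
 *
 *	cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */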
837 
838 static ssize_t padata_sysfs_show(struct kobject *kobj,
839 				 struct attribute *attr, char *buf)
840 {
841 	struct padata_instance *pinst;
842 	struct padata_sysfs_entry *pentry;
843 	ssize_t ret = -EIO;
844 
845 	pinst = kobj2pinst(kobj);
846 	pentry = attr2pentry(attr);
847 	if (pentry->show)
848 		ret = pentry->show(pinst, attr, buf);
849 
850 	return ret;
851 }
852 
853 static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
854 				  const char *buf, size_t count)
855 {
856 	struct padata_instance *pinst;
857 	struct padata_sysfs_entry *pentry;
858 	ssize_t ret = -EIO;
859 
860 	pinst = kobj2pinst(kobj);
861 	pentry = attr2pentry(attr);
862 	if (pentry->store)
863 		ret = pentry->store(pinst, attr, buf, count);
864 
865 	return ret;
866 }
867 
868 static const struct sysfs_ops padata_sysfs_ops = {
869 	.show = padata_sysfs_show,
870 	.store = padata_sysfs_store,
871 };
872 
873 static struct kobj_type padata_attr_type = {
874 	.sysfs_ops = &padata_sysfs_ops,
875 	.default_groups = padata_default_groups,
876 	.release = padata_sysfs_release,
877 };
878 
879 /**
880  * padata_alloc - allocate and initialize a padata instance and specify
881  *                cpumasks for serial and parallel workers.
882  *
883  * @name: used to identify the instance
884  * @pcpumask: cpumask that will be used for padata parallelization
885  * @cbcpumask: cpumask that will be used for padata serialization
886  *
887  * Return: new instance on success, NULL on error
888  */
889 static struct padata_instance *padata_alloc(const char *name,
890 					    const struct cpumask *pcpumask,
891 					    const struct cpumask *cbcpumask)
892 {
893 	struct padata_instance *pinst;
894 
895 	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
896 	if (!pinst)
897 		goto err;
898 
899 	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
900 					     name);
901 	if (!pinst->parallel_wq)
902 		goto err_free_inst;
903 
904 	get_online_cpus();
905 
906 	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
907 					   WQ_CPU_INTENSIVE, 1, name);
908 	if (!pinst->serial_wq)
909 		goto err_put_cpus;
910 
911 	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
912 		goto err_free_serial_wq;
913 	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
914 		free_cpumask_var(pinst->cpumask.pcpu);
915 		goto err_free_serial_wq;
916 	}
917 	if (!padata_validate_cpumask(pinst, pcpumask) ||
918 	    !padata_validate_cpumask(pinst, cbcpumask))
919 		goto err_free_masks;
920 
921 	if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
922 		goto err_free_masks;
923 	if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
924 		goto err_free_rcpumask_pcpu;
925 
926 	INIT_LIST_HEAD(&pinst->pslist);
927 
928 	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
929 	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
930 	cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
931 	cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);
932 
933 	if (padata_setup_cpumasks(pinst))
934 		goto err_free_rcpumask_cbcpu;
935 
936 	pinst->flags = 0;
937 
938 	kobject_init(&pinst->kobj, &padata_attr_type);
939 	mutex_init(&pinst->lock);
940 
941 #ifdef CONFIG_HOTPLUG_CPU
942 	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
943 	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
944 						    &pinst->node);
945 #endif
946 
947 	put_online_cpus();
948 
949 	return pinst;
950 
951 err_free_rcpumask_cbcpu:
952 	free_cpumask_var(pinst->rcpumask.cbcpu);
953 err_free_rcpumask_pcpu:
954 	free_cpumask_var(pinst->rcpumask.pcpu);
955 err_free_masks:
956 	free_cpumask_var(pinst->cpumask.pcpu);
957 	free_cpumask_var(pinst->cpumask.cbcpu);
958 err_free_serial_wq:
959 	destroy_workqueue(pinst->serial_wq);
960 err_put_cpus:
961 	put_online_cpus();
962 	destroy_workqueue(pinst->parallel_wq);
963 err_free_inst:
964 	kfree(pinst);
965 err:
966 	return NULL;
967 }
968 
969 /**
970  * padata_alloc_possible - Allocate and initialize padata instance.
971  *                         Use the cpu_possible_mask for serial and
972  *                         parallel workers.
973  *
974  * @name: used to identify the instance
975  *
976  * Return: new instance on success, NULL on error
977  */
978 struct padata_instance *padata_alloc_possible(const char *name)
979 {
980 	return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
981 }
982 EXPORT_SYMBOL(padata_alloc_possible);
983 
984 /**
985  * padata_free - free a padata instance
986  *
987  * @pinst: padata instance to free
988  */
989 void padata_free(struct padata_instance *pinst)
990 {
991 	kobject_put(&pinst->kobj);
992 }
993 EXPORT_SYMBOL(padata_free);
994 
995 /**
996  * padata_alloc_shell - Allocate and initialize padata shell.
997  *
998  * @pinst: Parent padata_instance object.
999  *
1000  * Return: new shell on success, NULL on error
1001  */
1002 struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
1003 {
1004 	struct parallel_data *pd;
1005 	struct padata_shell *ps;
1006 
1007 	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
1008 	if (!ps)
1009 		goto out;
1010 
1011 	ps->pinst = pinst;
1012 
1013 	get_online_cpus();
1014 	pd = padata_alloc_pd(ps);
1015 	put_online_cpus();
1016 
1017 	if (!pd)
1018 		goto out_free_ps;
1019 
1020 	mutex_lock(&pinst->lock);
1021 	RCU_INIT_POINTER(ps->pd, pd);
1022 	list_add(&ps->list, &pinst->pslist);
1023 	mutex_unlock(&pinst->lock);
1024 
1025 	return ps;
1026 
1027 out_free_ps:
1028 	kfree(ps);
1029 out:
1030 	return NULL;
1031 }
1032 EXPORT_SYMBOL(padata_alloc_shell);
1033 
1034 /**
1035  * padata_free_shell - free a padata shell
1036  *
1037  * @ps: padata shell to free
1038  */
1039 void padata_free_shell(struct padata_shell *ps)
1040 {
1041 	if (!ps)
1042 		return;
1043 
1044 	mutex_lock(&ps->pinst->lock);
1045 	list_del(&ps->list);
1046 	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
1047 	mutex_unlock(&ps->pinst->lock);
1048 
1049 	kfree(ps);
1050 }
1051 EXPORT_SYMBOL(padata_free_shell);
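/*
 * Example (illustrative sketch, not part of this file): the typical
 * lifecycle of an instance and a shell.  The my_* names are hypothetical
 * and error handling is partial.
 */
static struct padata_instance *my_pinst;
static struct padata_shell *my_ps;

static int my_client_init(void)
{
	my_pinst = padata_alloc_possible("my_padata");
	if (!my_pinst)
		return -ENOMEM;

	my_ps = padata_alloc_shell(my_pinst);
	if (!my_ps) {
		padata_free(my_pinst);
		return -ENOMEM;
	}

	/* Allow padata_do_parallel() calls on shells of this instance. */
	return padata_start(my_pinst);
}

static void my_client_exit(void)
{
	padata_stop(my_pinst);
	padata_free_shell(my_ps);
	padata_free(my_pinst);
}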
1052 
1053 #ifdef CONFIG_HOTPLUG_CPU
1054 
1055 static __init int padata_driver_init(void)
1056 {
1057 	int ret;
1058 
1059 	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
1060 				      padata_cpu_online, NULL);
1061 	if (ret < 0)
1062 		return ret;
1063 	hp_online = ret;
1064 
1065 	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
1066 				      NULL, padata_cpu_dead);
1067 	if (ret < 0) {
1068 		cpuhp_remove_multi_state(hp_online);
1069 		return ret;
1070 	}
1071 	return 0;
1072 }
1073 module_init(padata_driver_init);
1074 
1075 static __exit void padata_driver_exit(void)
1076 {
1077 	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
1078 	cpuhp_remove_multi_state(hp_online);
1079 }
1080 module_exit(padata_driver_exit);
1081 #endif
1082