Lines matching references to 'pd' (kernel/padata.c):
47 static void padata_free_pd(struct parallel_data *pd);
50 static inline void padata_get_pd(struct parallel_data *pd) in padata_get_pd() argument
52 refcount_inc(&pd->refcnt); in padata_get_pd()
55 static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt) in padata_put_pd_cnt() argument
57 if (refcount_sub_and_test(cnt, &pd->refcnt)) in padata_put_pd_cnt()
58 padata_free_pd(pd); in padata_put_pd_cnt()
61 static inline void padata_put_pd(struct parallel_data *pd) in padata_put_pd() argument
63 padata_put_pd_cnt(pd, 1); in padata_put_pd()
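The three helpers above (lines 50-63) form padata's reference-counting interface for struct parallel_data: get bumps the count, put_cnt drops an arbitrary number of references and frees on reaching zero, and put drops a single one. A minimal userspace sketch of the same pattern, using C11 atomics in place of the kernel's refcount_t (all names here are hypothetical, not the kernel API):

        #include <stdatomic.h>
        #include <stdlib.h>

        struct obj {
                atomic_int refcnt;
                /* payload ... */
        };

        static void obj_free(struct obj *o)
        {
                free(o);
        }

        static inline void obj_get(struct obj *o)
        {
                atomic_fetch_add(&o->refcnt, 1);
        }

        /* Drop 'cnt' references at once; free when the count reaches zero. */
        static inline void obj_put_cnt(struct obj *o, int cnt)
        {
                if (atomic_fetch_sub(&o->refcnt, cnt) == cnt)
                        obj_free(o);
        }

        static inline void obj_put(struct obj *o)
        {
                obj_put_cnt(o, 1);
        }

The batched form matters below: padata_serial_worker() drops one reference per serialized request in a single padata_put_pd_cnt() call (line 348) instead of looping over padata_put_pd().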
66 static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr) in padata_cpu_hash() argument
72 int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu); in padata_cpu_hash()
74 return cpumask_nth(cpu_index, pd->cpumask.pcpu); in padata_cpu_hash()
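padata_cpu_hash() (lines 66-74) maps a sequence number to a CPU by taking seq_nr modulo the weight of the parallel cpumask and returning the nth set CPU. A userspace sketch of the same idea on a 64-bit mask (hypothetical helpers, not the kernel cpumask API):

        #include <stdint.h>

        /* Return the index of the n-th (0-based) set bit in mask, or -1 if none. */
        static int nth_set_bit(uint64_t mask, unsigned int n)
        {
                for (int bit = 0; bit < 64; bit++) {
                        if (mask & (1ULL << bit)) {
                                if (n == 0)
                                        return bit;
                                n--;
                        }
                }
                return -1;
        }

        /* seq_nr mod popcount(mask) selects which of the enabled CPUs handles it. */
        static int cpu_hash(uint64_t cpumask, unsigned int seq_nr)
        {
                unsigned int weight = __builtin_popcountll(cpumask);

                if (!weight)
                        return -1;      /* empty mask; the kernel validates this earlier */
                return nth_set_bit(cpumask, seq_nr % weight);
        }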
184 struct parallel_data *pd; in padata_do_parallel() local
190 pd = rcu_dereference_bh(ps->pd); in padata_do_parallel()
196 if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) { in padata_do_parallel()
197 if (cpumask_empty(pd->cpumask.cbcpu)) in padata_do_parallel()
201 cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu); in padata_do_parallel()
202 *cb_cpu = cpumask_nth(cpu_index, pd->cpumask.cbcpu); in padata_do_parallel()
209 padata_get_pd(pd); in padata_do_parallel()
210 padata->pd = pd; in padata_do_parallel()
214 padata->seq_nr = ++pd->seq_nr; in padata_do_parallel()
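In padata_do_parallel() (lines 184-214) the submission path dereferences the shell's current pd under RCU, takes a reference on it, stashes the pointer in the request, and tags the request with the next sequence number before queueing it; the reference keeps pd alive until the serial side drops it. A sketch of that submit-side pattern (hypothetical types, building on the obj/obj_get() sketch above):

        struct obj;                      /* refcounted owner, as in the refcount sketch */
        void obj_get(struct obj *o);     /* pins a reference */

        struct request {
                struct obj *o;           /* owner object, pinned for the request's lifetime */
                unsigned int seq_nr;     /* position in the serialization order */
        };

        static unsigned int next_seq_nr; /* assume the caller serializes submissions */

        static void submit_request(struct obj *o, struct request *req)
        {
                obj_get(o);                  /* pin before handing the object to async work */
                req->o = o;
                req->seq_nr = ++next_seq_nr; /* sequence numbers drive later reordering */
                /* ... queue req for parallel processing on some worker CPU ... */
        }

The callback-CPU fixup at lines 196-202 reuses the same modulo-then-nth-bit mapping as cpu_hash() above: a requested cb_cpu that is not in the callback mask is remapped onto one of the CPUs that is.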
248 static struct padata_priv *padata_find_next(struct parallel_data *pd, int cpu, in padata_find_next() argument
254 reorder = per_cpu_ptr(pd->reorder_list, cpu); in padata_find_next()
274 pd->processed = processed; in padata_find_next()
275 pd->cpu = cpu; in padata_find_next()
282 struct parallel_data *pd = padata->pd; in padata_reorder() local
283 struct padata_instance *pinst = pd->ps->pinst; in padata_reorder()
287 processed = pd->processed; in padata_reorder()
288 cpu = pd->cpu; in padata_reorder()
297 cpu = cpumask_first(pd->cpumask.pcpu); in padata_reorder()
299 cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu); in padata_reorder()
302 squeue = per_cpu_ptr(pd->squeue, cb_cpu); in padata_reorder()
313 padata = padata_find_next(pd, cpu, processed); in padata_reorder()
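padata_reorder() (lines 282-313) walks the parallel CPUs in mask order, resuming from the position recorded in pd->cpu/pd->processed and wrapping around the mask with cpumask_next_wrap(), asking padata_find_next() for the next in-order request on each CPU's reorder list. A small sketch of wrap-around iteration over the set bits of a 64-bit mask (hypothetical helper, in the spirit of cpumask_next_wrap()):

        #include <stdint.h>

        /*
         * Return the next set bit after 'bit', wrapping past the top of the mask.
         * If 'bit' is the only set bit it is returned again; -1 means an empty mask.
         */
        static int next_set_bit_wrap(uint64_t mask, int bit)
        {
                for (int i = 1; i <= 64; i++) {
                        int candidate = (bit + i) % 64;

                        if (mask & (1ULL << candidate))
                                return candidate;
                }
                return -1;
        }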
321 struct parallel_data *pd; in padata_serial_worker() local
327 pd = squeue->pd; in padata_serial_worker()
348 padata_put_pd_cnt(pd, cnt); in padata_serial_worker()
361 struct parallel_data *pd = padata->pd; in padata_do_serial() local
362 int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr); in padata_do_serial()
363 struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu); in padata_do_serial()
376 if (padata->seq_nr != pd->processed) { in padata_do_serial()
510 static void padata_init_squeues(struct parallel_data *pd) in padata_init_squeues() argument
515 for_each_cpu(cpu, pd->cpumask.cbcpu) { in padata_init_squeues()
516 squeue = per_cpu_ptr(pd->squeue, cpu); in padata_init_squeues()
517 squeue->pd = pd; in padata_init_squeues()
525 static void padata_init_reorder_list(struct parallel_data *pd) in padata_init_reorder_list() argument
530 for_each_cpu(cpu, pd->cpumask.pcpu) { in padata_init_reorder_list()
531 list = per_cpu_ptr(pd->reorder_list, cpu); in padata_init_reorder_list()
541 struct parallel_data *pd; in padata_alloc_pd() local
543 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL); in padata_alloc_pd()
544 if (!pd) in padata_alloc_pd()
547 pd->reorder_list = alloc_percpu(struct padata_list); in padata_alloc_pd()
548 if (!pd->reorder_list) in padata_alloc_pd()
551 pd->squeue = alloc_percpu(struct padata_serial_queue); in padata_alloc_pd()
552 if (!pd->squeue) in padata_alloc_pd()
555 pd->ps = ps; in padata_alloc_pd()
557 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL)) in padata_alloc_pd()
559 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) in padata_alloc_pd()
562 cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask); in padata_alloc_pd()
563 cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask); in padata_alloc_pd()
565 padata_init_reorder_list(pd); in padata_alloc_pd()
566 padata_init_squeues(pd); in padata_alloc_pd()
567 pd->seq_nr = -1; in padata_alloc_pd()
568 refcount_set(&pd->refcnt, 1); in padata_alloc_pd()
569 pd->cpu = cpumask_first(pd->cpumask.pcpu); in padata_alloc_pd()
571 return pd; in padata_alloc_pd()
574 free_cpumask_var(pd->cpumask.pcpu); in padata_alloc_pd()
576 free_percpu(pd->squeue); in padata_alloc_pd()
578 free_percpu(pd->reorder_list); in padata_alloc_pd()
580 kfree(pd); in padata_alloc_pd()
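padata_alloc_pd() (lines 541-580) allocates the parallel_data, the two percpu areas, and the two cpumasks in sequence, and unwinds in reverse order through error labels if any step fails; only after every allocation succeeds does it initialize seq_nr, set the refcount to 1, and pick the first parallel CPU (lines 567-569). A userspace sketch of the same goto-unwind shape (hypothetical struct and fields):

        #include <stdlib.h>

        struct pdata {
                int *percpu_a;
                int *percpu_b;
                int refcnt;
        };

        static struct pdata *pdata_alloc(int ncpus)
        {
                struct pdata *pd;

                pd = calloc(1, sizeof(*pd));
                if (!pd)
                        return NULL;

                pd->percpu_a = calloc(ncpus, sizeof(*pd->percpu_a));
                if (!pd->percpu_a)
                        goto err_free_pd;

                pd->percpu_b = calloc(ncpus, sizeof(*pd->percpu_b));
                if (!pd->percpu_b)
                        goto err_free_a;

                pd->refcnt = 1;              /* caller holds the initial reference */
                return pd;

        err_free_a:
                free(pd->percpu_a);
        err_free_pd:
                free(pd);
                return NULL;
        }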
585 static void padata_free_pd(struct parallel_data *pd) in padata_free_pd() argument
587 free_cpumask_var(pd->cpumask.pcpu); in padata_free_pd()
588 free_cpumask_var(pd->cpumask.cbcpu); in padata_free_pd()
589 free_percpu(pd->reorder_list); in padata_free_pd()
590 free_percpu(pd->squeue); in padata_free_pd()
591 kfree(pd); in padata_free_pd()
618 ps->opd = rcu_dereference_protected(ps->pd, 1); in padata_replace_one()
619 rcu_assign_pointer(ps->pd, pd_new); in padata_replace_one()
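padata_replace_one() (lines 618-619) saves the shell's currently published pd and publishes the new one through the RCU pointer; the old pd is kept so its reference can be dropped once readers are done (in the kernel, only after an RCU grace period). A rough userspace analogue of the publish-and-keep-old step using a C11 atomic pointer (this sketches only the pointer swap, not RCU's grace-period machinery; names are hypothetical):

        #include <stdatomic.h>

        struct obj;                          /* refcounted owner, as in the sketches above */

        struct shell {
                _Atomic(struct obj *) pd;    /* readers load this; writers swap it */
                struct obj *old_pd;          /* stashed so its reference can be dropped later */
        };

        static void shell_replace(struct shell *ps, struct obj *pd_new)
        {
                /* Save the currently published object and publish the new one. */
                ps->old_pd = atomic_exchange_explicit(&ps->pd, pd_new,
                                                      memory_order_release);
        }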
1038 struct parallel_data *pd; in padata_alloc_shell() local
1048 pd = padata_alloc_pd(ps); in padata_alloc_shell()
1051 if (!pd) in padata_alloc_shell()
1055 RCU_INIT_POINTER(ps->pd, pd); in padata_alloc_shell()
1075 struct parallel_data *pd; in padata_free_shell() local
1082 pd = rcu_dereference_protected(ps->pd, 1); in padata_free_shell()
1083 padata_put_pd(pd); in padata_free_shell()