Lines Matching full:pd in kernel/padata.c
47 static void padata_free_pd(struct parallel_data *pd);
50 static inline void padata_get_pd(struct parallel_data *pd)
52 refcount_inc(&pd->refcnt);
55 static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
57 if (refcount_sub_and_test(cnt, &pd->refcnt))
58 padata_free_pd(pd);
61 static inline void padata_put_pd(struct parallel_data *pd)
63 padata_put_pd_cnt(pd, 1);
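Lines 47-63 are the parallel_data refcounting helpers: padata_get_pd() takes a reference, padata_put_pd_cnt() drops cnt references at once and frees the pd on the transition to zero, and padata_put_pd() is the single-reference case. Below is a minimal userspace sketch of the same pattern, assuming a C11 atomic counter in place of the kernel's refcount_t and free() in place of padata_free_pd(); all names are illustrative, not kernel API.

/* Hedged userspace analogue of padata_get_pd()/padata_put_pd(). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct parallel_data_demo {
	atomic_int refcnt;
};

static void pd_get(struct parallel_data_demo *pd)
{
	atomic_fetch_add(&pd->refcnt, 1);	/* refcount_inc() */
}

static void pd_put_cnt(struct parallel_data_demo *pd, int cnt)
{
	/* Like refcount_sub_and_test(): drop cnt references in one
	 * operation and free on the transition to zero. */
	if (atomic_fetch_sub(&pd->refcnt, cnt) == cnt) {
		printf("last reference dropped, freeing pd\n");
		free(pd);
	}
}

static void pd_put(struct parallel_data_demo *pd)
{
	pd_put_cnt(pd, 1);
}

int main(void)
{
	struct parallel_data_demo *pd = malloc(sizeof(*pd));

	atomic_init(&pd->refcnt, 1);	/* refcount_set(&pd->refcnt, 1) */
	pd_get(pd);			/* a submitted job pins pd */
	pd_put(pd);			/* the job completes */
	pd_put(pd);			/* owner drops its own reference */
	return 0;
}

The cnt variant exists so padata_serial_worker() (line 348 below) can release one reference per completed job in a single operation instead of looping over padata_put_pd().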
66 static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
72 int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);
74 return cpumask_nth(cpu_index, pd->cpumask.pcpu);
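padata_cpu_hash() (lines 66-74) spreads jobs over the parallel cpumask: seq_nr modulo cpumask_weight() gives an index, and cpumask_nth() returns the index-th set bit. A hedged userspace sketch, with a plain unsigned long standing in for the cpumask:

/* Hedged sketch of the padata_cpu_hash() mapping. */
#include <stdio.h>

static int nth_set_bit(unsigned long mask, int n)
{
	for (int cpu = 0; cpu < 64; cpu++)
		if (mask & (1UL << cpu) && n-- == 0)
			return cpu;
	return -1;
}

static int cpu_hash(unsigned long pcpu_mask, unsigned int seq_nr)
{
	int weight = __builtin_popcountl(pcpu_mask);	/* cpumask_weight() */

	return nth_set_bit(pcpu_mask, seq_nr % weight);	/* cpumask_nth() */
}

int main(void)
{
	/* CPUs 1, 2 and 5 in the parallel mask. */
	unsigned long mask = (1UL << 1) | (1UL << 2) | (1UL << 5);

	for (unsigned int seq = 0; seq < 6; seq++)
		printf("seq_nr %u -> cpu %d\n", seq, cpu_hash(mask, seq));
	return 0;
}

With CPUs 1, 2 and 5 set, consecutive sequence numbers map round-robin to 1, 2, 5, 1, 2, 5; padata_find_next() relies on exactly this round-robin property.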
184 struct parallel_data *pd;
190 pd = rcu_dereference_bh(ps->pd);
196 if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
197 if (cpumask_empty(pd->cpumask.cbcpu))
201 cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);
202 *cb_cpu = cpumask_nth(cpu_index, pd->cpumask.cbcpu);
209 padata_get_pd(pd);
210 padata->pd = pd;
214 padata->seq_nr = ++pd->seq_nr;
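Lines 184-214 are from padata_do_parallel(): it snapshots the pd under RCU, falls back to a valid callback CPU when *cb_cpu is not in the cbcpu mask, pins the pd with padata_get_pd(), and stamps the job with the next sequence number. Below is a hedged kernel-style sketch of the caller's side, based on the documented padata API; struct my_job and both callbacks are illustrative names, not anything from padata.c.

/* Hedged sketch: embed a struct padata_priv in a job, submit it with
 * padata_do_parallel(), hand it back with padata_do_serial(). */
#include <linux/padata.h>
#include <linux/slab.h>

struct my_job {
	struct padata_priv padata;	/* must be embedded */
	int payload;
};

static void my_parallel(struct padata_priv *padata)
{
	struct my_job *job = container_of(padata, struct my_job, padata);

	job->payload *= 2;		/* the CPU-heavy work */
	padata_do_serial(padata);	/* re-enter in submission order */
}

static void my_serial(struct padata_priv *padata)
{
	kfree(container_of(padata, struct my_job, padata));
}

static int submit(struct padata_shell *ps, int value)
{
	struct my_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
	int cb_cpu = 0;	/* remapped per lines 196-202 if not in cbcpu */

	if (!job)
		return -ENOMEM;
	job->payload = value;
	job->padata.parallel = my_parallel;
	job->padata.serial = my_serial;
	return padata_do_parallel(ps, &job->padata, &cb_cpu);
}

If padata_do_parallel() returns an error the job was never queued and the caller still owns it.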
248 static struct padata_priv *padata_find_next(struct parallel_data *pd, int cpu,
254 reorder = per_cpu_ptr(pd->reorder_list, cpu);
274 pd->processed = processed;
275 pd->cpu = cpu;
282 struct parallel_data *pd = padata->pd;
283 struct padata_instance *pinst = pd->ps->pinst;
287 processed = pd->processed;
288 cpu = pd->cpu;
297 cpu = cpumask_first(pd->cpumask.pcpu);
299 cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu);
302 squeue = per_cpu_ptr(pd->squeue, cb_cpu);
313 padata = padata_find_next(pd, cpu, processed);
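padata_find_next() (lines 248-275) and padata_reorder() (lines 282-313) walk the per-CPU reorder lists in sequence-number order. Because padata_cpu_hash() assigns CPUs round-robin, job processed+1 can only be queued on the CPU after the one that held job processed, so the walk advances one CPU at a time with a wrap (line 299) and stops at the first gap. A minimal userspace analogue, assuming at most one pending completion per CPU:

/* Hedged analogue of the reorder walk: pop jobs while each expected
 * sequence number is present, stall at the first gap. */
#include <stdio.h>

#define NCPUS 3

/* queue[cpu] holds the pending seq_nr on that cpu, -1 if empty.
 * Here seqs 0 and 1 finished; seq 2 is still running. */
static int queue[NCPUS] = { 0, 1, -1 };

int main(void)
{
	int processed = 0;		/* pd->processed */
	int cpu = 0;			/* pd->cpu */

	for (;;) {
		if (queue[cpu] != processed) {
			/* Gap: the straggler resumes the walk later. */
			printf("stall waiting for seq %d on cpu %d\n",
			       processed, cpu);
			break;
		}
		printf("serializing seq %d from cpu %d\n", processed, cpu);
		queue[cpu] = -1;
		processed++;
		cpu = (cpu + 1) % NCPUS;	/* cpumask_next_wrap() */
	}
	return 0;
}

In the kernel, the job that eventually fills the gap restarts the walk itself via padata_do_serial().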
321 struct parallel_data *pd;
327 pd = squeue->pd;
348 padata_put_pd_cnt(pd, cnt);
361 struct parallel_data *pd = padata->pd;
362 int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
363 struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
376 if (padata->seq_nr != pd->processed) {
396 /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
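padata_do_serial() (lines 361-376) hashes the finished job's seq_nr back to the same reorder list chosen at submission time and inserts it in ascending seq_nr order; only when the job is the one the walk is waiting for (seq_nr == pd->processed, line 376) does reordering proceed. A hedged userspace sketch of the sorted insertion, with a singly linked list standing in for the kernel's padata_list:

/* Hedged sketch of the enqueue step: keep the list sorted by seq_nr
 * so the reorder walk can always pop from the head. */
#include <stdio.h>
#include <stdlib.h>

struct node { unsigned int seq_nr; struct node *next; };

static void insert_sorted(struct node **head, struct node *item)
{
	struct node **pos = head;

	/* Walk until the next entry has a larger sequence number. */
	while (*pos && (*pos)->seq_nr < item->seq_nr)
		pos = &(*pos)->next;
	item->next = *pos;
	*pos = item;
}

int main(void)
{
	struct node *head = NULL;
	unsigned int done_order[] = { 9, 3, 6 };	/* out of order */

	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->seq_nr = done_order[i];
		insert_sorted(&head, n);
	}
	for (struct node *n = head; n; n = n->next)
		printf("seq %u\n", n->seq_nr);		/* 3 6 9 */
	return 0;
}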
516 static void padata_init_squeues(struct parallel_data *pd)
521 for_each_cpu(cpu, pd->cpumask.cbcpu) {
522 squeue = per_cpu_ptr(pd->squeue, cpu);
523 squeue->pd = pd;
530 static void padata_init_reorder_list(struct parallel_data *pd)
535 for_each_cpu(cpu, pd->cpumask.pcpu) {
536 list = per_cpu_ptr(pd->reorder_list, cpu);
545 struct parallel_data *pd;
547 pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
548 if (!pd)
551 pd->reorder_list = alloc_percpu(struct padata_list);
552 if (!pd->reorder_list)
555 pd->squeue = alloc_percpu(struct padata_serial_queue);
556 if (!pd->squeue)
559 pd->ps = ps;
561 if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
563 if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
566 cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
567 cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);
569 padata_init_reorder_list(pd);
570 padata_init_squeues(pd);
571 pd->seq_nr = -1;
572 refcount_set(&pd->refcnt, 1);
573 pd->cpu = cpumask_first(pd->cpumask.pcpu);
575 return pd;
578 free_cpumask_var(pd->cpumask.pcpu);
580 free_percpu(pd->squeue);
582 free_percpu(pd->reorder_list);
584 kfree(pd);
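padata_alloc_pd() (lines 545-584) allocates the pd, its per-CPU reorder lists and serial queues, and both cpumasks, then seeds seq_nr with -1 (so the first job gets 0) and the refcount with 1 for the owner's own reference. Lines 578-584 are the error labels; below is a hedged userspace sketch of that goto-unwind idiom, with calloc()/free() standing in for the kernel allocators and the struct trimmed to two members:

/* Hedged sketch of the unwind pattern: each allocation gets a label
 * that frees everything allocated before it, so a mid-way failure
 * releases resources in reverse order. */
#include <stdlib.h>

struct demo_pd {
	int *reorder_list;
	int *squeue;
};

static struct demo_pd *alloc_pd_demo(void)
{
	struct demo_pd *pd = calloc(1, sizeof(*pd));

	if (!pd)
		goto err;
	pd->reorder_list = calloc(4, sizeof(int));
	if (!pd->reorder_list)
		goto err_free_pd;
	pd->squeue = calloc(4, sizeof(int));
	if (!pd->squeue)
		goto err_free_reorder_list;
	return pd;

err_free_reorder_list:
	free(pd->reorder_list);
err_free_pd:
	free(pd);
err:
	return NULL;
}

int main(void)
{
	struct demo_pd *pd = alloc_pd_demo();

	if (pd) {
		free(pd->squeue);
		free(pd->reorder_list);
		free(pd);
	}
	return 0;
}

padata_free_pd() (lines 589-595, next) releases the same resources and is reached only through the refcounting helpers at the top of the file.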
589 static void padata_free_pd(struct parallel_data *pd)
591 free_cpumask_var(pd->cpumask.pcpu);
592 free_cpumask_var(pd->cpumask.cbcpu);
593 free_percpu(pd->reorder_list);
594 free_percpu(pd->squeue);
595 kfree(pd);
622 ps->opd = rcu_dereference_protected(ps->pd, 1);
623 rcu_assign_pointer(ps->pd, pd_new);
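padata_replace_one() (lines 622-623) stashes the current pd in ps->opd and publishes the new one with rcu_assign_pointer(), so readers in padata_do_parallel() (line 190 above) see either the old or the new pd, never a torn pointer. A loose userspace analogue using C11 release/acquire atomics; note that real RCU also defers freeing the old pd until all readers are done, which this sketch does not model:

/* Hedged analogue: release store for rcu_assign_pointer(), acquire
 * load for rcu_dereference(); the old pd is merely stashed, as
 * ps->opd is above. */
#include <stdatomic.h>

struct pd_demo { int generation; };

struct shell_demo {
	_Atomic(struct pd_demo *) pd;	/* RCU-protected in the kernel */
	struct pd_demo *opd;		/* old pd, freed after the switch */
};

static void replace_one(struct shell_demo *ps, struct pd_demo *pd_new)
{
	ps->opd = atomic_load_explicit(&ps->pd, memory_order_relaxed);
	atomic_store_explicit(&ps->pd, pd_new, memory_order_release);
}

static int reader(struct shell_demo *ps)
{
	struct pd_demo *pd =
		atomic_load_explicit(&ps->pd, memory_order_acquire);

	return pd->generation;
}

int main(void)
{
	struct pd_demo pd_a = { .generation = 1 }, pd_b = { .generation = 2 };
	struct shell_demo ps = { .opd = 0 };

	atomic_init(&ps.pd, &pd_a);
	replace_one(&ps, &pd_b);
	return reader(&ps) == 2 ? 0 : 1;
}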
1042 struct parallel_data *pd;
1052 pd = padata_alloc_pd(ps);
1055 if (!pd)
1059 RCU_INIT_POINTER(ps->pd, pd);
1079 struct parallel_data *pd;
1086 pd = rcu_dereference_protected(ps->pd, 1);
1087 padata_put_pd(pd);
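Finally, padata_alloc_shell() (lines 1042-1059) creates the shell's first pd and publishes it with RCU_INIT_POINTER(), while padata_free_shell() (lines 1079-1087) drops the shell's reference with padata_put_pd(); the pd is actually freed only once any in-flight jobs have dropped theirs. A hedged kernel-style sketch of the lifecycle these two functions bracket, with an illustrative instance name and trimmed error handling:

/* Hedged sketch of shell setup and teardown via the padata API. */
#include <linux/padata.h>

static struct padata_instance *pinst;
static struct padata_shell *ps;

static int demo_init(void)
{
	pinst = padata_alloc("pd_demo");	/* illustrative name */
	if (!pinst)
		return -ENOMEM;
	ps = padata_alloc_shell(pinst);		/* allocates the first pd */
	if (!ps) {
		padata_free(pinst);
		return -ENOMEM;
	}
	return 0;
}

static void demo_exit(void)
{
	padata_free_shell(ps);	/* padata_put_pd(pd) at line 1087 */
	padata_free(pinst);
}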