Lines Matching full:pd

41 struct em_perf_domain *pd; member
53 table = em_perf_state_from_pd(em_dbg->pd); \
70 struct dentry *pd) in em_debug_create_ps() argument
77 em_dbg[i].pd = em_pd; in em_debug_create_ps()
88 d = debugfs_create_dir(name, pd); in em_debug_create_ps()
111 struct em_perf_domain *pd = s->private; in em_debug_flags_show() local
113 seq_printf(s, "%#lx\n", pd->flags); in em_debug_flags_show()
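The matches above come from the debugfs glue: em_debug_create_ps() creates a per-domain directory under the parent dentry, and em_debug_flags_show() prints pd->flags through seq_file. A minimal sketch of the same pattern, assuming made-up example_* names and an "example_pd" directory:

#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/seq_file.h>

/* Sketch only: example_* names are assumptions, not kernel symbols. */
static int example_flags_show(struct seq_file *s, void *unused)
{
	struct em_perf_domain *pd = s->private;

	seq_printf(s, "%#lx\n", pd->flags);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_flags);

static void example_debug_create(struct em_perf_domain *pd,
				 struct dentry *parent)
{
	struct dentry *d = debugfs_create_dir("example_pd", parent);

	debugfs_create_file("flags", 0444, d, pd, &example_flags_fops);
}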
183 * @pd : EM performance domain for which this must be done
189 struct em_perf_table *em_table_alloc(struct em_perf_domain *pd) in em_table_alloc() argument
194 table_size = sizeof(struct em_perf_state) * pd->nr_perf_states; in em_table_alloc()
205 static void em_init_performance(struct device *dev, struct em_perf_domain *pd, in em_init_performance() argument
215 cpu = cpumask_first(em_span_cpus(pd)); in em_init_performance()
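em_table_alloc() is essentially a single kzalloc() sized for the table header plus pd->nr_perf_states flexible-array entries, followed by a kref_init(). em_init_performance() then fills in the performance column for CPU devices by scaling the CPU capacity linearly with frequency, using the first CPU of the domain as reference. A sketch of that scaling, with the helper name and exact rounding as assumptions:

#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/math64.h>
#include <linux/sched/topology.h>

/* Sketch of the linear frequency-to-capacity scaling implied above. */
static void example_init_performance(struct em_perf_domain *pd,
				     struct em_perf_state *table,
				     int nr_states)
{
	u64 fmax, max_cap;
	int i, cpu;

	cpu = cpumask_first(em_span_cpus(pd));
	fmax = (u64)table[nr_states - 1].frequency;
	max_cap = (u64)arch_scale_cpu_capacity(cpu);

	for (i = 0; i < nr_states; i++)
		table[i].performance = div64_u64(max_cap * table[i].frequency,
						 fmax);
}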
306 struct em_perf_domain *pd; in em_dev_update_perf_domain() local
318 pd = dev->em_pd; in em_dev_update_perf_domain()
322 old_table = rcu_dereference_protected(pd->em_table, in em_dev_update_perf_domain()
324 rcu_assign_pointer(pd->em_table, new_table); in em_dev_update_perf_domain()
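em_dev_update_perf_domain() swaps the domain's table with the classic RCU publish pattern: the old pointer is fetched with rcu_dereference_protected() under the writer lock, the replacement is published with rcu_assign_pointer(), and the old table is freed only after readers are done. A generic sketch of the pattern; example_lock and the synchronize_rcu()/kfree() tail are stand-ins, the kernel actually defers the free through a kref and RCU callback:

#include <linux/energy_model.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

static DEFINE_MUTEX(example_lock);

static void example_swap_table(struct em_perf_domain *pd,
			       struct em_perf_table *new_table)
{
	struct em_perf_table *old_table;

	mutex_lock(&example_lock);
	old_table = rcu_dereference_protected(pd->em_table,
					      lockdep_is_held(&example_lock));
	rcu_assign_pointer(pd->em_table, new_table);
	mutex_unlock(&example_lock);

	/* Let readers still traversing old_table finish, then free it. */
	synchronize_rcu();
	kfree(old_table);
}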
335 static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd, in em_create_perf_table() argument
341 int nr_states = pd->nr_perf_states; in em_create_perf_table()
382 em_init_performance(dev, pd, table, nr_states); in em_create_perf_table()
397 struct em_perf_domain *pd; in em_create_pd() local
410 pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL); in em_create_pd()
411 if (!pd) in em_create_pd()
414 cpumask_copy(em_span_cpus(pd), cpus); in em_create_pd()
416 pd = kzalloc(sizeof(*pd), GFP_KERNEL); in em_create_pd()
417 if (!pd) in em_create_pd()
421 pd->nr_perf_states = nr_states; in em_create_pd()
423 em_table = em_table_alloc(pd); in em_create_pd()
427 ret = em_create_perf_table(dev, pd, em_table->state, cb, flags); in em_create_pd()
431 rcu_assign_pointer(pd->em_table, em_table); in em_create_pd()
436 cpu_dev->em_pd = pd; in em_create_pd()
439 dev->em_pd = pd; in em_create_pd()
446 kfree(pd); in em_create_pd()
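em_create_pd() has two allocation paths visible above: a CPU device gets its cpumask appended to the em_perf_domain allocation, hence kzalloc(sizeof(*pd) + cpumask_size()), while any other device gets a bare structure. A sketch of that split; example_alloc_pd() is a made-up name and the bus comparison mirrors the CPU-device test the kernel uses:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>
#include <linux/slab.h>

static struct em_perf_domain *example_alloc_pd(struct device *dev,
					       const cpumask_t *cpus)
{
	struct em_perf_domain *pd;

	if (dev->bus == &cpu_subsys) {
		/* The cpumask lives at the tail of the same allocation. */
		pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
		if (!pd)
			return NULL;
		cpumask_copy(em_span_cpus(pd), cpus);
	} else {
		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return NULL;
	}

	return pd;
}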
453 struct em_perf_domain *pd = dev->em_pd; in em_cpufreq_update_efficiencies() local
461 /* Try to get a CPU which is active and in this PD */ in em_cpufreq_update_efficiencies()
462 cpu = cpumask_first_and(em_span_cpus(pd), cpu_active_mask); in em_cpufreq_update_efficiencies()
474 for (i = 0; i < pd->nr_perf_states; i++) { in em_cpufreq_update_efficiencies()
491 pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES; in em_cpufreq_update_efficiencies()
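em_cpufreq_update_efficiencies() picks an active CPU from the domain's span, grabs its cpufreq policy, and pushes every state flagged EM_PERF_STATE_INEFFICIENT into the cpufreq table; if at least one sticks, the domain is marked EM_PERF_DOMAIN_SKIP_INEFFICIENCIES so EM lookups can skip those states as well. The core loop, sketched with an assumed helper name and the policy lookup elided:

#include <linux/cpufreq.h>
#include <linux/energy_model.h>

static void example_mark_inefficiencies(struct em_perf_domain *pd,
					struct cpufreq_policy *policy,
					struct em_perf_state *table)
{
	int i, found = 0;

	for (i = 0; i < pd->nr_perf_states; i++) {
		if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT))
			continue;

		if (!cpufreq_table_set_inefficient(policy, table[i].frequency))
			found++;
	}

	/* CPUFreq now skips these frequencies, so the EM can too. */
	if (found)
		pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES;
}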
567 * @dev : Device to register the PD for
568 * @nr_states : Number of performance states in the new PD
570 * @cpus : CPUs to include in the new PD (mandatory if @dev is a CPU device)
574 * update after registering the PD, even if @dev is a CPU device.
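These kernel-doc fragments belong to em_dev_register_perf_domain(), the entry point drivers use to create a PD. A hypothetical registration, assuming the current .active_power() argument order and the microwatts flag as the last parameter; the my_* names and all numbers are made up:

#include <linux/energy_model.h>

static int my_active_power(struct device *dev, unsigned long *power,
			   unsigned long *freq)
{
	/* Round *freq up to a supported OPP and report its power. */
	*freq = 1000000;	/* kHz, illustrative */
	*power = 600000;	/* uW,  illustrative */
	return 0;
}

static int my_register_em(struct device *dev, const cpumask_t *cpus)
{
	struct em_data_callback em_cb = EM_DATA_CB(my_active_power);

	/* 4 perf states, power reported in microwatts. */
	return em_dev_register_perf_domain(dev, 4, &em_cb, cpus, true);
}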
698 static struct em_perf_table *em_table_dup(struct em_perf_domain *pd) in em_table_dup() argument
704 em_table = em_table_alloc(pd); in em_table_dup()
711 ps = em_perf_state_from_pd(pd); in em_table_dup()
713 ps_size = sizeof(struct em_perf_state) * pd->nr_perf_states; in em_table_dup()
721 static int em_recalc_and_update(struct device *dev, struct em_perf_domain *pd, in em_recalc_and_update() argument
726 if (!em_is_artificial(pd)) { in em_recalc_and_update()
728 pd->nr_perf_states, pd->flags); in em_recalc_and_update()
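em_table_dup() clones the live table: it allocates via em_table_alloc() and memcpy()s the state array read under RCU. em_recalc_and_update() then recomputes costs for non-artificial domains via em_compute_costs() before publishing through em_dev_update_perf_domain(). Together they form the standard runtime-update recipe, sketched here with an assumed helper name and a made-up 10% power bump:

#include <linux/energy_model.h>

static int example_rescale_power(struct device *dev,
				 struct em_perf_domain *pd,
				 struct em_perf_table *new_table)
{
	int i, ret;

	/* Illustrative tweak: bump every state's power by 10%. */
	for (i = 0; i < pd->nr_perf_states; i++)
		new_table->state[i].power += new_table->state[i].power / 10;

	/* NULL callback: fall back to the default cost computation. */
	ret = em_compute_costs(dev, new_table->state, NULL,
			       pd->nr_perf_states, pd->flags);
	if (ret)
		return ret;

	return em_dev_update_perf_domain(dev, new_table);
}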
752 struct em_perf_domain *pd) in em_adjust_new_capacity() argument
760 table = em_perf_state_from_pd(pd); in em_adjust_new_capacity()
761 em_max_perf = table[pd->nr_perf_states - 1].performance; in em_adjust_new_capacity()
770 em_table = em_table_dup(pd); in em_adjust_new_capacity()
776 em_init_performance(dev, pd, em_table->state, pd->nr_perf_states); in em_adjust_new_capacity()
778 em_recalc_and_update(dev, pd, em_table); in em_adjust_new_capacity()
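em_adjust_new_capacity() first reads the top state's performance under RCU and bails out if it already matches the arch-reported CPU capacity; only a mismatch triggers the dup/re-init/recalc sequence above. The check, sketched with an assumed helper name:

#include <linux/energy_model.h>
#include <linux/rcupdate.h>
#include <linux/sched/topology.h>

static bool example_needs_rescale(unsigned int cpu, struct em_perf_domain *pd)
{
	struct em_perf_state *table;
	unsigned long em_max_perf;

	rcu_read_lock();
	table = em_perf_state_from_pd(pd);
	em_max_perf = table[pd->nr_perf_states - 1].performance;
	rcu_read_unlock();

	/* A mismatch means the EM was built against a stale capacity. */
	return em_max_perf != arch_scale_cpu_capacity(cpu);
}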
792 struct em_perf_domain *pd; in em_adjust_cpu_capacity() local
794 pd = em_pd_get(dev); in em_adjust_cpu_capacity()
795 if (pd) in em_adjust_cpu_capacity()
796 em_adjust_new_capacity(cpu, dev, pd); in em_adjust_cpu_capacity()
812 struct em_perf_domain *pd; in em_check_capacity_update() local
826 pd = em_pd_get(dev); in em_check_capacity_update()
827 if (!pd || em_is_artificial(pd)) in em_check_capacity_update()
831 em_span_cpus(pd)); in em_check_capacity_update()
833 em_adjust_new_capacity(cpu, dev, pd); in em_check_capacity_update()
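em_check_capacity_update() iterates all possible CPUs but ORs each visited domain's span into a done-mask, so every PD is adjusted exactly once even though it covers several CPUs. The iteration skeleton, with the per-domain work behind an assumed stub:

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/energy_model.h>

/* Stand-in for the per-domain adjustment work. */
static void example_adjust(unsigned int cpu, struct device *dev,
			   struct em_perf_domain *pd)
{
}

static void example_walk_domains(struct cpumask *done)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct em_perf_domain *pd;
		struct device *dev;

		if (cpumask_test_cpu(cpu, done))
			continue;

		dev = get_cpu_device(cpu);
		pd = em_pd_get(dev);
		if (!pd || em_is_artificial(pd))
			continue;

		/* Mark the whole span so this PD is visited only once. */
		cpumask_or(done, done, em_span_cpus(pd));
		example_adjust(cpu, dev, pd);
	}
}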
859 struct em_perf_domain *pd; in em_dev_update_chip_binning() local
865 pd = em_pd_get(dev); in em_dev_update_chip_binning()
866 if (!pd) { in em_dev_update_chip_binning()
871 em_table = em_table_dup(pd); in em_dev_update_chip_binning()
878 for (i = 0; i < pd->nr_perf_states; i++) { in em_dev_update_chip_binning()
891 return em_recalc_and_update(dev, pd, em_table); in em_dev_update_chip_binning()
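em_dev_update_chip_binning() follows the same dup-and-republish recipe, but refreshes each state's power from the OPP library, since chip binning changes the voltage behind each OPP. The refresh loop, sketched with an assumed helper name; dev_pm_opp_calc_power() takes power in uW and frequency in kHz by reference:

#include <linux/energy_model.h>
#include <linux/pm_opp.h>

static int example_refresh_power(struct device *dev,
				 struct em_perf_domain *pd,
				 struct em_perf_table *em_table)
{
	int i, ret;

	for (i = 0; i < pd->nr_perf_states; i++) {
		unsigned long freq = em_table->state[i].frequency;
		unsigned long power;

		/* Recompute power at the same frequency, new voltage. */
		ret = dev_pm_opp_calc_power(dev, &power, &freq);
		if (ret)
			return ret;

		em_table->state[i].power = power;
	}

	return 0;
}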
899 * @pd : Performance Domain with EM that has to be updated.
908 int em_update_performance_limits(struct em_perf_domain *pd, in em_update_performance_limits() argument
916 if (!pd) in em_update_performance_limits()
920 table = em_perf_state_from_pd(pd); in em_update_performance_limits()
922 for (i = 0; i < pd->nr_perf_states; i++) { in em_update_performance_limits()
937 pd->min_perf_state = min_ps; in em_update_performance_limits()
938 pd->max_perf_state = max_ps; in em_update_performance_limits()
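em_update_performance_limits() scans the table for states whose frequency exactly matches the given minimum and maximum and records their indices as pd->min_perf_state and pd->max_perf_state. A hypothetical caller; the kHz values are made up and must match perf-state frequencies exactly for the call to succeed:

#include <linux/energy_model.h>

static int example_clamp_em(struct device *dev)
{
	struct em_perf_domain *pd = em_pd_get(dev);

	if (!pd)
		return -ENODEV;

	/* Restrict the EM to the 600 MHz .. 1.8 GHz states. */
	return em_update_performance_limits(pd, 600000, 1800000);
}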