Lines Matching +full:cluster +full:- +full:cpufreq
1 // SPDX-License-Identifier: GPL-2.0-only
12 #include <linux/clk-provider.h>
28 #define SPCLOG "vexpress-spc: "
39 /* SPC wake-up IRQs status and mask */
46 /* SPC per-CPU mailboxes */
50 /* SPC CPU/cluster reset status */
68 /* wake-up interrupt masks */
71 /* TC2 static dual-cluster configuration */
75 * Even though the SPC takes max 3-5 ms to complete any OPP/COMMS
97 * A15 cluster identifier
111 static inline bool cluster_is_a15(u32 cluster) in cluster_is_a15() argument
113 return cluster == info->a15_clusid; in cluster_is_a15()
117 * ve_spc_global_wakeup_irq() - sets/clears global wakeup IRQs
119 * @set: if true, global wake-up IRQs are set, if false they are cleared
129 reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); in ve_spc_global_wakeup_irq()
136 writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); in ve_spc_global_wakeup_irq()
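Only the two register accesses of ve_spc_global_wakeup_irq() match the filter; the elided middle is a plain read-modify-write of WAKE_INT_MASK. A minimal sketch of the whole function, assuming the global wake-up bits are collected in a mask macro such as GBL_WAKEUP_INT_MSK (name taken to be one of the register defines near the top of the file):

        void ve_spc_global_wakeup_irq(bool set)
        {
                u32 reg;

                reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);

                if (set)
                        reg |= GBL_WAKEUP_INT_MSK;      /* set the global wake-up IRQs */
                else
                        reg &= ~GBL_WAKEUP_INT_MSK;     /* clear them again */

                writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
        }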
140 * ve_spc_cpu_wakeup_irq() - sets/clears per-CPU wake-up IRQs
142 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
144 * @set: if true, wake-up IRQs are set, if false they are cleared
146 * Function to set/clear per-CPU wake-up IRQs. Not protected by locking since
150 void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set) in ve_spc_cpu_wakeup_irq() argument
154 if (cluster >= MAX_CLUSTERS) in ve_spc_cpu_wakeup_irq()
159 if (!cluster_is_a15(cluster)) in ve_spc_cpu_wakeup_irq()
162 reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK); in ve_spc_cpu_wakeup_irq()
169 writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK); in ve_spc_cpu_wakeup_irq()
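The per-CPU variant follows the same read-modify-write pattern; what the filter drops is the construction of the per-CPU bit. A sketch, assuming one wake-up bit per CPU with the A7 cluster's bits offset from the A15 ones (which is what the cluster_is_a15() test suggests):

        void ve_spc_cpu_wakeup_irq(u32 cluster, u32 cpu, bool set)
        {
                u32 mask, reg;

                if (cluster >= MAX_CLUSTERS)
                        return;

                mask = BIT(cpu);                /* one wake-up bit per CPU */
                if (!cluster_is_a15(cluster))
                        mask <<= 4;             /* assumed offset of the A7 bits */

                reg = readl_relaxed(info->baseaddr + WAKE_INT_MASK);
                if (set)
                        reg |= mask;
                else
                        reg &= ~mask;
                writel_relaxed(reg, info->baseaddr + WAKE_INT_MASK);
        }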
173 * ve_spc_set_resume_addr() - set the jump address used for warm boot
175 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
179 void ve_spc_set_resume_addr(u32 cluster, u32 cpu, u32 addr) in ve_spc_set_resume_addr() argument
183 if (cluster >= MAX_CLUSTERS) in ve_spc_set_resume_addr()
186 if (cluster_is_a15(cluster)) in ve_spc_set_resume_addr()
187 baseaddr = info->baseaddr + A15_BX_ADDR0 + (cpu << 2); in ve_spc_set_resume_addr()
189 baseaddr = info->baseaddr + A7_BX_ADDR0 + (cpu << 2); in ve_spc_set_resume_addr()
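A typical caller programs the resume address and the wake-up IRQ together, just before a CPU is allowed to power down, so that a warm boot re-enters through the power-management entry code. A sketch modelled on the TC2 MCPM backend (tc2_pm.c); the function name and the mcpm_entry_point symbol are borrowed from that backend, not from this file:

        #include <asm/mcpm.h>                   /* mcpm_entry_point */
        #include "spc.h"                        /* ve_spc_* helpers */

        static int tc2_pm_cpu_powerup(unsigned int cpu, unsigned int cluster)
        {
                /*
                 * A warm boot of this CPU must re-enter through the power
                 * management entry code, so program the jump address first ...
                 */
                ve_spc_set_resume_addr(cluster, cpu, __pa_symbol(mcpm_entry_point));
                /* ... then let the SPC wake the CPU on its wake-up interrupts. */
                ve_spc_cpu_wakeup_irq(cluster, cpu, true);

                return 0;
        }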
195 * ve_spc_powerdown() - enables/disables cluster powerdown
197 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
200 * Function to enable/disable cluster powerdown. Not protected by locking
204 void ve_spc_powerdown(u32 cluster, bool enable) in ve_spc_powerdown() argument
208 if (cluster >= MAX_CLUSTERS) in ve_spc_powerdown()
211 pwdrn_reg = cluster_is_a15(cluster) ? A15_PWRDN_EN : A7_PWRDN_EN; in ve_spc_powerdown()
212 writel_relaxed(enable, info->baseaddr + pwdrn_reg); in ve_spc_powerdown()
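At cluster scope the same backend pairs ve_spc_powerdown() with ve_spc_global_wakeup_irq() once the last CPU of a cluster is on its way down. Again a sketch, with the function name borrowed from the TC2 backend:

        static void tc2_pm_cluster_powerdown_prepare(unsigned int cluster)
        {
                ve_spc_powerdown(cluster, true);        /* allow the SPC to cut the cluster's power */
                ve_spc_global_wakeup_irq(true);         /* arm all wake-up sources for the way back */
        }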
215 static u32 standbywfi_cpu_mask(u32 cpu, u32 cluster) in standbywfi_cpu_mask() argument
217 return cluster_is_a15(cluster) ? in standbywfi_cpu_mask()
223 * ve_spc_cpu_in_wfi() - Checks if the specified CPU is in WFI or not
225 * @cpu: mpidr[7:0] bitfield describing CPU affinity level within cluster
226 * @cluster: mpidr[15:8] bitfield describing cluster affinity level
228 * @return: non-zero if and only if the specified CPU is in WFI
234 int ve_spc_cpu_in_wfi(u32 cpu, u32 cluster) in ve_spc_cpu_in_wfi() argument
237 u32 mask = standbywfi_cpu_mask(cpu, cluster); in ve_spc_cpu_in_wfi()
239 if (cluster >= MAX_CLUSTERS) in ve_spc_cpu_in_wfi()
242 ret = readl_relaxed(info->baseaddr + STANDBYWFI_STAT); in ve_spc_cpu_in_wfi()
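ve_spc_cpu_in_wfi() is the building block for "wait until the CPU has really hit WFI" loops in the power-down path. A hypothetical polling helper; the timeout constants are illustrative, not taken from the driver:

        #include <linux/delay.h>
        #include <linux/errno.h>
        #include "spc.h"                        /* ve_spc_cpu_in_wfi() */

        #define POLL_MS         1               /* illustrative values */
        #define TIMEOUT_MS      1000

        static int wait_for_cpu_powerdown(u32 cpu, u32 cluster)
        {
                unsigned int tries;

                for (tries = 0; tries < TIMEOUT_MS / POLL_MS; tries++) {
                        if (ve_spc_cpu_in_wfi(cpu, cluster))
                                return 0;       /* CPU reached WFI, power may be cut */
                        msleep(POLL_MS);
                }

                return -ETIMEDOUT;
        }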
250 static int ve_spc_get_performance(int cluster, u32 *freq) in ve_spc_get_performance() argument
252 struct ve_spc_opp *opps = info->opps[cluster]; in ve_spc_get_performance()
256 perf_cfg_reg = cluster_is_a15(cluster) ? PERF_LVL_A15 : PERF_LVL_A7; in ve_spc_get_performance()
258 perf = readl_relaxed(info->baseaddr + perf_cfg_reg); in ve_spc_get_performance()
259 if (perf >= info->num_opps[cluster]) in ve_spc_get_performance()
260 return -EINVAL; in ve_spc_get_performance()
263 *freq = opps->freq; in ve_spc_get_performance()
269 static int ve_spc_round_performance(int cluster, u32 freq) in ve_spc_round_performance() argument
271 int idx, max_opp = info->num_opps[cluster]; in ve_spc_round_performance()
272 struct ve_spc_opp *opps = info->opps[cluster]; in ve_spc_round_performance()
277 ftmp = opps->freq; in ve_spc_round_performance()
292 static int ve_spc_find_performance_index(int cluster, u32 freq) in ve_spc_find_performance_index() argument
294 int idx, max_opp = info->num_opps[cluster]; in ve_spc_find_performance_index()
295 struct ve_spc_opp *opps = info->opps[cluster]; in ve_spc_find_performance_index()
298 if (opps->freq == freq) in ve_spc_find_performance_index()
300 return (idx == max_opp) ? -EINVAL : idx; in ve_spc_find_performance_index()
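The loop header of ve_spc_find_performance_index() does not match the filter, but the visible lines pin it down as a linear scan of the cluster's OPP array for an exact frequency match. Reconstructed as a sketch:

        static int ve_spc_find_performance_index(int cluster, u32 freq)
        {
                int idx, max_opp = info->num_opps[cluster];
                struct ve_spc_opp *opps = info->opps[cluster];

                for (idx = 0; idx < max_opp; idx++, opps++)
                        if (opps->freq == freq)
                                break;

                return (idx == max_opp) ? -EINVAL : idx;
        }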
306 &info->done, usecs_to_jiffies(TIMEOUT_US)); in ve_spc_waitforcompletion()
308 ret = -ETIMEDOUT; in ve_spc_waitforcompletion()
310 ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO; in ve_spc_waitforcompletion()
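Three lines of ve_spc_waitforcompletion() are enough to recover its shape: block on info->done with a timeout, then check the response status latched by the IRQ handler. A sketch of the whole helper:

        static int ve_spc_waitforcompletion(int req_type)
        {
                int ret = wait_for_completion_interruptible_timeout(&info->done,
                                        usecs_to_jiffies(TIMEOUT_US));

                if (ret == 0)
                        ret = -ETIMEDOUT;       /* the SPC never answered */
                else if (ret > 0)
                        ret = info->cur_rsp_stat & STAT_COMPLETE(req_type) ? 0 : -EIO;

                return ret;                     /* a negative wait result (signal) falls through */
        }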
314 static int ve_spc_set_performance(int cluster, u32 freq) in ve_spc_set_performance() argument
319 if (cluster_is_a15(cluster)) { in ve_spc_set_performance()
327 perf = ve_spc_find_performance_index(cluster, freq); in ve_spc_set_performance()
332 if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) in ve_spc_set_performance()
333 return -ETIME; in ve_spc_set_performance()
335 init_completion(&info->done); in ve_spc_set_performance()
336 info->cur_rsp_mask = RESPONSE_MASK(req_type); in ve_spc_set_performance()
338 writel(perf, info->baseaddr + perf_cfg_reg); in ve_spc_set_performance()
341 info->cur_rsp_mask = 0; in ve_spc_set_performance()
342 up(&info->sem); in ve_spc_set_performance()
351 if (down_timeout(&info->sem, usecs_to_jiffies(TIMEOUT_US))) in ve_spc_read_sys_cfg()
352 return -ETIME; in ve_spc_read_sys_cfg()
354 init_completion(&info->done); in ve_spc_read_sys_cfg()
355 info->cur_rsp_mask = RESPONSE_MASK(SPC_SYS_CFG); in ve_spc_read_sys_cfg()
358 writel(SYSCFG_START | func | offset >> 2, info->baseaddr + COMMS); in ve_spc_read_sys_cfg()
362 *data = readl(info->baseaddr + SYSCFG_RDATA); in ve_spc_read_sys_cfg()
364 info->cur_rsp_mask = 0; in ve_spc_read_sys_cfg()
365 up(&info->sem); in ve_spc_read_sys_cfg()
373 uint32_t status = readl_relaxed(drv_data->baseaddr + PWC_STATUS); in ve_spc_irq_handler()
375 if (info->cur_rsp_mask & status) { in ve_spc_irq_handler()
376 info->cur_rsp_stat = status; in ve_spc_irq_handler()
377 complete(&drv_data->done); in ve_spc_irq_handler()
384 * +--------------------------+
385 * | 31      20 | 19        0 |
386 * +--------------------------+
387 * |   m_volt   |  freq(kHz)  |
388 * +--------------------------+
393 static int ve_spc_populate_opps(uint32_t cluster) in ve_spc_populate_opps() argument
400 return -ENOMEM; in ve_spc_populate_opps()
402 info->opps[cluster] = opps; in ve_spc_populate_opps()
404 off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE; in ve_spc_populate_opps()
408 opps->freq = (data & FREQ_MASK) * MULT_FACTOR; in ve_spc_populate_opps()
409 opps->u_volt = (data >> VOLT_SHIFT) * 1000; in ve_spc_populate_opps()
414 info->num_opps[cluster] = idx; in ve_spc_populate_opps()
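Most of the loop that walks the per-cluster performance-value registers is filtered out. A sketch of ve_spc_populate_opps(), assuming MAX_OPPS consecutive 32-bit entries starting at the per-cluster base offset and read through ve_spc_read_sys_cfg() with the SYSCFG_SCC function code (both names taken from the driver's defines):

        static int ve_spc_populate_opps(uint32_t cluster)
        {
                uint32_t data = 0, off, idx;
                int ret = 0;
                struct ve_spc_opp *opps;

                opps = kcalloc(MAX_OPPS, sizeof(*opps), GFP_KERNEL);
                if (!opps)
                        return -ENOMEM;

                info->opps[cluster] = opps;

                off = cluster_is_a15(cluster) ? A15_PERFVAL_BASE : A7_PERFVAL_BASE;
                for (idx = 0; idx < MAX_OPPS; idx++, off += 4, opps++) {
                        ret = ve_spc_read_sys_cfg(SYSCFG_SCC, off, &data);
                        if (ret)
                                break;          /* stop at the first entry the SPC refuses */

                        opps->freq = (data & FREQ_MASK) * MULT_FACTOR;  /* kHz */
                        opps->u_volt = (data >> VOLT_SHIFT) * 1000;     /* mV -> uV */
                }
                info->num_opps[cluster] = idx;

                return ret;
        }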
421 int cluster; in ve_init_opp_table() local
425 cluster = topology_physical_package_id(cpu_dev->id); in ve_init_opp_table()
426 cluster = cluster < 0 ? 0 : cluster; in ve_init_opp_table()
428 max_opp = info->num_opps[cluster]; in ve_init_opp_table()
429 opps = info->opps[cluster]; in ve_init_opp_table()
432 ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt); in ve_init_opp_table()
435 opps->freq, opps->u_volt); in ve_init_opp_table()
447 return -ENOMEM; in ve_spc_init()
449 info->baseaddr = baseaddr; in ve_spc_init()
450 info->a15_clusid = a15_clusid; in ve_spc_init()
455 return -EINVAL; in ve_spc_init()
458 init_completion(&info->done); in ve_spc_init()
460 readl_relaxed(info->baseaddr + PWC_STATUS); in ve_spc_init()
463 | IRQF_ONESHOT, "vexpress-spc", info); in ve_spc_init()
467 return -ENODEV; in ve_spc_init()
470 sema_init(&info->sem, 1); in ve_spc_init()
472 * Multi-cluster systems may need this data when non-coherent, during in ve_spc_init()
473 * cluster power-up/power-down. Make sure driver info reaches main in ve_spc_init()
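ve_spc_init() is what the platform code calls once it has mapped the SPC register window and worked out the A15 cluster id and the SPC interrupt. A hypothetical probe-time caller, assuming the ve_spc_init(baseaddr, a15_clusid, irq) signature from the driver's header; the compatible string and the hard-coded cluster id are illustrative only:

        #include <linux/of.h>
        #include <linux/of_address.h>
        #include <linux/of_irq.h>
        #include "spc.h"                        /* ve_spc_init() */

        static int __init example_spc_setup(void)
        {
                struct device_node *np;
                void __iomem *base;
                u32 a15_clusid = 0;             /* normally read back from the SCC config */
                int irq;

                np = of_find_compatible_node(NULL, NULL, "arm,vexpress-spc"); /* assumed */
                if (!np)
                        return -ENODEV;

                base = of_iomap(np, 0);
                irq = irq_of_parse_and_map(np, 0);
                of_node_put(np);
                if (!base || irq <= 0)
                        return -ENODEV;

                return ve_spc_init(base, a15_clusid, irq);
        }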
484 int cluster; member
494 if (ve_spc_get_performance(spc->cluster, &freq)) in spc_recalc_rate()
495 return -EIO; in spc_recalc_rate()
505 return ve_spc_round_performance(spc->cluster, drate); in spc_round_rate()
513 return ve_spc_set_performance(spc->cluster, rate / 1000); in spc_set_rate()
529 return ERR_PTR(-ENOMEM); in ve_spc_clk_register()
531 spc->hw.init = &init; in ve_spc_clk_register()
532 spc->cluster = topology_physical_package_id(cpu_dev->id); in ve_spc_clk_register()
534 spc->cluster = spc->cluster < 0 ? 0 : spc->cluster; in ve_spc_clk_register()
541 return devm_clk_register(cpu_dev, &spc->hw); in ve_spc_clk_register()
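The clk_init_data setup between these lines is elided; the registered per-cluster clock simply routes the common clock callbacks to the SPC performance requests shown earlier. A sketch of how that wiring plausibly looks, with the three callbacks collected in a clk_ops table:

        static const struct clk_ops clk_spc_ops = {
                .recalc_rate    = spc_recalc_rate,      /* read the current OPP back from the SPC */
                .round_rate     = spc_round_rate,       /* snap to the nearest stored OPP */
                .set_rate       = spc_set_rate,         /* request a new performance level */
        };

        static struct clk *ve_spc_clk_register(struct device *cpu_dev)
        {
                struct clk_init_data init;
                struct clk_spc *spc;

                spc = kzalloc(sizeof(*spc), GFP_KERNEL);
                if (!spc)
                        return ERR_PTR(-ENOMEM);

                spc->hw.init = &init;
                spc->cluster = topology_physical_package_id(cpu_dev->id);
                spc->cluster = spc->cluster < 0 ? 0 : spc->cluster;

                init.name = dev_name(cpu_dev);
                init.ops = &clk_spc_ops;
                init.flags = CLK_GET_RATE_NOCACHE;      /* assumed: always re-read the rate */
                init.num_parents = 0;

                return devm_clk_register(cpu_dev, &spc->hw);
        }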
546 int cpu, cluster; in ve_spc_clk_init() local
555 return -ENODEV; in ve_spc_clk_init()
574 cluster = topology_physical_package_id(cpu_dev->id); in ve_spc_clk_init()
575 if (cluster < 0 || init_opp_table[cluster]) in ve_spc_clk_init()
581 topology_core_cpumask(cpu_dev->id))) in ve_spc_clk_init()
584 init_opp_table[cluster] = true; in ve_spc_clk_init()
587 platform_device_register_simple("vexpress-spc-cpufreq", -1, NULL, 0); in ve_spc_clk_init()
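The platform device registered here is what pulls in the actual cpufreq driver: a platform_driver with the matching name binds to it. A minimal sketch of that consumer side (probe body omitted; the real driver lives under drivers/cpufreq):

        #include <linux/module.h>
        #include <linux/platform_device.h>

        static int ve_spc_cpufreq_probe(struct platform_device *pdev)
        {
                /* set up the cpufreq driver on top of the per-cluster SPC clocks */
                return 0;
        }

        static struct platform_driver ve_spc_cpufreq_platdrv = {
                .driver = {
                        .name   = "vexpress-spc-cpufreq", /* must match the device name above */
                },
                .probe  = ve_spc_cpufreq_probe,
        };
        module_platform_driver(ve_spc_cpufreq_platdrv);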