Matching lines from arch/mips/kernel/pm-cps.c (runtime-generated entry/exit code for MIPS CPS non-coherent idle states)

// SPDX-License-Identifier: GPL-2.0-or-later

...

#include <asm/asm-offsets.h>
#include <asm/mips-cps.h>
#include <asm/pm-cps.h>
#include <asm/smp-cps.h>
/*
 * cps_nc_entry_fn - type of a generated non-coherent state entry function
 * @online: the count of online coupled VPEs
 * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
 *
 * The code entering & exiting non-coherent states is generated at runtime
 * using uasm, in order to ensure that the compiler cannot insert a stray
 * memory access at an unfortunate time and to allow the generation of optimal
 * core-specific code particularly for cache routines. If coupled_coherence
 * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
 * the function returns the number of VPEs that were in the wait state at the
 * point this VPE left it.
 */
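/*
 * Added sketch (not a matched line): the typedef this kernel-doc describes,
 * reconstructed from the @online/@nc_ready_count parameters documented above.
 */
typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);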
/*
 * The entry point of the generated non-coherent idle state entry/exit
 * functions. Actually per-core rather than per-CPU.
 */
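/*
 * Added sketch (an assumption, not a matched line): a plausible declaration
 * for the comment above, with one generated entry function per idle state.
 */
static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
				  nc_asm_enter);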
/* Bitmap indicating which states are supported by the system */
/*
 * Indicates the number of coupled VPEs ready to operate in a non-coherent
 * state. Actually per-core rather than per-CPU.
 */
/*
 * Used to synchronize entry to deep idle states. Actually per-core rather
 * than per-CPU.
 */
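/*
 * Added sketch (assumptions based on how the rest of the excerpt uses these
 * objects, not matched lines): plausible definitions for the three comments
 * above.
 */
static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
static DEFINE_PER_CPU_ALIGNED(u32 *, ready_count);
static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);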
/* In cps_pm_enter_state(): */
		return -EINVAL;
	...
		return -EINVAL;
	...
	vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
	vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
	vpe_cfg->gp = (unsigned long)current_thread_info();
	vpe_cfg->sp = 0;
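/*
 * Added context (an assumption, not matched lines): the fields written above
 * live in struct vpe_boot_config from asm/smp-cps.h, along the lines of:
 */
struct vpe_boot_config {
	unsigned long pc;
	unsigned long sp;
	unsigned long gp;
};
/*
 * Setting pc to mips_cps_pm_restore makes a re-powered VPE resume there; sp
 * is presumably left zero because the restore path reloads the register
 * state saved earlier by mips_cps_pm_save.
 */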
	...
	/* Create a non-coherent mapping of the core ready_count */
	...
	/* Ensure ready_count is zero-initialised before the assembly runs */
	...
	/* Remove the non-coherent mapping of ready_count */
	...
	/*
	 * If this VPE is the first to leave the non-coherent wait state then
	 * it needs to wake up any coupled VPEs still running their wait
	 * instruction ...
	 */
/* In cps_gen_cache_routine(): */
	unsigned cache_size = cache->ways << cache->waybit;
	...
	/* Nothing to do if the cache is not present */
	if (cache->flags & MIPS_CACHE_NOT_PRESENT)
		return;
	...
			uasm_i_addiu(pp, GPR_T0, GPR_T0, cache->linesz);
	...
			uasm_i_cache(pp, op, i * cache->linesz, GPR_T0);
	...
		uasm_i_addiu(pp, GPR_T0, GPR_T0, unroll_lines * cache->linesz);
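/*
 * Added sketch (not from the source): what the loop generated by
 * cps_gen_cache_routine() does, expressed as C. The end address is the start
 * plus cache_size (ways << waybit, computed above); mips_cache_op_at() is a
 * hypothetical stand-in for the emitted "cache" instruction, and in the real
 * generated code the inner loop is unrolled into straight-line instructions.
 */
static void sketch_index_cache_ops(unsigned long addr, unsigned long end,
				   unsigned linesz, unsigned unroll_lines)
{
	unsigned i;

	do {
		for (i = 0; i < unroll_lines; i++)
			mips_cache_op_at(addr + i * linesz); /* hypothetical */
		addr += unroll_lines * linesz;
	} while (addr != end);
}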
/* In cps_gen_flush_fsb(): */
	unsigned line_size = cpu_info->dcache.linesz;
	...
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
	...
	switch (__get_cpu_type(cpu_info->cputype)) {
	...
		return -1;
	...
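/*
 * Added note (an assumption based on the call site in cps_gen_entry_code()
 * below, which stores this return value in err): returning -1 signals that
 * a required fill/store buffer (FSB) flush cannot be generated for this CPU,
 * so the caller presumably treats the idle state as unsupported.
 */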
	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
/* In cps_gen_entry_code(): */
	u32 *buf, *p;
	...
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
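	/*
	 * Added note (not from the source): uasm emits one 32-bit MIPS
	 * instruction per call, advancing p through this buffer, so the
	 * distance (p - buf) can be checked against max_instrs once
	 * generation is complete (see the BUG_ON near the end).
	 */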
	...
	/*
	 * Save CPU state. Note the non-standard calling convention: the jalr
	 * below uses v0 rather than ra as the link register, so
	 * mips_cps_pm_save returns through v0.
	 */
	UASM_i_LA(&p, GPR_T0, (long)mips_cps_pm_save);
	uasm_i_jalr(&p, GPR_V0, GPR_T0);
	uasm_i_nop(&p);
	...
	UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
	...
		/* Increment ready_count */
		uasm_i_sync(&p, __SYNC_mb);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, GPR_T1, 0, r_nc_count);
		uasm_i_addiu(&p, GPR_T2, GPR_T1, 1);
		uasm_i_sc(&p, GPR_T2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, GPR_T2, lbl_incready);
		uasm_i_addiu(&p, GPR_T1, GPR_T1, 1);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, __SYNC_mb);
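/*
 * Added sketch (not from the source): the LL/SC sequence above atomically
 * increments ready_count; in C it is roughly the following, where
 * load_linked() and store_conditional() are hypothetical stand-ins for the
 * ll/sc instructions. The final addiu executes in the beqz delay slot,
 * leaving the post-increment value in t1.
 */
static inline u32 sketch_atomic_inc(u32 *nc_count)
{
	u32 old;

	do {
		old = load_linked(nc_count);	/* ll: load, open link */
	} while (!store_conditional(nc_count, old + 1)); /* sc: retry on loss */

	return old + 1;
}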
		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch on to the disable-coherence code
		 * below.
		 */
		uasm_il_beq(&p, &r, GPR_T1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will
			 * do by polling for the top bit of ready_count being
			 * set.
			 */
			uasm_i_addiu(&p, GPR_T1, GPR_ZERO, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, GPR_T0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, GPR_T0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			...
			uasm_i_yield(&p, GPR_ZERO, GPR_T1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
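			/*
			 * Added note (not from the source): the top bit of
			 * ready_count doubles as the "coherence disabled"
			 * flag, so the bltz above exits the poll once the
			 * last VPE sets it; the MT ASE yield (with t1 = -1
			 * from the addiu above) presumably cedes this VPE's
			 * issue slots to other thread contexts while it spins.
			 */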
	...
			uasm_i_addiu(&p, GPR_T0, GPR_ZERO, TCHALT_H);
			uasm_i_mtc0(&p, GPR_T0, 2, 4);
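			/*
			 * Added note (an assumption based on the MT ASE): CP0
			 * register 2, select 4 is TCHalt, so the mtc0 above
			 * halts the executing thread context by setting its
			 * TCHALT_H bit.
			 */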
	...
			uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << vpe_id);
			UASM_i_LA(&p, GPR_T1, (long)addr_cpc_cl_vp_stop());
			uasm_i_sw(&p, GPR_T0, 0, GPR_T1);
	...
		uasm_build_label(&l, p, lbl_secondary_hang);
		uasm_il_b(&p, &r, lbl_secondary_hang);
		uasm_i_nop(&p);
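		/*
		 * Added note (not from the source): a successfully halted or
		 * stopped VPE never reaches these instructions; the
		 * branch-to-self is a safety net in case the halt/stop does
		 * not take effect.
		 */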
	...
	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, __SYNC_full);
	uasm_i_ehb(&p);
	...
		uasm_i_addiu(&p, GPR_T0, GPR_ZERO, 1 << cpu_core(&cpu_data[cpu]));
		uasm_i_sw(&p, GPR_T0, 0, r_pcohctl);
		uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
	...
		uasm_i_sync(&p, __SYNC_full);
		uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, GPR_ZERO, 0, r_pcohctl);
	uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);
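	/*
	 * Added note (not from the source): r_pcohctl points at the
	 * core-local GCR_CL_COHERENCE register (loaded via
	 * addr_gcr_cl_coherence() earlier); writing zero takes the core out
	 * of the coherence domain, and the load back forces the uncached
	 * store to complete before execution continues.
	 */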
	...
	err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
				...);
	...
		UASM_i_LA(&p, GPR_T0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, GPR_T1, GPR_ZERO, cpc_cmd);
		uasm_i_sw(&p, GPR_T1, 0, GPR_T0);
	...
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);
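			/*
			 * Added note (an assumption, not from the matched
			 * lines): cpc_cmd is presumably the CPC command
			 * selected earlier for the target state (clock-off
			 * for CPS_PM_CLOCK_GATED, power-down for
			 * CPS_PM_POWER_GATED); writing it to the core-local
			 * command register triggers the transition, after
			 * which a power-gated core never returns here, hence
			 * the hang loop above.
			 */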
	...
		uasm_i_sync(&p, __SYNC_full);
		uasm_i_ehb(&p);
	...
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
				    ...);
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
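		/*
		 * Added note (not from the source): the wait instruction
		 * idles this VPE until an interrupt is pending; because the
		 * core is non-coherent here, interrupts must have been
		 * disabled before entry (see the cps_pm_init() fragment at
		 * the end of this excerpt).
		 */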
	...
	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, GPR_T0, GPR_ZERO, mips_cm_revision() < CM_REV_CM3
				? ... : ...);
	uasm_i_sw(&p, GPR_T0, 0, r_pcohctl);
	uasm_i_lw(&p, GPR_T0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, __SYNC_full);
	uasm_i_ehb(&p);
	...
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, __SYNC_mb);
		uasm_i_ll(&p, GPR_T1, 0, r_nc_count);
		uasm_i_addiu(&p, GPR_T2, GPR_T1, -1);
		uasm_i_sc(&p, GPR_T2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, GPR_T2, lbl_decready);
		uasm_i_andi(&p, GPR_V0, GPR_T1, (1 << fls(smp_num_siblings)) - 1);

		/* Barrier ensuring all CPUs see the updated r_nc_count value */
		uasm_i_sync(&p, __SYNC_mb);
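		/*
		 * Added note (not from the source): the andi executes in the
		 * beqz delay slot and masks the pre-decrement value (loaded
		 * by the ll) down to the VPE-count bits, clearing the top
		 * "coherence disabled" flag bit; since v0 is the return
		 * register, the generated function returns the number of VPEs
		 * that were in the wait state, as the cps_nc_entry_fn
		 * kernel-doc above describes.
		 */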
	...
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
	...
		/*
		 * ...
		 * power-up command to the CPC in order to resume operation.
		 * ...
		 * be the one to re-enable it. The rest will continue from here
		 * ...
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);
	...
	uasm_i_sync(&p, __SYNC_mb);
	...
	uasm_i_jr(&p, GPR_RA);
	uasm_i_nop(&p);
	...
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
	...
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);
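	/*
	 * Added note (not from the source): the generated instructions were
	 * written through the data side, so the local icache flush over
	 * [buf, p) is required before the new function can safely be
	 * executed.
	 */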
/* In cps_pm_online_cpu(): */
		return -ENOMEM;
/* In cps_pm_power_notifier(): */
		/*
		 * ...
		 * instead put the cores into clock-off state. In this state
		 * ...
		 */
		...
			pr_warn("JTAG probe is connected - abort suspend\n");
/* In cps_pm_init(): */
	/* A CM is required for all non-coherent states */
	if (!mips_cm_present()) {
		pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
		...
	}

	/*
	 * If interrupts were enabled whilst running a wait instruction on a
	 * non-coherent core then the VPE may end up processing interrupts
	 * whilst non-coherent. That would be bad.
	 */
	...
		pr_warn("pm-cps: non-coherent wait unavailable\n");
	...
		pr_warn("pm-cps: CPC does not support clock gating\n");
	...
		pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
	...
		pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");