// SPDX-License-Identifier: GPL-2.0
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pgtable.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/cpuidle.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/exec.h>
#include <asm/mte.h>
#include <asm/memory.h>
#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>

/*
 * This is allocated by cpu_suspend_init(), and used to store a pointer to
 * the 'struct sleep_stack_data' that contains a particular CPU's state.
 */
unsigned long *sleep_save_stash;

/*
 * This hook is provided so that cpu_suspend code can restore HW
 * breakpoints as early as possible in the resume path, before reenabling
 * debug exceptions. Code cannot be run from a CPU PM notifier since by the
 * time the notifier runs debug exceptions might have been enabled already,
 * with the HW breakpoint register contents still in an unknown state.
 */
static int (*hw_breakpoint_restore)(unsigned int);
void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
{
	/* Prevent multiple restore hook initializations */
	if (WARN_ON(hw_breakpoint_restore))
		return;
	hw_breakpoint_restore = hw_bp_restore;
}
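
/*
 * Illustrative sketch (not part of the original file): the debug code is
 * expected to register its per-CPU restore callback once, from init code
 * (the setter above is __init), along these lines. The "my_" names are
 * hypothetical; only cpu_suspend_set_dbg_restorer() and the callback
 * signature come from this file.
 *
 *	static int my_hw_bp_restore(unsigned int cpu)
 *	{
 *		// Reprogram this CPU's breakpoint/watchpoint registers
 *		// from the kernel's saved state.
 *		return 0;
 *	}
 *
 *	static int __init my_debug_init(void)
 *	{
 *		cpu_suspend_set_dbg_restorer(my_hw_bp_restore);
 *		return 0;
 *	}
 */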

void notrace __cpu_suspend_exit(void)
{
	unsigned int cpu = smp_processor_id();

	mte_suspend_exit();

	/*
	 * We are resuming from reset with the idmap active in TTBR0_EL1.
	 * We must uninstall the idmap and restore the expected MMU
	 * state before we can possibly return to userspace.
	 */
	cpu_uninstall_idmap();

	/* Restore CnP bit in TTBR1_EL1 */
	if (system_supports_cnp())
		cpu_replace_ttbr1(lm_alias(swapper_pg_dir), idmap_pg_dir);

	/*
	 * PSTATE was not saved over suspend/resume, re-enable any detected
	 * features that might not have been set correctly.
	 */
	__uaccess_enable_hw_pan();

	/*
	 * Restore HW breakpoint registers to sane values
	 * before debug exceptions are possibly reenabled
	 * by cpu_suspend()'s local_daif_restore() call.
	 */
	if (hw_breakpoint_restore)
		hw_breakpoint_restore(cpu);

	/*
	 * On resume, firmware implementing dynamic mitigation will
	 * have turned the mitigation on. If the user has forcefully
	 * disabled it, make sure their wishes are obeyed.
	 */
	spectre_v4_enable_mitigation(NULL);

	/* Restore additional feature-specific configuration */
	ptrauth_suspend_exit();
}

/*
 * cpu_suspend()
 *
 * arg: argument to pass to the finisher function
 * fn: finisher function pointer
 */
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;
	struct arm_cpuidle_irq_context context;

	/* Report any MTE async fault before going to suspend */
	mte_suspend_enter();

	/*
	 * From this point debug exceptions are disabled to prevent
	 * updates to the mdscr register (saved and restored along with
	 * general purpose registers) from kernel debuggers.
	 */
	flags = local_daif_save();

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka suspend finishers), hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/*
	 * Switch to using DAIF.IF instead of PMR in order to reliably
	 * resume if we're using pseudo-NMIs.
	 */
	arm_cpuidle_save_irq_context(&context);

	if (__cpu_suspend_enter(&state)) {
		/* Call the suspend finisher */
		ret = fn(arg);

		/*
		 * Never gets here, unless the suspend finisher fails.
		 * Successful cpu_suspend() should return from cpu_resume(),
		 * so returning through this code path is considered an error.
		 * If the return value is set to 0, force ret = -EOPNOTSUPP
		 * to make sure a proper error condition is propagated.
		 */
		if (!ret)
			ret = -EOPNOTSUPP;
	} else {
		RCU_NONIDLE(__cpu_suspend_exit());
	}

	arm_cpuidle_restore_irq_context(&context);

	unpause_graph_tracing();

	/*
	 * Restore pstate flags. OS lock and mdscr have already been
	 * restored, so from this point onwards, debugging is fully
	 * reenabled if it was enabled when the core started shutdown.
	 */
	local_daif_restore(flags);

	return ret;
}

static int __init cpu_suspend_init(void)
{
	/* sleep_save_stash is an array of physical addresses */
	sleep_save_stash = kcalloc(mpidr_hash_size(), sizeof(*sleep_save_stash),
				   GFP_KERNEL);

	if (WARN_ON(!sleep_save_stash))
		return -ENOMEM;

	return 0;
}
early_initcall(cpu_suspend_init);
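
/*
 * Illustrative sketch (not part of the original file): a caller of
 * cpu_suspend() passes a "finisher" that asks the power controller or
 * firmware to shut the CPU down and, on success, never returns; the CPU
 * instead resumes through cpu_resume(). The "my_" names below are
 * hypothetical; the calling convention is the one defined above.
 *
 *	static int my_suspend_finisher(unsigned long state)
 *	{
 *		// On success this call does not return; returning here
 *		// means the power-down request failed.
 *		return my_firmware_cpu_suspend(state);
 *	}
 *
 *	int ret = cpu_suspend(state, my_suspend_finisher);
 *	// ret == 0: the CPU suspended and resumed via cpu_resume().
 *	// ret != 0: the finisher failed and the CPU never powered down.
 */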