/* xref: /linux/arch/riscv/kernel/suspend.c (revision ef9226cd56b718c79184a3466d32984a51cb449c) */
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2021 Western Digital Corporation or its affiliates.
4  * Copyright (c) 2022 Ventana Micro Systems Inc.
5  */
6 
7 #define pr_fmt(fmt) "suspend: " fmt
8 
9 #include <linux/ftrace.h>
10 #include <linux/suspend.h>
11 #include <asm/csr.h>
12 #include <asm/sbi.h>
13 #include <asm/suspend.h>
14 
/*
 * suspend_save_csrs() - snapshot the CSRs that must survive a suspend.
 * @context: per-CPU suspend context the values are stored into.
 *
 * Saves SCRATCH, TVEC and IE unconditionally, ENVCFG only when this CPU
 * has an envcfg-providing ISA extension (RISCV_ISA_EXT_XLINUXENVCFG),
 * and SATP on MMU kernels. Restored later by suspend_restore_csrs().
 */
void suspend_save_csrs(struct suspend_context *context)
{
	context->scratch = csr_read(CSR_SCRATCH);
	/* ENVCFG is only accessible when the matching extension is present */
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
		context->envcfg = csr_read(CSR_ENVCFG);
	context->tvec = csr_read(CSR_TVEC);
	context->ie = csr_read(CSR_IE);

	/*
	 * No need to save/restore IP CSR (i.e. MIP or SIP) because:
	 *
	 * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
	 *    external devices (such as interrupt controller, timer, etc).
	 * 2. For MMU (S-mode) kernel, the bits in SIP are set by
	 *    M-mode firmware and external devices (such as interrupt
	 *    controller, etc).
	 */

#ifdef CONFIG_MMU
	/* Address-translation CSR only exists for MMU (S-mode) kernels */
	context->satp = csr_read(CSR_SATP);
#endif
}
37 
/*
 * suspend_restore_csrs() - write back the CSRs captured by
 * suspend_save_csrs().
 * @context: per-CPU suspend context previously filled by the save path.
 *
 * Mirrors the save path exactly: the same conditional applies to ENVCFG
 * and the same CONFIG_MMU guard applies to SATP.
 */
void suspend_restore_csrs(struct suspend_context *context)
{
	csr_write(CSR_SCRATCH, context->scratch);
	/* Same extension gate as in suspend_save_csrs() */
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
		csr_write(CSR_ENVCFG, context->envcfg);
	csr_write(CSR_TVEC, context->tvec);
	csr_write(CSR_IE, context->ie);

#ifdef CONFIG_MMU
	csr_write(CSR_SATP, context->satp);
#endif
}
50 
/*
 * cpu_suspend() - suspend the calling CPU via a platform "finisher".
 * @arg: opaque value passed through to @finish.
 * @finish: platform finisher; called with @arg, the physical address of
 *          the resume entry point and the address of the saved context.
 *          A successful finisher does not return — execution resumes at
 *          __cpu_resume_enter instead.
 *
 * Returns 0 on a successful suspend/resume cycle, -EINVAL for a NULL
 * finisher, or the finisher's error code (-EOPNOTSUPP if the finisher
 * returned "success" without actually suspending).
 */
int cpu_suspend(unsigned long arg,
		int (*finish)(unsigned long arg,
			      unsigned long entry,
			      unsigned long context))
{
	int rc = 0;
	struct suspend_context context = { 0 };

	/* Finisher should be non-NULL */
	if (!finish)
		return -EINVAL;

	/* Save additional CSRs */
	suspend_save_csrs(&context);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka finishers) hence disable
	 * graph tracing during their execution.
	 */
	pause_graph_tracing();

	/* Save context on stack */
	if (__cpu_suspend_enter(&context)) {
		/* Call the finisher */
		rc = finish(arg, __pa_symbol(__cpu_resume_enter),
			    (ulong)&context);

		/*
		 * Should never reach here, unless the suspend finisher
		 * fails. Successful cpu_suspend() should return from
		 * __cpu_resume_enter()
		 */
		if (!rc)
			rc = -EOPNOTSUPP;
	}

	/* Enable function graph tracer */
	unpause_graph_tracing();

	/* Restore additional CSRs */
	suspend_restore_csrs(&context);

	return rc;
}
96 
97 #ifdef CONFIG_RISCV_SBI
98 static int sbi_system_suspend(unsigned long sleep_type,
99 			      unsigned long resume_addr,
100 			      unsigned long opaque)
101 {
102 	struct sbiret ret;
103 
104 	ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
105 			sleep_type, resume_addr, opaque, 0, 0, 0);
106 	if (ret.error)
107 		return sbi_err_map_linux_errno(ret.error);
108 
109 	return ret.value;
110 }
111 
/*
 * platform_suspend_ops.enter callback: always requests suspend-to-RAM.
 * @state is intentionally ignored — .valid (suspend_valid_only_mem)
 * already restricts entry to PM_SUSPEND_MEM.
 */
static int sbi_system_suspend_enter(suspend_state_t state)
{
	return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
}
116 
/* System-wide suspend operations backed by the SBI SUSP extension */
static const struct platform_suspend_ops sbi_system_suspend_ops = {
	.valid = suspend_valid_only_mem,
	.enter = sbi_system_suspend_enter,
};
121 
122 static int __init sbi_system_suspend_init(void)
123 {
124 	if (sbi_spec_version >= sbi_mk_version(2, 0) &&
125 	    sbi_probe_extension(SBI_EXT_SUSP) > 0) {
126 		pr_info("SBI SUSP extension detected\n");
127 		if (IS_ENABLED(CONFIG_SUSPEND))
128 			suspend_set_ops(&sbi_system_suspend_ops);
129 	}
130 
131 	return 0;
132 }
133 
134 arch_initcall(sbi_system_suspend_init);
135 
136 static int sbi_suspend_finisher(unsigned long suspend_type,
137 				unsigned long resume_addr,
138 				unsigned long opaque)
139 {
140 	struct sbiret ret;
141 
142 	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
143 			suspend_type, resume_addr, opaque, 0, 0, 0);
144 
145 	return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
146 }
147 
148 int riscv_sbi_hart_suspend(u32 state)
149 {
150 	if (state & SBI_HSM_SUSP_NON_RET_BIT)
151 		return cpu_suspend(state, sbi_suspend_finisher);
152 	else
153 		return sbi_suspend_finisher(state, 0, 0);
154 }
155 
156 bool riscv_sbi_suspend_state_is_valid(u32 state)
157 {
158 	if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
159 	    state < SBI_HSM_SUSPEND_RET_PLATFORM)
160 		return false;
161 
162 	if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
163 	    state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
164 		return false;
165 
166 	return true;
167 }
168 
169 bool riscv_sbi_hsm_is_supported(void)
170 {
171 	/*
172 	 * The SBI HSM suspend function is only available when:
173 	 * 1) SBI version is 0.3 or higher
174 	 * 2) SBI HSM extension is available
175 	 */
176 	if (sbi_spec_version < sbi_mk_version(0, 3) ||
177 	    !sbi_probe_extension(SBI_EXT_HSM)) {
178 		pr_info("HSM suspend not available\n");
179 		return false;
180 	}
181 
182 	return true;
183 }
184 #endif /* CONFIG_RISCV_SBI */
185