// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "suspend: " fmt

#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/suspend.h>

void suspend_save_csrs(struct suspend_context *context)
{
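	/*
	 * The [ms]envcfg CSR exists only when one of the envcfg-dependent
	 * extensions is implemented; the synthetic XLINUXENVCFG extension
	 * tracks exactly that, so gate the access on it.
	 */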
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG))
		context->envcfg = csr_read(CSR_ENVCFG);
	context->tvec = csr_read(CSR_TVEC);
	context->ie = csr_read(CSR_IE);

	/*
	 * No need to save/restore IP CSR (i.e. MIP or SIP) because:
	 *
	 * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
	 *    external devices (such as interrupt controller, timer, etc).
	 * 2. For MMU (S-mode) kernel, the bits in SIP are set by
	 *    M-mode firmware and external devices (such as interrupt
	 *    controller, etc).
	 */

#ifdef CONFIG_MMU
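	/*
	 * With Sstc the timer is programmed through stimecmp(h); save the
	 * current compare value so the pending timer event can be re-armed
	 * after a non-retentive suspend wipes the CSR state.
	 */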
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SSTC)) {
		context->stimecmp = csr_read(CSR_STIMECMP);
#if __riscv_xlen < 64
		context->stimecmph = csr_read(CSR_STIMECMPH);
#endif
	}

	context->satp = csr_read(CSR_SATP);
#endif
}

void suspend_restore_csrs(struct suspend_context *context)
{
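	/*
	 * The trap entry code distinguishes traps taken from the kernel by
	 * seeing a zero SCRATCH CSR, so clear it before any trap can occur.
	 */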
	csr_write(CSR_SCRATCH, 0);
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_XLINUXENVCFG))
		csr_write(CSR_ENVCFG, context->envcfg);
	csr_write(CSR_TVEC, context->tvec);
	csr_write(CSR_IE, context->ie);

#ifdef CONFIG_MMU
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SSTC)) {
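		/*
		 * Restore the 64-bit compare value in two halves. Writing
		 * all-ones to the low half first keeps the intermediate
		 * value in the future, so no spurious timer interrupt can
		 * fire between the two writes.
		 */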
#if __riscv_xlen < 64
		csr_write(CSR_STIMECMP, ULONG_MAX);
		csr_write(CSR_STIMECMPH, context->stimecmph);
#endif
		csr_write(CSR_STIMECMP, context->stimecmp);
	}

	csr_write(CSR_SATP, context->satp);
#endif
}

int cpu_suspend(unsigned long arg,
		int (*finish)(unsigned long arg,
			      unsigned long entry,
			      unsigned long context))
{
	int rc = 0;
	struct suspend_context context = { 0 };

	/* Finisher should be non-NULL */
	if (!finish)
		return -EINVAL;

	/* Save additional CSRs */
	suspend_save_csrs(&context);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (i.e. the suspend finishers),
	 * hence disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/* Save context on stack */
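	/*
	 * __cpu_suspend_enter() returns non-zero after saving the hart
	 * context on the way down; when the hart is resumed through
	 * __cpu_resume_enter() it appears to return zero, so the finisher
	 * block below is skipped on the resume path.
	 */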
	if (__cpu_suspend_enter(&context)) {
		/* Call the finisher */
		rc = finish(arg, __pa_symbol(__cpu_resume_enter),
			    (ulong)&context);

		/*
		 * Should never reach here, unless the suspend finisher
		 * fails. A successful cpu_suspend() returns from
		 * __cpu_resume_enter().
		 */
		if (!rc)
			rc = -EOPNOTSUPP;
	}

	/* Enable function graph tracer */
	unpause_graph_tracing();

	/* Restore additional CSRs */
	suspend_restore_csrs(&context);

	return rc;
}

#ifdef CONFIG_RISCV_SBI
static int sbi_system_suspend(unsigned long sleep_type,
			      unsigned long resume_addr,
			      unsigned long opaque)
{
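	/*
	 * Per the SBI SUSP extension, the firmware resumes the calling hart
	 * at resume_addr (a physical address) with the opaque value handed
	 * back in a1 when the system wakes up.
	 */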
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
			sleep_type, resume_addr, opaque, 0, 0, 0);
	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);

	return ret.value;
}

static int sbi_system_suspend_enter(suspend_state_t state)
{
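	/*
	 * Only PM_SUSPEND_MEM reaches this point (see suspend_valid_only_mem),
	 * so suspend-to-RAM is requested unconditionally.
	 */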
	return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
}

static const struct platform_suspend_ops sbi_system_suspend_ops = {
	.valid = suspend_valid_only_mem,
	.enter = sbi_system_suspend_enter,
};

static int __init sbi_system_suspend_init(void)
{
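	/* The SUSP extension is defined from SBI v2.0 onwards. */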
	if (sbi_spec_version >= sbi_mk_version(2, 0) &&
	    sbi_probe_extension(SBI_EXT_SUSP) > 0) {
		pr_info("SBI SUSP extension detected\n");
		if (IS_ENABLED(CONFIG_SUSPEND))
			suspend_set_ops(&sbi_system_suspend_ops);
	}

	return 0;
}

arch_initcall(sbi_system_suspend_init);

static int sbi_suspend_finisher(unsigned long suspend_type,
				unsigned long resume_addr,
				unsigned long opaque)
{
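	/*
	 * For a non-retentive suspend this call does not return on success:
	 * the hart restarts at resume_addr instead. For a retentive suspend
	 * it returns here once the hart wakes up.
	 */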
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
			suspend_type, resume_addr, opaque, 0, 0, 0);

	return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
}

int riscv_sbi_hart_suspend(u32 state)
{
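	/*
	 * Non-retentive suspend states lose register and CSR state, so go
	 * through cpu_suspend() to save and restore it; retentive states
	 * preserve it, so the SBI call can be made directly.
	 */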
	if (state & SBI_HSM_SUSP_NON_RET_BIT)
		return cpu_suspend(state, sbi_suspend_finisher);
	else
		return sbi_suspend_finisher(state, 0, 0);
}

bool riscv_sbi_suspend_state_is_valid(u32 state)
{
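	/*
	 * Values between the default and the platform-specific suspend types
	 * are reserved by the SBI HSM extension, for both the retentive and
	 * the non-retentive ranges.
	 */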
	if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_RET_PLATFORM)
		return false;

	if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
	    state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
		return false;

	return true;
}

bool riscv_sbi_hsm_is_supported(void)
{
	/*
	 * The SBI HSM suspend function is only available when:
	 * 1) SBI version is 0.3 or higher
	 * 2) SBI HSM extension is available
	 */
	if (sbi_spec_version < sbi_mk_version(0, 3) ||
	    !sbi_probe_extension(SBI_EXT_HSM)) {
		pr_info("HSM suspend not available\n");
		return false;
	}

	return true;
}
#endif /* CONFIG_RISCV_SBI */