xref: /linux/arch/riscv/kernel/suspend.c (revision 8e3ed5440b0c305dcd1d5fa7419bd8066d22ef42)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021 Western Digital Corporation or its affiliates.
 * Copyright (c) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "suspend: " fmt

#include <linux/ftrace.h>
#include <linux/suspend.h>
#include <asm/csr.h>
#include <asm/sbi.h>
#include <asm/suspend.h>

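/*
 * Save the CSRs that are not otherwise preserved across a platform suspend
 * (SCRATCH, TVEC and IE, plus SATP when the kernel runs with an MMU) so
 * that suspend_restore_csrs() can bring them back on resume.
 */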
void suspend_save_csrs(struct suspend_context *context)
{
	context->scratch = csr_read(CSR_SCRATCH);
	context->tvec = csr_read(CSR_TVEC);
	context->ie = csr_read(CSR_IE);

	/*
	 * No need to save/restore the IP CSR (i.e. MIP or SIP) because:
	 *
	 * 1. For a no-MMU (M-mode) kernel, the bits in MIP are set by
	 *    external devices (such as the interrupt controller, timer, etc).
	 * 2. For an MMU (S-mode) kernel, the bits in SIP are set by the
	 *    M-mode firmware and external devices (such as the interrupt
	 *    controller, etc).
	 */

#ifdef CONFIG_MMU
	context->satp = csr_read(CSR_SATP);
#endif
}

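/* Restore the CSRs captured by suspend_save_csrs() once the hart resumes. */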
void suspend_restore_csrs(struct suspend_context *context)
{
	csr_write(CSR_SCRATCH, context->scratch);
	csr_write(CSR_TVEC, context->tvec);
	csr_write(CSR_IE, context->ie);

#ifdef CONFIG_MMU
	csr_write(CSR_SATP, context->satp);
#endif
}

int cpu_suspend(unsigned long arg,
		int (*finish)(unsigned long arg,
			      unsigned long entry,
			      unsigned long context))
{
	int rc = 0;
	struct suspend_context context = { 0 };

	/* Finisher should be non-NULL */
	if (!finish)
		return -EINVAL;

	/* Save additional CSRs */
	suspend_save_csrs(&context);

	/*
	 * Function graph tracer state becomes inconsistent when the kernel
	 * calls functions that never return (a.k.a. finishers), hence
	 * disable graph tracing during their execution.
	 */
	pause_graph_tracing();

	/* Save context on stack */
	if (__cpu_suspend_enter(&context)) {
		/* Call the finisher */
		rc = finish(arg, __pa_symbol(__cpu_resume_enter),
			    (ulong)&context);

		/*
		 * Should never reach here, unless the suspend finisher
		 * fails. A successful cpu_suspend() returns from
		 * __cpu_resume_enter().
		 */
		if (!rc)
			rc = -EOPNOTSUPP;
	}

	/* Re-enable function graph tracer */
	unpause_graph_tracing();

	/* Restore additional CSRs */
	suspend_restore_csrs(&context);

	return rc;
}
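
/*
 * Illustrative sketch of how a platform-specific caller might pair
 * cpu_suspend() with its own finisher (the example_* names and the sleep
 * type value are hypothetical, not part of this file's API).  A finisher
 * receives the physical address of the resume entry point and the address
 * of the saved context, hands them to firmware or hardware, and is
 * expected not to return on success.
 */
#if 0	/* example only, never compiled */
static int example_finisher(unsigned long arg, unsigned long entry,
			    unsigned long context)
{
	/* A real finisher would program firmware/hardware here and not return. */
	return -EOPNOTSUPP;
}

static int example_suspend(void)
{
	/* 0 stands in for a platform-specific sleep type. */
	return cpu_suspend(0, example_finisher);
}
#endif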

#ifdef CONFIG_RISCV_SBI
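/*
 * Thin wrapper around the SBI SUSP extension's SYSTEM_SUSPEND call.  It is
 * used as the cpu_suspend() finisher below, so on success it does not
 * return; the hart resumes at resume_addr (__cpu_resume_enter, supplied by
 * cpu_suspend()).
 */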
static int sbi_system_suspend(unsigned long sleep_type,
			      unsigned long resume_addr,
			      unsigned long opaque)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
			sleep_type, resume_addr, opaque, 0, 0, 0);
	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);

	return ret.value;
}

static int sbi_system_suspend_enter(suspend_state_t state)
{
	return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
}

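/*
 * Platform suspend ops backed by the SBI SUSP extension: only the "mem"
 * (suspend-to-RAM) state is reported as valid.
 */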
static const struct platform_suspend_ops sbi_system_suspend_ops = {
	.valid = suspend_valid_only_mem,
	.enter = sbi_system_suspend_enter,
};

static int __init sbi_system_suspend_init(void)
{
	if (sbi_spec_version >= sbi_mk_version(2, 0) &&
	    sbi_probe_extension(SBI_EXT_SUSP) > 0) {
		pr_info("SBI SUSP extension detected\n");
		if (IS_ENABLED(CONFIG_SUSPEND))
			suspend_set_ops(&sbi_system_suspend_ops);
	}

	return 0;
}

arch_initcall(sbi_system_suspend_init);
#endif /* CONFIG_RISCV_SBI */