xref: /linux/arch/riscv/kernel/alternative.c (revision e021ae7f5145d46ab64cb058cbffda31059f37e5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the ARM64 and x86 versions
 *
 * Copyright (C) 2021 SiFive.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>
#include <asm/insn.h>
#include <asm/patch.h>

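/*
 * Everything we need to know about the CPU we are running on: the
 * machine-level ID registers plus the vendor-specific patch and
 * feature-probe callbacks selected from the vendor ID.
 */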
struct cpu_manufacturer_info_t {
	unsigned long vendor_id;
	unsigned long arch_id;
	unsigned long imp_id;
	void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
			   unsigned long archid, unsigned long impid,
			   unsigned int stage);
	void (*feature_probe_func)(unsigned int cpu, unsigned long archid,
				   unsigned long impid);
};

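/*
 * When running in M-mode the ID CSRs can be read directly; in S-mode
 * they are only accessible through the SBI firmware interface.
 */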
static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
{
#ifdef CONFIG_RISCV_M_MODE
	cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
	cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
	cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
#else
	cpu_mfr_info->vendor_id = sbi_get_mvendorid();
	cpu_mfr_info->arch_id = sbi_get_marchid();
	cpu_mfr_info->imp_id = sbi_get_mimpid();
#endif

	cpu_mfr_info->feature_probe_func = NULL;
	switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_ANDES
	case ANDESTECH_VENDOR_ID:
		cpu_mfr_info->patch_func = andes_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_SIFIVE
	case SIFIVE_VENDOR_ID:
		cpu_mfr_info->patch_func = sifive_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_THEAD
	case THEAD_VENDOR_ID:
		cpu_mfr_info->patch_func = thead_errata_patch_func;
		cpu_mfr_info->feature_probe_func = thead_feature_probe_func;
		break;
#endif
	default:
		cpu_mfr_info->patch_func = NULL;
	}
}

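/*
 * With the C extension, 32-bit instructions are only guaranteed to be
 * 16-bit aligned, so fetch them as two 16-bit parcels.
 */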
static u32 riscv_instruction_at(void *p)
{
	u16 *parcel = p;

	return (u32)parcel[0] | (u32)parcel[1] << 16;
}

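/*
 * An auipc+jalr pair splits a 32-bit PC-relative offset into a 20-bit
 * upper part and a sign-extended 12-bit lower part:
 *
 *	hi20 = (offset + 0x800) >> 12;
 *	lo12 = offset - (hi20 << 12);
 *
 * (the +0x800 compensates for the sign-extension of lo12; e.g. an
 * offset of 0x12945 encodes as hi20 = 0x13, lo12 = -0x6bb).  The
 * insn.h helpers used below take care of this split for us.
 */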
static void riscv_alternative_fix_auipc_jalr(void *ptr, u32 auipc_insn,
					     u32 jalr_insn, int patch_offset)
{
	u32 call[2] = { auipc_insn, jalr_insn };
	s32 imm;

	/* get and adjust new target address */
	imm = riscv_insn_extract_utype_itype_imm(auipc_insn, jalr_insn);
	imm -= patch_offset;

	/* update instructions */
	riscv_insn_insert_utype_itype_imm(&call[0], &call[1], imm);

	/* patch the call site again */
	patch_text_nosync(ptr, call, sizeof(u32) * 2);
}

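/*
 * A jal carries its whole +/-1 MiB offset in a single J-type immediate,
 * so only one instruction needs to be re-encoded.
 */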
static void riscv_alternative_fix_jal(void *ptr, u32 jal_insn, int patch_offset)
{
	s32 imm;

	/* get and adjust new target address */
	imm = riscv_insn_extract_jtype_imm(jal_insn);
	imm -= patch_offset;

	/* update instruction */
	riscv_insn_insert_jtype_imm(&jal_insn, imm);

	/* patch the jump again */
	patch_text_nosync(ptr, &jal_insn, sizeof(u32));
}

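/*
 * Alternative blocks are assembled at their link-time address but get
 * copied over the code they replace, so every PC-relative auipc+jalr
 * pair and jal inside them ends up off by exactly patch_offset and has
 * to be rebased here.
 */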
void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
				   int patch_offset)
{
	int num_insn = len / sizeof(u32);
	int i;

	for (i = 0; i < num_insn; i++) {
		u32 insn = riscv_instruction_at(alt_ptr + i * sizeof(u32));

		/*
		 * This may be the start of an auipc + jalr pair; make
		 * sure at least one more instruction follows before
		 * looking for the jalr.
		 */
		if (riscv_insn_is_auipc(insn) && i < num_insn - 1) {
			u32 insn2 = riscv_instruction_at(alt_ptr + (i + 1) * sizeof(u32));

			if (!riscv_insn_is_jalr(insn2))
				continue;

			/* if the instruction pair is a call, it will use the ra register */
			if (RV_EXTRACT_RD_REG(insn) != 1)
				continue;

			riscv_alternative_fix_auipc_jalr(alt_ptr + i * sizeof(u32),
							 insn, insn2, patch_offset);
			i++;
		}

		if (riscv_insn_is_jal(insn)) {
			s32 imm = riscv_insn_extract_jtype_imm(insn);

			/* Don't modify jumps inside the alternative block */
			if ((alt_ptr + i * sizeof(u32) + imm) >= alt_ptr &&
			    (alt_ptr + i * sizeof(u32) + imm) < (alt_ptr + len))
				continue;

			riscv_alternative_fix_jal(alt_ptr + i * sizeof(u32),
						  insn, patch_offset);
		}
	}
}

/* Called on each CPU as it starts */
void probe_vendor_features(unsigned int cpu)
{
	struct cpu_manufacturer_info_t cpu_mfr_info;

	riscv_fill_cpu_mfr_info(&cpu_mfr_info);
	if (!cpu_mfr_info.feature_probe_func)
		return;

	cpu_mfr_info.feature_probe_func(cpu,
					cpu_mfr_info.arch_id,
					cpu_mfr_info.imp_id);
}

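/*
 * For reference, a sketch of where alt_entry records come from
 * (illustrative only; see asm/alternative-macros.h and
 * asm/errata_list.h for the real definitions): inline-asm sites such as
 *
 *	asm(ALTERNATIVE("sfence.vma x0, %0", "sfence.vma",
 *			SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_1200,
 *			CONFIG_ERRATA_SIFIVE) : : "r" (asid) : "memory");
 *
 * emit both code variants together with the vendor and errata IDs;
 * _apply_alternatives() below walks the resulting records and lets the
 * vendor patch function pick the variant to activate.
 */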
/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
static void __init_or_module _apply_alternatives(struct alt_entry *begin,
						 struct alt_entry *end,
						 unsigned int stage)
{
	struct cpu_manufacturer_info_t cpu_mfr_info;

	riscv_fill_cpu_mfr_info(&cpu_mfr_info);

	riscv_cpufeature_patch_func(begin, end, stage);

	if (!cpu_mfr_info.patch_func)
		return;

	cpu_mfr_info.patch_func(begin, end,
				cpu_mfr_info.arch_id,
				cpu_mfr_info.imp_id,
				stage);
}

#ifdef CONFIG_MMU
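/*
 * The vDSO is a separately linked ELF image embedded in the kernel, so
 * its .alternative section has to be located via its own section
 * headers before it can be patched like the rest of the kernel.
 */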
static void __init apply_vdso_alternatives(void)
{
	const Elf_Ehdr *hdr;
	const Elf_Shdr *shdr;
	const Elf_Shdr *alt;
	struct alt_entry *begin, *end;

	hdr = (Elf_Ehdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".alternative");
	if (!alt)
		return;

	begin = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;

	_apply_alternatives(begin, end, RISCV_ALTERNATIVES_BOOT);
}
#else
static void __init apply_vdso_alternatives(void) { }
#endif

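/*
 * Patch the kernel image and the vDSO once the boot CPU's vendor and
 * features have been probed.
 */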
void __init apply_boot_alternatives(void)
{
	/* If called on a non-boot CPU, things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	probe_vendor_features(0);
	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_BOOT);

	apply_vdso_alternatives();
}

/*
 * apply_early_boot_alternatives() is called from setup_vm() with the MMU off.
 *
 * The following requirements must be honoured for it to work correctly:
 * 1) It must use PC-relative addressing to access kernel symbols.
 *    To achieve this we always build with GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE does not work for setup_vm(),
 *    so compiler instrumentation is disabled when FTRACE is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for alternative.o in kernel/Makefile.
 */
void __init apply_early_boot_alternatives(void)
{
#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_EARLY_BOOT);
#endif
}

#ifdef CONFIG_MODULES
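/*
 * Modules carry their own .alternative sections; this is invoked from
 * the module loader once a module's sections are in place.
 */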
void apply_module_alternatives(void *start, size_t length)
{
	_apply_alternatives((struct alt_entry *)start,
			    (struct alt_entry *)(start + length),
			    RISCV_ALTERNATIVES_MODULE);
}
#endif