// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the ARM64 and x86 versions
 *
 * Copyright (C) 2021 Sifive.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <asm/alternative.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/vendorid_list.h>
#include <asm/sbi.h>
#include <asm/csr.h>
#include <asm/insn.h>
#include <asm/text-patching.h>

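/*
 * Identity of the CPU we are running on, plus the vendor-specific
 * errata patch function selected from it. vendor_id/arch_id/imp_id
 * mirror the mvendorid/marchid/mimpid CSRs, read directly in M-mode
 * and via SBI otherwise.
 */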
struct cpu_manufacturer_info_t {
	unsigned long vendor_id;
	unsigned long arch_id;
	unsigned long imp_id;
	void (*patch_func)(struct alt_entry *begin, struct alt_entry *end,
			   unsigned long archid, unsigned long impid,
			   unsigned int stage);
};

static void riscv_fill_cpu_mfr_info(struct cpu_manufacturer_info_t *cpu_mfr_info)
{
#ifdef CONFIG_RISCV_M_MODE
	cpu_mfr_info->vendor_id = csr_read(CSR_MVENDORID);
	cpu_mfr_info->arch_id = csr_read(CSR_MARCHID);
	cpu_mfr_info->imp_id = csr_read(CSR_MIMPID);
#else
	cpu_mfr_info->vendor_id = sbi_get_mvendorid();
	cpu_mfr_info->arch_id = sbi_get_marchid();
	cpu_mfr_info->imp_id = sbi_get_mimpid();
#endif

	switch (cpu_mfr_info->vendor_id) {
#ifdef CONFIG_ERRATA_ANDES
	case ANDES_VENDOR_ID:
		cpu_mfr_info->patch_func = andes_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_MIPS
	case MIPS_VENDOR_ID:
		cpu_mfr_info->patch_func = mips_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_SIFIVE
	case SIFIVE_VENDOR_ID:
		cpu_mfr_info->patch_func = sifive_errata_patch_func;
		break;
#endif
#ifdef CONFIG_ERRATA_THEAD
	case THEAD_VENDOR_ID:
		cpu_mfr_info->patch_func = thead_errata_patch_func;
		break;
#endif
	default:
		cpu_mfr_info->patch_func = NULL;
	}
}

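/*
 * With the C extension, a 32-bit instruction is only guaranteed to be
 * 2-byte aligned, so read it as two 16-bit parcels, low parcel first.
 */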
static u32 riscv_instruction_at(void *p)
{
	u16 *parcel = p;

	return (u32)parcel[0] | (u32)parcel[1] << 16;
}

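/*
 * An auipc+jalr pair encodes a 32-bit PC-relative target: the U-type
 * immediate supplies the upper 20 bits and the sign-extended I-type
 * immediate the lower 12. Code copied patch_offset bytes away from
 * where it was linked would overshoot a fixed target by that amount,
 * so shrink the combined immediate accordingly and re-encode both
 * halves.
 */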
static void riscv_alternative_fix_auipc_jalr(void *ptr, u32 auipc_insn,
					     u32 jalr_insn, int patch_offset)
{
	u32 call[2] = { auipc_insn, jalr_insn };
	s32 imm;

	/* get and adjust new target address */
	imm = riscv_insn_extract_utype_itype_imm(auipc_insn, jalr_insn);
	imm -= patch_offset;

	/* update instructions */
	riscv_insn_insert_utype_itype_imm(&call[0], &call[1], imm);

	/* patch the call place again */
	patch_text_nosync(ptr, call, sizeof(u32) * 2);
}

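/*
 * Same idea for a lone jal: rebase its J-type immediate so the jump
 * still lands on the original target from the patched location.
 */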
static void riscv_alternative_fix_jal(void *ptr, u32 jal_insn, int patch_offset)
{
	s32 imm;

	/* get and adjust new target address */
	imm = riscv_insn_extract_jtype_imm(jal_insn);
	imm -= patch_offset;

	/* update instruction */
	riscv_insn_insert_jtype_imm(&jal_insn, imm);

	/* patch the call place again */
	patch_text_nosync(ptr, &jal_insn, sizeof(u32));
}

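/*
 * Walk a patched region instruction by instruction and re-target the
 * PC-relative transfers found in it: auipc+jalr call pairs (rd == ra)
 * are always adjusted, while jal instructions are only adjusted when
 * they leave the region, since a jump whose source and target moved
 * together needs no fixup.
 */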
void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
				   int patch_offset)
{
	int num_insn = len / sizeof(u32);
	int i;

	for (i = 0; i < num_insn; i++) {
		u32 insn = riscv_instruction_at(alt_ptr + i * sizeof(u32));

		/*
		 * This may be the start of an auipc + jalr pair; check
		 * that at least one more instruction remains in the
		 * region before reading the second half.
		 */
		if (riscv_insn_is_auipc(insn) && i < num_insn - 1) {
			u32 insn2 = riscv_instruction_at(alt_ptr + (i + 1) * sizeof(u32));

			if (!riscv_insn_is_jalr(insn2))
				continue;

			/* if instruction pair is a call, it will use the ra register */
			if (RV_EXTRACT_RD_REG(insn) != 1)
				continue;

			riscv_alternative_fix_auipc_jalr(alt_ptr + i * sizeof(u32),
							 insn, insn2, patch_offset);
			i++;
		}

		if (riscv_insn_is_jal(insn)) {
			s32 imm = riscv_insn_extract_jtype_imm(insn);

			/* Don't modify jumps inside the alternative block */
			if ((alt_ptr + i * sizeof(u32) + imm) >= alt_ptr &&
			    (alt_ptr + i * sizeof(u32) + imm) < (alt_ptr + len))
				continue;

			riscv_alternative_fix_jal(alt_ptr + i * sizeof(u32),
						  insn, patch_offset);
		}
	}
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
static void __init_or_module _apply_alternatives(struct alt_entry *begin,
						 struct alt_entry *end,
						 unsigned int stage)
{
	struct cpu_manufacturer_info_t cpu_mfr_info;

	riscv_fill_cpu_mfr_info(&cpu_mfr_info);

	riscv_cpufeature_patch_func(begin, end, stage);

	if (!cpu_mfr_info.patch_func)
		return;

	cpu_mfr_info.patch_func(begin, end,
				cpu_mfr_info.arch_id,
				cpu_mfr_info.imp_id,
				stage);
}

#ifdef CONFIG_MMU
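/*
 * The vDSO image carries its own .alternative section; locate it
 * through the ELF section headers and patch it like kernel text.
 */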
static void __init apply_vdso_alternatives(void)
{
	const Elf_Ehdr *hdr;
	const Elf_Shdr *shdr;
	const Elf_Shdr *alt;
	struct alt_entry *begin, *end;

	hdr = (Elf_Ehdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".alternative");
	if (!alt)
		return;

	begin = (void *)hdr + alt->sh_offset;
	end = (void *)hdr + alt->sh_offset + alt->sh_size;

	_apply_alternatives((struct alt_entry *)begin,
			    (struct alt_entry *)end,
			    RISCV_ALTERNATIVES_BOOT);
}
#else
static void __init apply_vdso_alternatives(void) { }
#endif

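/*
 * Patch the kernel image's own alternatives, then the vDSO's. Runs
 * once on the boot CPU, directly after boot-CPU feature detection.
 */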
void __init apply_boot_alternatives(void)
{
	/* If called on a non-boot CPU, things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_BOOT);

	apply_vdso_alternatives();
}

/*
 * apply_early_boot_alternatives() is called from setup_vm() with the
 * MMU off.
 *
 * The following requirements must be honoured for it to work correctly:
 * 1) It must use PC-relative addressing to access kernel symbols.
 *    To achieve this we always build with GCC cmodel=medany.
 * 2) The compiler instrumentation for FTRACE does not work for
 *    setup_vm(), so compiler instrumentation is disabled when FTRACE
 *    is enabled.
 *
 * Currently, the above requirements are honoured by using custom CFLAGS
 * for alternative.o in kernel/Makefile.
 */
void __init apply_early_boot_alternatives(void)
{
#ifdef CONFIG_RISCV_ALTERNATIVE_EARLY
	_apply_alternatives((struct alt_entry *)__alt_start,
			    (struct alt_entry *)__alt_end,
			    RISCV_ALTERNATIVES_EARLY_BOOT);
#endif
}

#ifdef CONFIG_MODULES
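/*
 * Patch the alternative entries of a just-loaded module; start and
 * length delimit the module's alternative entry table.
 */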
void apply_module_alternatives(void *start, size_t length)
{
	_apply_alternatives((struct alt_entry *)start,
			    (struct alt_entry *)(start + length),
			    RISCV_ALTERNATIVES_MODULE);
}
#endif