1 // SPDX-License-Identifier: GPL-2.0-only
2 #include <linux/export.h>
3 #include <linux/mm.h>
4 #include <linux/module.h>
5 #include <asm/alternative.h>
6 #include <asm/cacheflush.h>
7 #include <asm/inst.h>
8 #include <asm/sections.h>
9
10 int __read_mostly alternatives_patched;
11
12 EXPORT_SYMBOL_GPL(alternatives_patched);
13
14 #define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)
15
16 static int __initdata_or_module debug_alternative;
17
/*
 * "debug-alternative" kernel command-line parameter: enable the verbose
 * DPRINTK()/DUMP_WORDS() output below while alternatives are applied.
 */
static int __init debug_alt(char *str)
{
	debug_alternative = 1;
	return 1;
}
__setup("debug-alternative", debug_alt);
24
/* Emit a debug message, but only when booted with "debug-alternative". */
#define DPRINTK(fmt, args...)						\
do {									\
	if (debug_alternative)						\
		printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args);	\
} while (0)

/*
 * Dump @count instruction words starting at @buf, preceded by a @fmt header.
 * Active only when booted with "debug-alternative".  The early break guards
 * the count == 0 case; the loop stops one short so the final printk can
 * print word _j == count - 1 and terminate the line with '\n'.
 */
#define DUMP_WORDS(buf, count, fmt, args...)				\
do {									\
	if (unlikely(debug_alternative)) {				\
		int _j;							\
		union loongarch_instruction *_buf = buf;		\
									\
		if (!(count))						\
			break;						\
									\
		printk(KERN_DEBUG fmt, ##args);				\
		for (_j = 0; _j < count - 1; _j++)			\
			printk(KERN_CONT "<%08x> ", _buf[_j].word);	\
		printk(KERN_CONT "<%08x>\n", _buf[_j].word);		\
	}								\
} while (0)
46
47 /* Use this to add nops to a buffer, then text_poke the whole buffer. */
add_nops(union loongarch_instruction * insn,int count)48 static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
49 {
50 while (count--) {
51 insn->word = INSN_NOP;
52 insn++;
53 }
54 }
55
/* Does branch target @jump land inside the replacement area [start, end)? */
static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
{
	return !(jump < (unsigned long)start || jump >= (unsigned long)end);
}
61
/*
 * Relocate a PC-relative branch while it is copied from @src to @dest:
 * decode the encoded offset, compute the absolute jump target, then
 * re-encode the offset relative to the instruction's new home, writing the
 * result into @buf.  A branch whose target lies inside the replacement
 * sequence [@start, @end) moves together with the copied code, so it needs
 * no fix-up and is left untouched.
 *
 * Only B/BL (reg0i26), BCEQZ/BCNEZ and BEQZ/BNEZ (reg1i21), and the
 * BEQ-family (reg2i16) formats are handled; anything else falls through
 * all three switches unmodified.
 */
static void __init_or_module recompute_jump(union loongarch_instruction *buf,
		union loongarch_instruction *dest, union loongarch_instruction *src,
		void *start, void *end)
{
	unsigned int si, si_l, si_h;
	unsigned long cur_pc, jump_addr, pc;
	long offset;

	cur_pc = (unsigned long)src;	/* PC the branch was encoded against */
	pc = (unsigned long)dest;	/* PC it will execute from after patching */

	/* reg0i26 format: 26-bit word offset split into a low/high pair */
	si_l = src->reg0i26_format.immediate_l;
	si_h = src->reg0i26_format.immediate_h;
	switch (src->reg0i26_format.opcode) {
	case b_op:
	case bl_op:
		jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
		if (in_alt_jump(jump_addr, start, end))
			return;
		offset = jump_addr - pc;
		/* re-encoded offset must stay within B/BL's +/-128M reach */
		BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
		offset >>= 2;
		buf->reg0i26_format.immediate_h = offset >> 16;
		buf->reg0i26_format.immediate_l = offset;
		return;
	}

	/* reg1i21 format: 21-bit word offset split into a low/high pair */
	si_l = src->reg1i21_format.immediate_l;
	si_h = src->reg1i21_format.immediate_h;
	switch (src->reg1i21_format.opcode) {
	case bceqz_op: /* bceqz_op = bcnez_op */
		/* NOTE(review): rj bit 4 set would be a different encoding here
		 * (cj condition-flag register is only 3 bits) -- confirm vs ISA manual */
		BUG_ON(buf->reg1i21_format.rj & BIT(4));
		fallthrough;
	case beqz_op:
	case bnez_op:
		jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
		if (in_alt_jump(jump_addr, start, end))
			return;
		offset = jump_addr - pc;
		/* re-encoded offset must stay within the +/-4M reach */
		BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
		offset >>= 2;
		buf->reg1i21_format.immediate_h = offset >> 16;
		buf->reg1i21_format.immediate_l = offset;
		return;
	}

	/* reg2i16 format: contiguous 16-bit word offset */
	si = src->reg2i16_format.immediate;
	switch (src->reg2i16_format.opcode) {
	case beq_op:
	case bne_op:
	case blt_op:
	case bge_op:
	case bltu_op:
	case bgeu_op:
		jump_addr = cur_pc + sign_extend64(si << 2, 17);
		if (in_alt_jump(jump_addr, start, end))
			return;
		offset = jump_addr - pc;
		/* re-encoded offset must stay within the +/-128K reach */
		BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
		offset >>= 2;
		buf->reg2i16_format.immediate = offset;
		return;
	}
}
126
copy_alt_insns(union loongarch_instruction * buf,union loongarch_instruction * dest,union loongarch_instruction * src,int nr)127 static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
128 union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
129 {
130 int i;
131
132 for (i = 0; i < nr; i++) {
133 buf[i].word = src[i].word;
134
135 if (is_pc_ins(&src[i])) {
136 pr_err("Not support pcrel instruction at present!");
137 return -EINVAL;
138 }
139
140 if (is_branch_ins(&src[i]) &&
141 src[i].reg2i16_format.opcode != jirl_op) {
142 recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
143 }
144 }
145
146 return 0;
147 }
148
149 /*
150 * text_poke_early - Update instructions on a live kernel at boot time
151 *
152 * When you use this code to patch more than one byte of an instruction
153 * you need to make sure that other CPUs cannot execute this code in parallel.
154 * Also no thread must be currently preempted in the middle of these
155 * instructions. And on the local CPU you need to be protected again NMI or MCE
156 * handlers seeing an inconsistent instruction while you patch.
157 */
text_poke_early(union loongarch_instruction * insn,union loongarch_instruction * buf,unsigned int nr)158 static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
159 union loongarch_instruction *buf, unsigned int nr)
160 {
161 int i;
162 unsigned long flags;
163
164 local_irq_save(flags);
165
166 for (i = 0; i < nr; i++)
167 insn[i].word = buf[i].word;
168
169 local_irq_restore(flags);
170
171 wbflush();
172 flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));
173
174 return insn;
175 }
176
177 /*
178 * Replace instructions with better alternatives for this CPU type. This runs
179 * before SMP is initialized to avoid SMP problems with self modifying code.
180 * This implies that asymmetric systems where APs have less capabilities than
181 * the boot processor are not handled. Tough. Make sure you disable such
182 * features by hand.
183 */
apply_alternatives(struct alt_instr * start,struct alt_instr * end)184 void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
185 {
186 struct alt_instr *a;
187 unsigned int nr_instr, nr_repl, nr_insnbuf;
188 union loongarch_instruction *instr, *replacement;
189 union loongarch_instruction insnbuf[MAX_PATCH_SIZE];
190
191 DPRINTK("alt table %px, -> %px", start, end);
192 /*
193 * The scan order should be from start to end. A later scanned
194 * alternative code can overwrite previously scanned alternative code.
195 * Some kernel functions (e.g. memcpy, memset, etc) use this order to
196 * patch code.
197 *
198 * So be careful if you want to change the scan order to any other
199 * order.
200 */
201 for (a = start; a < end; a++) {
202 nr_insnbuf = 0;
203
204 instr = (void *)&a->instr_offset + a->instr_offset;
205 replacement = (void *)&a->replace_offset + a->replace_offset;
206
207 BUG_ON(a->instrlen > sizeof(insnbuf));
208 BUG_ON(a->instrlen & 0x3);
209 BUG_ON(a->replacementlen & 0x3);
210
211 nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
212 nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;
213
214 if (!cpu_has(a->feature)) {
215 DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)",
216 a->feature, instr, a->instrlen,
217 replacement, a->replacementlen);
218
219 continue;
220 }
221
222 DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)",
223 a->feature, instr, a->instrlen,
224 replacement, a->replacementlen);
225
226 DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
227 DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);
228
229 copy_alt_insns(insnbuf, instr, replacement, nr_repl);
230 nr_insnbuf = nr_repl;
231
232 if (nr_instr > nr_repl) {
233 add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
234 nr_insnbuf += nr_instr - nr_repl;
235 }
236 DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);
237
238 text_poke_early(instr, insnbuf, nr_insnbuf);
239 }
240 }
241
/* Apply all boot-time alternatives, then record that patching is complete. */
void __init alternative_instructions(void)
{
	apply_alternatives(__alt_instructions, __alt_instructions_end);

	/* set last: readers of alternatives_patched rely on patching being done */
	alternatives_patched = 1;
}
248