xref: /linux/arch/x86/kernel/callthunks.c (revision 2a6b6c9a226279b4f6668450ddb21ae655558087)
// SPDX-License-Identifier: GPL-2.0-only

#define pr_fmt(fmt) "callthunks: " fmt

#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/moduleloader.h>
#include <linux/static_call.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/cpu.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/kexec.h>
#include <asm/nospec-branch.h>
#include <asm/paravirt.h>
#include <asm/sections.h>
#include <asm/switch_to.h>
#include <asm/sync_core.h>
#include <asm/text-patching.h>
#include <asm/xen/hypercall.h>

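/*
 * Set via the "debug-callthunks" boot parameter below; enables the
 * prdbg() output while call sites are being patched.
 */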
static int __initdata_or_module debug_callthunks;

#define MAX_PATCH_LEN (255-1)

#define prdbg(fmt, args...)					\
do {								\
	if (debug_callthunks)					\
		printk(KERN_DEBUG pr_fmt(fmt), ##args);		\
} while(0)

static int __init debug_thunks(char *str)
{
	debug_callthunks = 1;
	return 1;
}
__setup("debug-callthunks", debug_thunks);

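/*
 * Per-CPU event counters exposed through the debugfs interface at the
 * bottom of this file: calls (C), returns (R), RSB stuffs (S) and
 * context switches (X).
 */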
#ifdef CONFIG_CALL_THUNKS_DEBUG
DEFINE_PER_CPU(u64, __x86_call_count);
DEFINE_PER_CPU(u64, __x86_ret_count);
DEFINE_PER_CPU(u64, __x86_stuffs_count);
DEFINE_PER_CPU(u64, __x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_ctxsw_count);
EXPORT_PER_CPU_SYMBOL_GPL(__x86_call_count);
#endif

extern s32 __call_sites[], __call_sites_end[];

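/*
 * Describes a contiguous range of kernel text (the built-in kernel or a
 * module) whose call sites and call destinations are eligible for
 * thunk patching.
 */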
struct core_text {
	unsigned long	base;
	unsigned long	end;
	const char	*name;
};

static bool thunks_initialized __ro_after_init;

static const struct core_text builtin_coretext = {
	.base = (unsigned long)_text,
	.end  = (unsigned long)_etext,
	.name = "builtin",
};

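/*
 * The call depth accounting thunk template ("skl" because call depth
 * tracking is the Skylake-era RSB underflow mitigation).
 * INCREMENT_CALL_DEPTH updates the per-CPU call depth accounting; the
 * template is copied into the padding area in front of each eligible
 * call destination so every call gets accounted.
 */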
asm (
	".pushsection .rodata				\n"
	".global skl_call_thunk_template		\n"
	"skl_call_thunk_template:			\n"
		__stringify(INCREMENT_CALL_DEPTH)"	\n"
	".global skl_call_thunk_tail			\n"
	"skl_call_thunk_tail:				\n"
	".popsection					\n"
);

extern u8 skl_call_thunk_template[];
extern u8 skl_call_thunk_tail[];

#define SKL_TMPL_SIZE \
	((unsigned int)(skl_call_thunk_tail - skl_call_thunk_template))

extern void error_entry(void);
extern void xen_error_entry(void);
extern void paranoid_entry(void);

static inline bool within_coretext(const struct core_text *ct, void *addr)
{
	unsigned long p = (unsigned long)addr;

	return ct->base <= p && p < ct->end;
}

static inline bool within_module_coretext(void *addr)
{
	bool ret = false;

#ifdef CONFIG_MODULES
	struct module *mod;

	preempt_disable();
	mod = __module_address((unsigned long)addr);
	if (mod && within_module_core((unsigned long)addr, mod))
		ret = true;
	preempt_enable();
#endif
	return ret;
}

static bool is_coretext(const struct core_text *ct, void *addr)
{
	if (ct && within_coretext(ct, addr))
		return true;
	if (within_coretext(&builtin_coretext, addr))
		return true;
	return within_module_coretext(addr);
}

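/*
 * Call destinations which must not get an accounting thunk, e.g. because
 * they handle the accounting themselves, are special entry/ftrace code,
 * or live in text (kexec control code, Xen hypercall page) that must not
 * be patched.
 */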
static bool skip_addr(void *dest)
{
	if (dest == error_entry)
		return true;
	if (dest == paranoid_entry)
		return true;
	if (dest == xen_error_entry)
		return true;
	/* Does FILL_RSB... */
	if (dest == __switch_to_asm)
		return true;
	/* Accounts directly */
	if (dest == ret_from_fork)
		return true;
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_AMD_MEM_ENCRYPT)
	if (dest == soft_restart_cpu)
		return true;
#endif
#ifdef CONFIG_FUNCTION_TRACER
	if (dest == __fentry__)
		return true;
#endif
#ifdef CONFIG_KEXEC_CORE
	if (dest >= (void *)relocate_kernel &&
	    dest < (void*)relocate_kernel + KEXEC_CONTROL_CODE_MAX_SIZE)
		return true;
#endif
#ifdef CONFIG_XEN
	if (dest >= (void *)hypercall_page &&
	    dest < (void*)hypercall_page + PAGE_SIZE)
		return true;
#endif
	return false;
}

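/*
 * Decode the instruction at @addr and return its call destination.
 * Returns NULL for patched-out calls and for destinations which must be
 * skipped, or an ERR_PTR() when the instruction cannot be decoded.
 */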
static __init_or_module void *call_get_dest(void *addr)
{
	struct insn insn;
	void *dest;
	int ret;

	ret = insn_decode_kernel(&insn, addr);
	if (ret)
		return ERR_PTR(ret);

	/* Patched out call? */
	if (insn.opcode.bytes[0] != CALL_INSN_OPCODE)
		return NULL;

	dest = addr + insn.length + insn.immediate.value;
	if (skip_addr(dest))
		return NULL;
	return dest;
}

static const u8 nops[] = {
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
	0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90, 0x90,
};

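/*
 * Copy the relocated thunk template into the padding area immediately in
 * front of @dest. Returns the thunk address, or NULL when the padding is
 * not the expected pristine NOPs. @direct selects a plain memcpy() for
 * text which is not live yet, text_poke_copy_locked() otherwise.
 */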
static void *patch_dest(void *dest, bool direct)
{
	unsigned int tsize = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	u8 *pad = dest - tsize;

	memcpy(insn_buff, skl_call_thunk_template, tsize);
	apply_relocation(insn_buff, pad, tsize, skl_call_thunk_template, tsize);

	/* Already patched? */
	if (!bcmp(pad, insn_buff, tsize))
		return pad;

	/* Ensure there are nops */
	if (bcmp(pad, nops, tsize)) {
		pr_warn_once("Invalid padding area for %pS\n", dest);
		return NULL;
	}

	if (direct)
		memcpy(pad, insn_buff, tsize);
	else
		text_poke_copy_locked(pad, insn_buff, tsize, true);
	return pad;
}

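/*
 * Patch a single call site: place the accounting thunk in front of the
 * call destination and redirect the call to that thunk.
 */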
static __init_or_module void patch_call(void *addr, const struct core_text *ct)
{
	void *pad, *dest;
	u8 bytes[8];

	if (!within_coretext(ct, addr))
		return;

	dest = call_get_dest(addr);
	if (!dest || WARN_ON_ONCE(IS_ERR(dest)))
		return;

	if (!is_coretext(ct, dest))
		return;

	pad = patch_dest(dest, within_coretext(ct, dest));
	if (!pad)
		return;

	prdbg("Patch call at: %pS %px to %pS %px -> %px \n", addr, addr,
		dest, dest, pad);
	__text_gen_insn(bytes, CALL_INSN_OPCODE, addr, pad, CALL_INSN_SIZE);
	text_poke_early(addr, bytes, CALL_INSN_SIZE);
}

static __init_or_module void
patch_call_sites(s32 *start, s32 *end, const struct core_text *ct)
{
	s32 *s;

	for (s = start; s < end; s++)
		patch_call((void *)s + *s, ct);
}

static __init_or_module void
patch_alt_call_sites(struct alt_instr *start, struct alt_instr *end,
		     const struct core_text *ct)
{
	struct alt_instr *a;

	for (a = start; a < end; a++)
		patch_call((void *)&a->instr_offset + a->instr_offset, ct);
}

static __init_or_module void
callthunks_setup(struct callthunk_sites *cs, const struct core_text *ct)
{
	prdbg("Patching call sites %s\n", ct->name);
	patch_call_sites(cs->call_start, cs->call_end, ct);
	patch_alt_call_sites(cs->alt_start, cs->alt_end, ct);
	prdbg("Patching call sites done %s\n", ct->name);
}

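/*
 * Patch all call sites in the built-in kernel text. Called once during
 * boot when call depth tracking (X86_FEATURE_CALL_DEPTH) is enabled.
 */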
void __init callthunks_patch_builtin_calls(void)
{
	struct callthunk_sites cs = {
		.call_start	= __call_sites,
		.call_end	= __call_sites_end,
		.alt_start	= __alt_instructions,
		.alt_end	= __alt_instructions_end
	};

	if (!cpu_feature_enabled(X86_FEATURE_CALL_DEPTH))
		return;

	pr_info("Setting up call depth tracking\n");
	mutex_lock(&text_mutex);
	callthunks_setup(&cs, &builtin_coretext);
	thunks_initialized = true;
	mutex_unlock(&text_mutex);
}

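/*
 * Translate a call destination to its accounting thunk so that later
 * text patching (e.g. static call updates) keeps the call depth
 * accounting intact. Falls back to the original destination when no
 * thunk can be installed.
 */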
void *callthunks_translate_call_dest(void *dest)
{
	void *target;

	lockdep_assert_held(&text_mutex);

	if (!thunks_initialized || skip_addr(dest))
		return dest;

	if (!is_coretext(NULL, dest))
		return dest;

	target = patch_dest(dest, false);
	return target ? : dest;
}

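/*
 * BPF JIT support: is_callthunk() tests whether an address already has
 * the accounting template installed in the padding in front of it, so
 * the JIT does not emit redundant accounting for such targets.
 */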
#ifdef CONFIG_BPF_JIT
static bool is_callthunk(void *addr)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];
	unsigned long dest;
	u8 *pad;

	dest = roundup((unsigned long)addr, CONFIG_FUNCTION_ALIGNMENT);
	if (!thunks_initialized || skip_addr((void *)dest))
		return false;

	pad = (void *)(dest - tmpl_size);

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, pad, tmpl_size, skl_call_thunk_template, tmpl_size);

	return !bcmp(pad, insn_buff, tmpl_size);
}

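/*
 * Emit the relocated accounting template at @ip into the BPF JIT buffer
 * @pprog. Returns the number of bytes emitted, or 0 when no accounting
 * is required (thunks not initialized or the target already is a thunk).
 */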
int x86_call_depth_emit_accounting(u8 **pprog, void *func, void *ip)
{
	unsigned int tmpl_size = SKL_TMPL_SIZE;
	u8 insn_buff[MAX_PATCH_LEN];

	if (!thunks_initialized)
		return 0;

	/* Is function call target a thunk? */
	if (func && is_callthunk(func))
		return 0;

	memcpy(insn_buff, skl_call_thunk_template, tmpl_size);
	apply_relocation(insn_buff, ip, tmpl_size, skl_call_thunk_template, tmpl_size);

	memcpy(*pprog, insn_buff, tmpl_size);
	*pprog += tmpl_size;
	return tmpl_size;
}
#endif

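/*
 * Patch the call sites of a freshly loaded module, invoked from the
 * module loading path with the module's text range as the core text.
 */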
#ifdef CONFIG_MODULES
void noinline callthunks_patch_module_calls(struct callthunk_sites *cs,
					    struct module *mod)
{
	struct core_text ct = {
		.base = (unsigned long)mod->mem[MOD_TEXT].base,
		.end  = (unsigned long)mod->mem[MOD_TEXT].base + mod->mem[MOD_TEXT].size,
		.name = mod->name,
	};

	if (!thunks_initialized)
		return;

	mutex_lock(&text_mutex);
	callthunks_setup(cs, &ct);
	mutex_unlock(&text_mutex);
}
#endif /* CONFIG_MODULES */

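/*
 * Debug interface: dump the per-CPU counters via one file per CPU under
 * <debugfs>/callthunks/cpu<N>. Fields: C (calls), R (returns),
 * S (RSB stuffs), X (context switches).
 */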
#if defined(CONFIG_CALL_THUNKS_DEBUG) && defined(CONFIG_DEBUG_FS)
static int callthunks_debug_show(struct seq_file *m, void *p)
{
	unsigned long cpu = (unsigned long)m->private;

	seq_printf(m, "C: %16llu R: %16llu S: %16llu X: %16llu\n",
		   per_cpu(__x86_call_count, cpu),
		   per_cpu(__x86_ret_count, cpu),
		   per_cpu(__x86_stuffs_count, cpu),
		   per_cpu(__x86_ctxsw_count, cpu));
	return 0;
}

static int callthunks_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, callthunks_debug_show, inode->i_private);
}

static const struct file_operations dfs_ops = {
	.open		= callthunks_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init callthunks_debugfs_init(void)
{
	struct dentry *dir;
	unsigned long cpu;

	dir = debugfs_create_dir("callthunks", NULL);
	for_each_possible_cpu(cpu) {
		void *arg = (void *)cpu;
		char name[10];

		sprintf(name, "cpu%lu", cpu);
		debugfs_create_file(name, 0644, dir, arg, &dfs_ops);
	}
	return 0;
}
__initcall(callthunks_debugfs_init);
#endif
397