xref: /linux/arch/riscv/kernel/patch.c (revision a7f7f6248d9740d710fd6bd190293fe5e16410ac)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>

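/*
 * Payload handed to the stop_machine() callback below: the target
 * address, the (possibly compressed) instruction to write, and a
 * counter used to rendezvous the patching CPU with every other
 * online CPU.
 */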
struct patch_insn {
	void *addr;
	u32 insn;
	atomic_t cpu_count;
};

#ifdef CONFIG_MMU
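/*
 * Kernel text is mapped read-only (and module text may be, under
 * STRICT_MODULE_RWX), so an instruction cannot be stored through its
 * canonical virtual address.  patch_map() installs a temporary,
 * writable alias of the page backing @addr in one of the
 * FIX_TEXT_POKE* fixmap slots and returns @addr's location within
 * that alias.
 */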
static void *patch_map(void *addr, int fixmap)
{
	uintptr_t uintaddr = (uintptr_t) addr;
	struct page *page;

	if (core_kernel_text(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}
NOKPROBE_SYMBOL(patch_map);

static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

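/*
 * Write @len bytes of @insn at @addr through a temporary fixmap alias.
 * The write may straddle a page boundary, in which case the following
 * page is mapped into FIX_TEXT_POKE1; that slot sits virtually
 * adjacent to FIX_TEXT_POKE0 in the fixmap, so the copy below can run
 * past the end of the first page as one contiguous store.
 */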
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	int ret;

	/*
	 * The caller is expected to already hold text_mutex, so no
	 * further locking is needed here to keep patching safe across
	 * cores.
	 */
	lockdep_assert_held(&text_mutex);

	if (across_pages)
		patch_map(addr + len, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);
#else
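/*
 * Without an MMU there is neither a fixmap nor read-only text, so the
 * instruction can be stored directly at its final address.
 */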
static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	return probe_kernel_write(addr, insn, len);
}
NOKPROBE_SYMBOL(patch_insn_write);
#endif /* CONFIG_MMU */

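/*
 * "nosync" means no cross-CPU rendezvous is performed here: the caller
 * must hold text_mutex and must itself ensure that no other CPU can
 * execute the range being patched while the write is in flight.
 *
 * A minimal usage sketch (hypothetical caller, not part of this file);
 * 0x00000013 encodes "addi x0, x0, 0", the canonical RISC-V NOP, and
 * func_entry is a hypothetical address known to be safe to patch:
 *
 *	u32 nop = 0x00000013;
 *	int ret;
 *
 *	mutex_lock(&text_mutex);
 *	ret = patch_text_nosync(func_entry, &nop, sizeof(nop));
 *	mutex_unlock(&text_mutex);
 */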
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = patch_insn_write(tp, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);

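/*
 * Runs on every online CPU under stop_machine().  The first CPU to
 * arrive (atomic_inc_return() == 1) writes the instruction and then
 * bumps the counter once more, so with N online CPUs the counter ends
 * at N + 1.  All other CPUs spin until the counter exceeds N, which
 * keeps them from resuming before the new instruction is in place.
 */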
static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == 1) {
		ret = patch_text_nosync(patch->addr, &patch->insn,
					GET_INSN_LENGTH(patch->insn));
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

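/*
 * Patch one (16- or 32-bit) instruction, synchronising all online CPUs
 * through stop_machine() so that none of them executes the patched
 * address mid-update.
 */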
int patch_text(void *addr, u32 insn)
{
	struct patch_insn patch = {
		.addr = addr,
		.insn = insn,
		.cpu_count = ATOMIC_INIT(0),
	};

	return stop_machine_cpuslocked(patch_text_cb,
				       &patch, cpu_online_mask);
}
NOKPROBE_SYMBOL(patch_text);
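
/*
 * Usage sketch (hypothetical, not part of this file): kprobes-style
 * code could arm a software breakpoint by swapping in an ebreak
 * instruction, whose 32-bit encoding is 0x00100073; probe_addr below
 * is a hypothetical target address:
 *
 *	patch_text(probe_addr, 0x00100073);
 */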