// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
#include <asm/sections.h>

struct patch_insn {
	void *addr;
	u32 *insns;
	int ninsns;
	atomic_t cpu_count;
};

int riscv_patch_in_stop_machine = false;

#ifdef CONFIG_MMU

static inline bool is_kernel_exittext(uintptr_t addr)
{
	return system_state < SYSTEM_RUNNING &&
		addr >= (uintptr_t)__exittext_begin &&
		addr < (uintptr_t)__exittext_end;
}

/*
 * fix_to_virt(idx) needs a compile-time constant index (not a dynamic
 * value living in a register), otherwise its BUILD_BUG_ON fires with
 * "idx >= __end_of_fixed_addresses".  Hence '__always_inline' and the
 * 'const unsigned int fixmap' parameter here.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
	uintptr_t uintaddr = (uintptr_t)addr;
	struct page *page;

	if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
		page = phys_to_page(__pa_symbol(addr));
	else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else
		return addr;

	BUG_ON(!page);

	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
					 (uintaddr & ~PAGE_MASK));
}

static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

static int __patch_insn_set(void *addr, u8 c, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t)addr & ~PAGE_MASK) + len) > PAGE_SIZE;

	/*
	 * Only two pages can be mapped at a time for writing.
	 */
	if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
		return -EINVAL;

	/*
	 * The caller must already hold text_mutex by the time we get here,
	 * so no additional locking is needed to keep patching safe across
	 * cores.
	 */
	lockdep_assert_held(&text_mutex);

	if (across_pages)
		patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	memset(waddr, c, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);
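
/*
 * Illustrative note (editor's sketch, not functional code): the cross-page
 * case above relies on FIX_TEXT_POKE0 and FIX_TEXT_POKE1 being adjacent
 * fixmap slots, so a write that straddles a page boundary stays virtually
 * contiguous through the fixmap alias.  For example, a hypothetical 4-byte
 * write starting 2 bytes before the end of a page would need:
 *
 *	FIX_TEXT_POKE0: maps the page containing addr (last 2 bytes written)
 *	FIX_TEXT_POKE1: maps the following page (first 2 bytes written)
 *
 * The fixmap entries are writable aliases of the otherwise read-only text
 * pages, which is what allows patching without relaxing the permissions of
 * the kernel's own text mapping.
 */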

static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
	void *waddr = addr;
	bool across_pages = (((uintptr_t)addr & ~PAGE_MASK) + len) > PAGE_SIZE;
	int ret;

	/*
	 * Only two pages can be mapped at a time for writing.
	 */
	if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
		return -EINVAL;

	/*
	 * The caller must already hold text_mutex by the time we get here,
	 * so no additional locking is needed to keep patching safe across
	 * cores.
	 *
	 * We're currently using stop_machine() for ftrace & kprobes, and while
	 * that ensures text_mutex is held before installing the mappings it
	 * does not ensure text_mutex is held by the calling thread.  That's
	 * safe but triggers a lockdep failure, so just elide the assertion for
	 * that specific case.
	 */
	if (!riscv_patch_in_stop_machine)
		lockdep_assert_held(&text_mutex);

	if (across_pages)
		patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, insn, len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
#else
static int __patch_insn_set(void *addr, u8 c, size_t len)
{
	memset(addr, c, len);

	return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
	return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(__patch_insn_write);
#endif /* CONFIG_MMU */

static int patch_insn_set(void *addr, u8 c, size_t len)
{
	size_t patched = 0;
	size_t size;
	int ret = 0;

	/*
	 * __patch_insn_set() can only work on 2 pages at a time so call it in a
	 * loop with len <= 2 * PAGE_SIZE.
	 */
	while (patched < len && !ret) {
		size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
		ret = __patch_insn_set(addr + patched, c, size);

		patched += size;
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_set);

int patch_text_set_nosync(void *addr, u8 c, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = patch_insn_set(tp, c, len);

	if (!ret)
		flush_icache_range((uintptr_t)tp, (uintptr_t)tp + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);
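
/*
 * Usage sketch (editor's illustration; 'addr' and 'size' are hypothetical):
 * the "_nosync" variants write and flush without stopping the other harts,
 * so the caller must guarantee that no other CPU can be executing the code
 * being patched, e.g. by running inside stop_machine() as patch_text_cb()
 * below does.  A caller blanketing a region with zero bytes (all-zero
 * encodings decode as defined-illegal instructions on RISC-V) might do:
 *
 *	ret = patch_text_set_nosync(addr, 0x00, size);
 *	if (ret)
 *		pr_warn("text patching failed: %d\n", ret);
 */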

static int patch_insn_write(void *addr, const void *insn, size_t len)
{
	size_t patched = 0;
	size_t size;
	int ret = 0;

	/*
	 * Copy the instructions to the destination address, two pages at a
	 * time because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
	 */
	while (patched < len && !ret) {
		size = min_t(size_t, PAGE_SIZE * 2 - offset_in_page(addr + patched), len - patched);
		ret = __patch_insn_write(addr + patched, insn + patched, size);

		patched += size;
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_insn_write);

int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	u32 *tp = addr;
	int ret;

	ret = patch_insn_write(tp, insns, len);

	if (!ret)
		flush_icache_range((uintptr_t)tp, (uintptr_t)tp + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);

static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	unsigned long len;
	int i, ret = 0;

	/*
	 * The last CPU to enter the rendezvous does the patching; the others
	 * spin until the count is bumped past num_online_cpus(), signalling
	 * that the text is consistent again.
	 */
	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
		for (i = 0; ret == 0 && i < patch->ninsns; i++) {
			len = GET_INSN_LENGTH(patch->insns[i]);
			ret = patch_text_nosync(patch->addr + i * len,
						&patch->insns[i], len);
		}
		atomic_inc(&patch->cpu_count);
	} else {
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();
		smp_mb();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

int patch_text(void *addr, u32 *insns, int ninsns)
{
	int ret;
	struct patch_insn patch = {
		.addr = addr,
		.insns = insns,
		.ninsns = ninsns,
		.cpu_count = ATOMIC_INIT(0),
	};

	/*
	 * kprobes takes text_mutex before calling patch_text(), but as we
	 * call stop_machine() the lockdep assertion in patch_insn_write()
	 * gets confused by the context in which the lock is taken.
	 * Instead, ensure the lock is held before calling stop_machine(), and
	 * set riscv_patch_in_stop_machine to skip the check in
	 * patch_insn_write().
	 */
	lockdep_assert_held(&text_mutex);
	riscv_patch_in_stop_machine = true;
	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
	riscv_patch_in_stop_machine = false;
	return ret;
}
NOKPROBE_SYMBOL(patch_text);
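
/*
 * Usage sketch (editor's illustration; 'probe_addr' is hypothetical): this
 * is roughly how a kprobes-style caller plants a single 32-bit breakpoint,
 * letting the stop_machine() rendezvous above keep every other hart parked
 * while the text changes.  text_mutex must be held, as asserted above:
 *
 *	u32 insn = __BUG_INSN_32;	(the 32-bit ebreak encoding)
 *
 *	mutex_lock(&text_mutex);
 *	ret = patch_text(probe_addr, &insn, 1);
 *	mutex_unlock(&text_mutex);
 */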