// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 SiFive
 */

#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/memory.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <asm/kprobes.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/text-patching.h>
#include <asm/sections.h>

/*
 * Argument bundle handed to the stop_machine() callback used by
 * patch_text().
 */
struct patch_insn {
	void *addr;		/* kernel text address to patch */
	u32 *insns;		/* replacement instruction words */
	size_t len;		/* number of bytes to write */
	atomic_t cpu_count;	/* rendezvous counter across online CPUs */
};

/*
 * Set by patch_text() around its stop_machine() call so that
 * __patch_insn_write() can skip its lockdep_assert_held(&text_mutex):
 * the mutex IS held, but by the thread that invoked stop_machine(),
 * not necessarily by the CPU that ends up doing the write.
 */
int riscv_patch_in_stop_machine = false;

#ifdef CONFIG_MMU

/*
 * True when @addr lies inside the kernel's .exit.text and the system has
 * not finished booting (system_state < SYSTEM_RUNNING).  Such addresses
 * are legitimate patch targets early on even though they are not part of
 * core kernel text.
 */
static inline bool is_kernel_exittext(uintptr_t addr)
{
	return system_state < SYSTEM_RUNNING &&
		addr >= (uintptr_t)__exittext_begin &&
		addr < (uintptr_t)__exittext_end;
}

/*
 * Map the page containing @addr at the given text-poke fixmap slot and
 * return a writable alias for @addr, so that read-only kernel text can be
 * modified through the alias.  If @addr is neither kernel/exit text nor
 * covered by CONFIG_STRICT_MODULE_RWX, it is assumed to be directly
 * writable and is returned unchanged.
 *
 * The fix_to_virt(, idx) needs a const value (not a dynamic variable of
 * reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses".
 * So use '__always_inline' and 'const unsigned int fixmap' here.
 */
static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
{
	uintptr_t uintaddr = (uintptr_t) addr;
	phys_addr_t phys;

	if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr)) {
		phys = __pa_symbol(addr);
	} else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX)) {
		struct page *page = vmalloc_to_page(addr);

		BUG_ON(!page);
		phys = page_to_phys(page) + offset_in_page(addr);
	} else {
		return addr;
	}

	return (void *)set_fixmap_offset(fixmap, phys);
}

/* Tear down a mapping established by patch_map(). */
static void patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
NOKPROBE_SYMBOL(patch_unmap);

/*
 * Fill @len bytes at kernel text address @addr with byte @c, writing
 * through a temporary fixmap alias.  @len may span at most two pages
 * starting from @addr's page.  Returns 0 on success or -EINVAL if the
 * range would need more than the two available fixmap slots.
 */
static int __patch_insn_set(void *addr, u8 c, size_t len)
{
	bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
	void *waddr = addr;

	/*
	 * Only two pages can be mapped at a time for writing.
	 */
	if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
		return -EINVAL;
	/*
	 * The caller is expected to hold text_mutex already, so no
	 * additional locking is needed here to keep this safe across
	 * cores.
	 */
	lockdep_assert_held(&text_mutex);

	preempt_disable();

	/*
	 * The returned alias for the second page is not used directly:
	 * this presumably relies on FIX_TEXT_POKE1 being mapped virtually
	 * adjacent (one page above) FIX_TEXT_POKE0 so the write through
	 * waddr can cross the page boundary — verify against asm/fixmap.h.
	 */
	if (across_pages)
		patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	memset(waddr, c, len);

	/*
	 * We could have just patched a function that is about to be
	 * called so make sure we don't execute partially patched
	 * instructions by flushing the icache as soon as possible.
	 */
	local_flush_icache_range((unsigned long)waddr,
				 (unsigned long)waddr + len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	preempt_enable();

	return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

/*
 * Copy @len bytes of @insn to kernel text address @addr through a
 * temporary fixmap alias.  @len may span at most two pages starting from
 * @addr's page.  Returns 0 on success, -EINVAL if the range is too large,
 * or the error from copy_to_kernel_nofault() if the write faults.
 */
static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
	bool across_pages = (offset_in_page(addr) + len) > PAGE_SIZE;
	void *waddr = addr;
	int ret;

	/*
	 * Only two pages can be mapped at a time for writing.
	 */
	if (len + offset_in_page(addr) > 2 * PAGE_SIZE)
		return -EINVAL;

	/*
	 * The caller is expected to hold text_mutex already, so no
	 * additional locking is needed here to keep this safe across
	 * cores.
	 *
	 * We're currently using stop_machine() for ftrace & kprobes, and while
	 * that ensures text_mutex is held before installing the mappings it
	 * does not ensure text_mutex is held by the calling thread. That's
	 * safe but triggers a lockdep failure, so just elide it for that
	 * specific case.
	 */
	if (!riscv_patch_in_stop_machine)
		lockdep_assert_held(&text_mutex);

	preempt_disable();

	/* Second page mapped first; see the note in __patch_insn_set(). */
	if (across_pages)
		patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1);

	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = copy_to_kernel_nofault(waddr, insn, len);

	/*
	 * We could have just patched a function that is about to be
	 * called so make sure we don't execute partially patched
	 * instructions by flushing the icache as soon as possible.
	 */
	local_flush_icache_range((unsigned long)waddr,
				 (unsigned long)waddr + len);

	patch_unmap(FIX_TEXT_POKE0);

	if (across_pages)
		patch_unmap(FIX_TEXT_POKE1);

	preempt_enable();

	return ret;
}
NOKPROBE_SYMBOL(__patch_insn_write);
#else
/* !CONFIG_MMU: kernel text is directly writable, no fixmap aliasing needed. */
static int __patch_insn_set(void *addr, u8 c, size_t len)
{
	memset(addr, c, len);

	return 0;
}
NOKPROBE_SYMBOL(__patch_insn_set);

static int __patch_insn_write(void *addr, const void *insn, size_t len)
{
	return copy_to_kernel_nofault(addr, insn, len);
}
NOKPROBE_SYMBOL(__patch_insn_write);
#endif /* CONFIG_MMU */

/*
 * Fill an arbitrarily long range of kernel text with byte @c.
 * Returns 0 on success or the first error from __patch_insn_set().
 */
static int patch_insn_set(void *addr, u8 c, size_t len)
{
	size_t size;
	int ret;

	/*
	 * __patch_insn_set() can only work on 2 pages at a time so call it in a
	 * loop with len <= 2 * PAGE_SIZE.
	 */
	while (len) {
		size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
		ret = __patch_insn_set(addr, c, size);
		if (ret)
			return ret;

		addr += size;
		len -= size;
	}

	return 0;
}
NOKPROBE_SYMBOL(patch_insn_set);

/*
 * Fill kernel text with byte @c and flush the icache for the range, but
 * do NOT synchronise other harts (hence "_nosync"); the caller is
 * responsible for any required cross-CPU instruction-stream sync.
 */
int patch_text_set_nosync(void *addr, u8 c, size_t len)
{
	int ret;

	ret = patch_insn_set(addr, c, len);
	if (!ret)
		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_set_nosync);

/*
 * Write an arbitrarily long instruction sequence to kernel text.
 * Returns 0 on success or the first error from __patch_insn_write().
 */
int patch_insn_write(void *addr, const void *insn, size_t len)
{
	size_t size;
	int ret;

	/*
	 * Copy the instructions to the destination address, two pages at a time
	 * because __patch_insn_write() can only handle len <= 2 * PAGE_SIZE.
	 */
	while (len) {
		size = min(len, PAGE_SIZE * 2 - offset_in_page(addr));
		ret = __patch_insn_write(addr, insn, size);
		if (ret)
			return ret;

		addr += size;
		insn += size;
		len -= size;
	}

	return 0;
}
NOKPROBE_SYMBOL(patch_insn_write);

/*
 * Write instructions to kernel text and flush the icache for the range,
 * but do NOT synchronise other harts (hence "_nosync"); the caller is
 * responsible for any required cross-CPU instruction-stream sync.
 */
int patch_text_nosync(void *addr, const void *insns, size_t len)
{
	int ret;

	ret = patch_insn_write(addr, insns, len);
	if (!ret)
		flush_icache_range((uintptr_t)addr, (uintptr_t)addr + len);

	return ret;
}
NOKPROBE_SYMBOL(patch_text_nosync);

/*
 * stop_machine() callback: the last CPU to arrive performs the write
 * (its icache is flushed inside __patch_insn_write()); all other CPUs
 * spin until the patch is published, then flush their own icache.
 */
static int patch_text_cb(void *data)
{
	struct patch_insn *patch = data;
	int ret = 0;

	if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
		ret = patch_insn_write(patch->addr, patch->insns, patch->len);
		/*
		 * Make sure the patching store is effective *before* we
		 * increment the counter which releases all waiting CPUs
		 * by using the release variant of atomic increment. The
		 * release pairs with the call to local_flush_icache_all()
		 * on the waiting CPU.
		 */
		atomic_inc_return_release(&patch->cpu_count);
	} else {
		/*
		 * Wait for the count to exceed num_online_cpus(), which only
		 * happens once the patching CPU's release increment lands.
		 */
		while (atomic_read(&patch->cpu_count) <= num_online_cpus())
			cpu_relax();

		local_flush_icache_all();
	}

	return ret;
}
NOKPROBE_SYMBOL(patch_text_cb);

/*
 * Patch kernel text with all other CPUs quiesced via stop_machine().
 * The caller must hold text_mutex.  Returns 0 on success or a negative
 * error code from the write.
 */
int patch_text(void *addr, u32 *insns, size_t len)
{
	int ret;
	struct patch_insn patch = {
		.addr = addr,
		.insns = insns,
		.len = len,
		.cpu_count = ATOMIC_INIT(0),
	};

	/*
	 * kprobes takes text_mutex, before calling patch_text(), but as we
	 * call stop_machine(), the lockdep assertion in patch_insn_write()
	 * gets confused by the context in which the lock is taken.
	 * Instead, ensure the lock is held before calling stop_machine(), and
	 * set riscv_patch_in_stop_machine to skip the check in
	 * patch_insn_write().
	 */
	lockdep_assert_held(&text_mutex);
	riscv_patch_in_stop_machine = true;
	ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
	riscv_patch_in_stop_machine = false;
	return ret;
}
NOKPROBE_SYMBOL(patch_text);