--- code-patching.c (446279168e030fd0ed68e2bba336bef8bb3da352)
+++ code-patching.c (8b4bb0ad00cb347f62e76a636ce08eb179c843fc)
 // SPDX-License-Identifier: GPL-2.0-or-later
 /*
  * Copyright 2008 Michael Ellerman, IBM Corporation.
  */
 
 #include <linux/kprobes.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>

[... 80 unchanged lines hidden ...]

 void __init poking_init(void)
 {
         BUG_ON(!cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                 "powerpc/text_poke:online", text_area_cpu_up,
                 text_area_cpu_down));
         static_branch_enable(&poking_init_done);
 }
 
+static unsigned long get_patch_pfn(void *addr)
+{
+        if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
+                return vmalloc_to_pfn(addr);
+        else
+                return __pa_symbol(addr) >> PAGE_SHIFT;
+}
+
 /*
  * This can be called for kernel text or a module.
  */
 static int map_patch_area(void *addr, unsigned long text_poke_addr)
 {
-        unsigned long pfn;
-
-        if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
-                pfn = vmalloc_to_pfn(addr);
-        else
-                pfn = __pa_symbol(addr) >> PAGE_SHIFT;
+        unsigned long pfn = get_patch_pfn(addr);
 
         return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
 }
 
 static void unmap_patch_area(unsigned long addr)
 {
         pte_t *ptep;
         pmd_t *pmdp;
         pud_t *pudp;

[... 27 unchanged lines hidden ...]

         flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
 }
 
 static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
 {
         int err;
         u32 *patch_addr;
         unsigned long text_poke_addr;
+        pte_t *pte;
+        unsigned long pfn = get_patch_pfn(addr);
 
-        text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr;
+        text_poke_addr = (unsigned long)__this_cpu_read(text_poke_area)->addr & PAGE_MASK;
         patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));
 
-        err = map_patch_area(addr, text_poke_addr);
-        if (err)
-                return err;
+        pte = virt_to_kpte(text_poke_addr);
+        __set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
+        /* See ptesync comment in radix__set_pte_at() */
+        if (radix_enabled())
+                asm volatile("ptesync": : :"memory");
 
         err = __patch_instruction(addr, instr, patch_addr);
 
-        unmap_patch_area(text_poke_addr);
+        pte_clear(&init_mm, text_poke_addr, pte);
+        flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);
 
         return err;
 }
 
 static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
 {
         int err;
         unsigned long flags;

[... 156 unchanged lines hidden ...]
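
For context, a minimal caller-side sketch of how the patching path above is reached. It assumes the usual powerpc helpers (patch_instruction(), ppc_inst(), PPC_RAW_NOP()); the example_nop_out() wrapper and its patch_site argument are hypothetical names used only for illustration, not part of the diff.

/*
 * Hedged usage sketch, not part of the diff: a client such as ftrace or
 * kprobes rewrites one kernel instruction through patch_instruction(),
 * which goes via do_patch_instruction()/__do_patch_instruction() once
 * poking_init() has enabled the poking_init_done static branch.
 * example_nop_out() and patch_site are illustrative names.
 */
#include <asm/code-patching.h>
#include <asm/inst.h>
#include <asm/ppc-opcode.h>

static int example_nop_out(u32 *patch_site)
{
        /* Replace the instruction at patch_site with a no-op. */
        return patch_instruction(patch_site, ppc_inst(PPC_RAW_NOP()));
}

The patch itself is written through the temporary per-CPU text_poke_area mapping set up above, so the target kernel text never needs to be made writable in place.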