// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

/*
 * Write one instruction (word-sized, or doubleword for a prefixed
 * instruction) through the writable alias @patch_addr, then flush the
 * data cache line and invalidate the icache line for the executable
 * address @exec_addr so the new instruction becomes visible to fetch.
 *
 * Returns 0 on success, -EPERM if the store through @patch_addr faults
 * (__put_kernel_nofault branches to the "failed" label on a fault).
 */
static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
	if (!ppc_inst_prefixed(instr)) {
		u32 val = ppc_inst_val(instr);

		__put_kernel_nofault(patch_addr, &val, u32, failed);
	} else {
		u64 val = ppc_inst_as_ulong(instr);

		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	/*
	 * dcbst/sync push the store to memory, icbi/sync/isync discard any
	 * stale copy of the old instruction from the instruction cache.
	 */
	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	return -EPERM;
}

/*
 * Patch an instruction directly at its executable address, with no
 * writable-alias mapping. Only valid when the text is writable (e.g.
 * early boot, or !CONFIG_STRICT_KERNEL_RWX).
 */
int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return __patch_instruction(addr, instr, addr);
}

#ifdef CONFIG_STRICT_KERNEL_RWX

/*
 * Per-cpu state for text patching. Exactly one member of the union is
 * in use, depending on the patching strategy selected at poking_init():
 * - @area: vmalloc area used by the init_mm-based strategy
 *          (text_area_cpu_up/text_area_cpu_down)
 * - @mm:   temporary mm used by the temp-mm strategy
 *          (text_area_cpu_up_mm/text_area_cpu_down_mm)
 * @addr is the per-cpu patching address, @pte its pre-allocated PTE.
 */
struct patch_context {
	union {
		struct vm_struct *area;
		struct mm_struct *mm;
	};
	unsigned long addr;
	pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

/*
 * The temporary-mm strategy is only used on SMP Radix; Hash MMU and UP
 * fall back to the init_mm-based vm-area strategy.
 */
static bool mm_patch_enabled(void)
{
	return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}

/*
 * The following applies for Radix MMU. Hash MMU has different requirements,
 * and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm.
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *orig_mm = current->active_mm;

	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(orig_mm, temp_mm, current);

	/* The temp mm must never be active on another CPU. */
	WARN_ON(!mm_is_thread_local(temp_mm));

	/* Breakpoints must not fire while the temp mm is active. */
	suspend_breakpoints();
	return orig_mm;
}

/*
 * Undo start_using_temp_mm(): switch back to @orig_mm and re-arm
 * breakpoints. Caller provides the CSI/hwsync (see comment above).
 */
static void stop_using_temp_mm(struct mm_struct *temp_mm,
			       struct mm_struct *orig_mm)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(temp_mm, orig_mm, current);
	restore_breakpoints();
}
Riedl 10437bc3e5fSBalbir Singh static int text_area_cpu_up(unsigned int cpu) 10537bc3e5fSBalbir Singh { 10637bc3e5fSBalbir Singh struct vm_struct *area; 107591b4b26SMichael Ellerman unsigned long addr; 108591b4b26SMichael Ellerman int err; 10937bc3e5fSBalbir Singh 11037bc3e5fSBalbir Singh area = get_vm_area(PAGE_SIZE, VM_ALLOC); 11137bc3e5fSBalbir Singh if (!area) { 11237bc3e5fSBalbir Singh WARN_ONCE(1, "Failed to create text area for cpu %d\n", 11337bc3e5fSBalbir Singh cpu); 11437bc3e5fSBalbir Singh return -1; 11537bc3e5fSBalbir Singh } 116591b4b26SMichael Ellerman 117591b4b26SMichael Ellerman // Map/unmap the area to ensure all page tables are pre-allocated 118591b4b26SMichael Ellerman addr = (unsigned long)area->addr; 119591b4b26SMichael Ellerman err = map_patch_area(empty_zero_page, addr); 120591b4b26SMichael Ellerman if (err) 121591b4b26SMichael Ellerman return err; 122591b4b26SMichael Ellerman 123591b4b26SMichael Ellerman unmap_patch_area(addr); 124591b4b26SMichael Ellerman 125*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.area, area); 126*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.addr, addr); 127*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr)); 12837bc3e5fSBalbir Singh 12937bc3e5fSBalbir Singh return 0; 13037bc3e5fSBalbir Singh } 13137bc3e5fSBalbir Singh 13237bc3e5fSBalbir Singh static int text_area_cpu_down(unsigned int cpu) 13337bc3e5fSBalbir Singh { 134*2f228ee1SBenjamin Gray free_vm_area(this_cpu_read(cpu_patching_context.area)); 135*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.area, NULL); 136*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.addr, 0); 137*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.pte, NULL); 13837bc3e5fSBalbir Singh return 0; 13937bc3e5fSBalbir Singh } 14037bc3e5fSBalbir Singh 141c28c15b6SChristopher M. Riedl static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr) 142c28c15b6SChristopher M. 
Riedl { 143c28c15b6SChristopher M. Riedl struct mmu_gather tlb; 144c28c15b6SChristopher M. Riedl 145c28c15b6SChristopher M. Riedl tlb_gather_mmu(&tlb, mm); 146c28c15b6SChristopher M. Riedl free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0); 147c28c15b6SChristopher M. Riedl mmput(mm); 148c28c15b6SChristopher M. Riedl } 149c28c15b6SChristopher M. Riedl 150c28c15b6SChristopher M. Riedl static int text_area_cpu_up_mm(unsigned int cpu) 151c28c15b6SChristopher M. Riedl { 152c28c15b6SChristopher M. Riedl struct mm_struct *mm; 153c28c15b6SChristopher M. Riedl unsigned long addr; 154c28c15b6SChristopher M. Riedl pte_t *pte; 155c28c15b6SChristopher M. Riedl spinlock_t *ptl; 156c28c15b6SChristopher M. Riedl 157c28c15b6SChristopher M. Riedl mm = mm_alloc(); 158c28c15b6SChristopher M. Riedl if (WARN_ON(!mm)) 159c28c15b6SChristopher M. Riedl goto fail_no_mm; 160c28c15b6SChristopher M. Riedl 161c28c15b6SChristopher M. Riedl /* 162c28c15b6SChristopher M. Riedl * Choose a random page-aligned address from the interval 163c28c15b6SChristopher M. Riedl * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE]. 164c28c15b6SChristopher M. Riedl * The lower address bound is PAGE_SIZE to avoid the zero-page. 165c28c15b6SChristopher M. Riedl */ 166c28c15b6SChristopher M. Riedl addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT; 167c28c15b6SChristopher M. Riedl 168c28c15b6SChristopher M. Riedl /* 169c28c15b6SChristopher M. Riedl * PTE allocation uses GFP_KERNEL which means we need to 170c28c15b6SChristopher M. Riedl * pre-allocate the PTE here because we cannot do the 171c28c15b6SChristopher M. Riedl * allocation during patching when IRQs are disabled. 172c28c15b6SChristopher M. Riedl * 173c28c15b6SChristopher M. Riedl * Using get_locked_pte() to avoid open coding, the lock 174c28c15b6SChristopher M. Riedl * is unnecessary. 175c28c15b6SChristopher M. Riedl */ 176c28c15b6SChristopher M. 
Riedl pte = get_locked_pte(mm, addr, &ptl); 177c28c15b6SChristopher M. Riedl if (!pte) 178c28c15b6SChristopher M. Riedl goto fail_no_pte; 179c28c15b6SChristopher M. Riedl pte_unmap_unlock(pte, ptl); 180c28c15b6SChristopher M. Riedl 181*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.mm, mm); 182*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.addr, addr); 183*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.pte, pte); 184c28c15b6SChristopher M. Riedl 185c28c15b6SChristopher M. Riedl return 0; 186c28c15b6SChristopher M. Riedl 187c28c15b6SChristopher M. Riedl fail_no_pte: 188c28c15b6SChristopher M. Riedl put_patching_mm(mm, addr); 189c28c15b6SChristopher M. Riedl fail_no_mm: 190c28c15b6SChristopher M. Riedl return -ENOMEM; 191c28c15b6SChristopher M. Riedl } 192c28c15b6SChristopher M. Riedl 193c28c15b6SChristopher M. Riedl static int text_area_cpu_down_mm(unsigned int cpu) 194c28c15b6SChristopher M. Riedl { 195*2f228ee1SBenjamin Gray put_patching_mm(this_cpu_read(cpu_patching_context.mm), 196*2f228ee1SBenjamin Gray this_cpu_read(cpu_patching_context.addr)); 197c28c15b6SChristopher M. Riedl 198*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.mm, NULL); 199*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.addr, 0); 200*2f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.pte, NULL); 201c28c15b6SChristopher M. Riedl 202c28c15b6SChristopher M. Riedl return 0; 203c28c15b6SChristopher M. Riedl } 204c28c15b6SChristopher M. Riedl 20517512892SChristophe Leroy static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done); 20617512892SChristophe Leroy 20771a5b3dbSJordan Niethe void __init poking_init(void) 20837bc3e5fSBalbir Singh { 209c28c15b6SChristopher M. Riedl int ret; 210c28c15b6SChristopher M. Riedl 211c28c15b6SChristopher M. Riedl if (mm_patch_enabled()) 212c28c15b6SChristopher M. Riedl ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 213c28c15b6SChristopher M. 
Riedl "powerpc/text_poke_mm:online", 214c28c15b6SChristopher M. Riedl text_area_cpu_up_mm, 215c28c15b6SChristopher M. Riedl text_area_cpu_down_mm); 216c28c15b6SChristopher M. Riedl else 217c28c15b6SChristopher M. Riedl ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 218071c95c1SBenjamin Gray "powerpc/text_poke:online", 219071c95c1SBenjamin Gray text_area_cpu_up, 220071c95c1SBenjamin Gray text_area_cpu_down); 221071c95c1SBenjamin Gray 222071c95c1SBenjamin Gray /* cpuhp_setup_state returns >= 0 on success */ 223071c95c1SBenjamin Gray if (WARN_ON(ret < 0)) 224071c95c1SBenjamin Gray return; 225071c95c1SBenjamin Gray 22617512892SChristophe Leroy static_branch_enable(&poking_init_done); 22737bc3e5fSBalbir Singh } 22837bc3e5fSBalbir Singh 2298b4bb0adSChristophe Leroy static unsigned long get_patch_pfn(void *addr) 2308b4bb0adSChristophe Leroy { 2318b4bb0adSChristophe Leroy if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr)) 2328b4bb0adSChristophe Leroy return vmalloc_to_pfn(addr); 2338b4bb0adSChristophe Leroy else 2348b4bb0adSChristophe Leroy return __pa_symbol(addr) >> PAGE_SHIFT; 2358b4bb0adSChristophe Leroy } 2368b4bb0adSChristophe Leroy 23737bc3e5fSBalbir Singh /* 23837bc3e5fSBalbir Singh * This can be called for kernel text or a module. 
23937bc3e5fSBalbir Singh */ 24037bc3e5fSBalbir Singh static int map_patch_area(void *addr, unsigned long text_poke_addr) 24137bc3e5fSBalbir Singh { 2428b4bb0adSChristophe Leroy unsigned long pfn = get_patch_pfn(addr); 24337bc3e5fSBalbir Singh 244285672f9SChristophe Leroy return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL); 24537bc3e5fSBalbir Singh } 24637bc3e5fSBalbir Singh 247a3483c3dSChristophe Leroy static void unmap_patch_area(unsigned long addr) 24837bc3e5fSBalbir Singh { 24937bc3e5fSBalbir Singh pte_t *ptep; 25037bc3e5fSBalbir Singh pmd_t *pmdp; 25137bc3e5fSBalbir Singh pud_t *pudp; 2522fb47060SMike Rapoport p4d_t *p4dp; 25337bc3e5fSBalbir Singh pgd_t *pgdp; 25437bc3e5fSBalbir Singh 25537bc3e5fSBalbir Singh pgdp = pgd_offset_k(addr); 256a3483c3dSChristophe Leroy if (WARN_ON(pgd_none(*pgdp))) 257a3483c3dSChristophe Leroy return; 25837bc3e5fSBalbir Singh 2592fb47060SMike Rapoport p4dp = p4d_offset(pgdp, addr); 260a3483c3dSChristophe Leroy if (WARN_ON(p4d_none(*p4dp))) 261a3483c3dSChristophe Leroy return; 2622fb47060SMike Rapoport 2632fb47060SMike Rapoport pudp = pud_offset(p4dp, addr); 264a3483c3dSChristophe Leroy if (WARN_ON(pud_none(*pudp))) 265a3483c3dSChristophe Leroy return; 26637bc3e5fSBalbir Singh 26737bc3e5fSBalbir Singh pmdp = pmd_offset(pudp, addr); 268a3483c3dSChristophe Leroy if (WARN_ON(pmd_none(*pmdp))) 269a3483c3dSChristophe Leroy return; 27037bc3e5fSBalbir Singh 27137bc3e5fSBalbir Singh ptep = pte_offset_kernel(pmdp, addr); 272a3483c3dSChristophe Leroy if (WARN_ON(pte_none(*ptep))) 273a3483c3dSChristophe Leroy return; 27437bc3e5fSBalbir Singh 27537bc3e5fSBalbir Singh /* 27637bc3e5fSBalbir Singh * In hash, pte_clear flushes the tlb, in radix, we have to 27737bc3e5fSBalbir Singh */ 27837bc3e5fSBalbir Singh pte_clear(&init_mm, addr, ptep); 27937bc3e5fSBalbir Singh flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 28037bc3e5fSBalbir Singh } 28137bc3e5fSBalbir Singh 282c28c15b6SChristopher M. 
/*
 * Patch one instruction via the per-cpu temporary mm: map a writable
 * alias of the target page at the per-cpu patching address inside the
 * temp mm, switch to it, write the instruction, then switch back and
 * tear the mapping down. Caller must have IRQs disabled
 * (do_patch_instruction does local_irq_save).
 */
static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);
	struct mm_struct *patching_mm;
	struct mm_struct *orig_mm;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	pte = __this_cpu_read(cpu_patching_context.pte);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync": : :"memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_instruction(addr, instr, patch_addr);

	/* hwsync performed by __patch_instruction (sync) if successful */
	if (err)
		mb(); /* sync */

	/* context synchronisation performed by __patch_instruction (isync or exception) */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	return err;
}

/*
 * Patch one instruction via the per-cpu vm area in init_mm: point the
 * pre-allocated PTE at the target page, write through the alias, then
 * clear the PTE and flush the TLB. Caller must have IRQs disabled.
 */
static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	err = __patch_instruction(addr, instr, patch_addr);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

/*
 * Dispatch to the appropriate patching strategy with IRQs disabled;
 * before poking_init() completes, fall back to raw patching.
 */
static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	unsigned long flags;

	/*
	 * During early early boot patch_instruction is called
	 * when text_poke_area is not ready, but we still need
	 * to allow patching. We just do the plain old patching
	 */
	if (!static_branch_likely(&poking_init_done))
		return raw_patch_instruction(addr, instr);

	local_irq_save(flags);
	if (mm_patch_enabled())
		err = __do_patch_instruction_mm(addr, instr);
	else
		err = __do_patch_instruction(addr, instr);
	local_irq_restore(flags);

	return err;
}
#else /* !CONFIG_STRICT_KERNEL_RWX */

/* Without strict RWX, kernel text is writable: patch it in place. */
static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return raw_patch_instruction(addr, instr);
}

#endif /* CONFIG_STRICT_KERNEL_RWX */

__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);

/*
 * Public entry point: patch @instr at @addr. Silently succeeds (returns
 * 0 without writing) if @addr lies in an already-freed init section.
 */
int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	/* Make sure we aren't patching a freed init section */
	if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
		return 0;

	return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);

/*
 * Build a branch from @addr to @target (honouring @flags) and patch it
 * in. Returns -ERANGE if the target is out of branch range.
 */
int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}

/*
 * Helper to check if a given instruction is a conditional branch
 * Derived from the conditional checks in analyse_instr()
 */
408c545b9f0SChristophe Leroy bool is_conditional_branch(ppc_inst_t instr) 40951c9c084SAnju T { 4108094892dSJordan Niethe unsigned int opcode = ppc_inst_primary_opcode(instr); 41151c9c084SAnju T 41251c9c084SAnju T if (opcode == 16) /* bc, bca, bcl, bcla */ 41351c9c084SAnju T return true; 41451c9c084SAnju T if (opcode == 19) { 415777e26f0SJordan Niethe switch ((ppc_inst_val(instr) >> 1) & 0x3ff) { 41651c9c084SAnju T case 16: /* bclr, bclrl */ 41751c9c084SAnju T case 528: /* bcctr, bcctrl */ 41851c9c084SAnju T case 560: /* bctar, bctarl */ 41951c9c084SAnju T return true; 42051c9c084SAnju T } 42151c9c084SAnju T } 42251c9c084SAnju T return false; 42351c9c084SAnju T } 42471f6e58eSNaveen N. Rao NOKPROBE_SYMBOL(is_conditional_branch); 42551c9c084SAnju T 426c545b9f0SChristophe Leroy int create_cond_branch(ppc_inst_t *instr, const u32 *addr, 427411781a2SMichael Ellerman unsigned long target, int flags) 428411781a2SMichael Ellerman { 429411781a2SMichael Ellerman long offset; 430411781a2SMichael Ellerman 431411781a2SMichael Ellerman offset = target; 432411781a2SMichael Ellerman if (! (flags & BRANCH_ABSOLUTE)) 433411781a2SMichael Ellerman offset = offset - (unsigned long)addr; 434411781a2SMichael Ellerman 435411781a2SMichael Ellerman /* Check we can represent the target in the instruction format */ 4364549c3eaSNaveen N. Rao if (!is_offset_in_cond_branch_range(offset)) 4377c95d889SJordan Niethe return 1; 438411781a2SMichael Ellerman 439411781a2SMichael Ellerman /* Mask out the flags and target, so they don't step on each other. 
*/ 44094afd069SJordan Niethe *instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC)); 441411781a2SMichael Ellerman 4427c95d889SJordan Niethe return 0; 443411781a2SMichael Ellerman } 444411781a2SMichael Ellerman 445c545b9f0SChristophe Leroy int instr_is_relative_branch(ppc_inst_t instr) 446411781a2SMichael Ellerman { 447777e26f0SJordan Niethe if (ppc_inst_val(instr) & BRANCH_ABSOLUTE) 448411781a2SMichael Ellerman return 0; 449411781a2SMichael Ellerman 450411781a2SMichael Ellerman return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); 451411781a2SMichael Ellerman } 452411781a2SMichael Ellerman 453c545b9f0SChristophe Leroy int instr_is_relative_link_branch(ppc_inst_t instr) 454b9eab08dSJosh Poimboeuf { 455777e26f0SJordan Niethe return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK); 456b9eab08dSJosh Poimboeuf } 457b9eab08dSJosh Poimboeuf 45869d4d6e5SChristophe Leroy static unsigned long branch_iform_target(const u32 *instr) 459411781a2SMichael Ellerman { 460411781a2SMichael Ellerman signed long imm; 461411781a2SMichael Ellerman 46218c85964SChristophe Leroy imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC; 463411781a2SMichael Ellerman 464411781a2SMichael Ellerman /* If the top bit of the immediate value is set this is negative */ 465411781a2SMichael Ellerman if (imm & 0x2000000) 466411781a2SMichael Ellerman imm -= 0x4000000; 467411781a2SMichael Ellerman 46818c85964SChristophe Leroy if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0) 469411781a2SMichael Ellerman imm += (unsigned long)instr; 470411781a2SMichael Ellerman 471411781a2SMichael Ellerman return (unsigned long)imm; 472411781a2SMichael Ellerman } 473411781a2SMichael Ellerman 47469d4d6e5SChristophe Leroy static unsigned long branch_bform_target(const u32 *instr) 475411781a2SMichael Ellerman { 476411781a2SMichael Ellerman signed long imm; 477411781a2SMichael Ellerman 47818c85964SChristophe Leroy imm = ppc_inst_val(ppc_inst_read(instr)) & 
0xFFFC; 479411781a2SMichael Ellerman 480411781a2SMichael Ellerman /* If the top bit of the immediate value is set this is negative */ 481411781a2SMichael Ellerman if (imm & 0x8000) 482411781a2SMichael Ellerman imm -= 0x10000; 483411781a2SMichael Ellerman 48418c85964SChristophe Leroy if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0) 485411781a2SMichael Ellerman imm += (unsigned long)instr; 486411781a2SMichael Ellerman 487411781a2SMichael Ellerman return (unsigned long)imm; 488411781a2SMichael Ellerman } 489411781a2SMichael Ellerman 49069d4d6e5SChristophe Leroy unsigned long branch_target(const u32 *instr) 491411781a2SMichael Ellerman { 492f8faaffaSJordan Niethe if (instr_is_branch_iform(ppc_inst_read(instr))) 493411781a2SMichael Ellerman return branch_iform_target(instr); 494f8faaffaSJordan Niethe else if (instr_is_branch_bform(ppc_inst_read(instr))) 495411781a2SMichael Ellerman return branch_bform_target(instr); 496411781a2SMichael Ellerman 497411781a2SMichael Ellerman return 0; 498411781a2SMichael Ellerman } 499411781a2SMichael Ellerman 500c545b9f0SChristophe Leroy int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src) 501411781a2SMichael Ellerman { 502411781a2SMichael Ellerman unsigned long target; 503411781a2SMichael Ellerman target = branch_target(src); 504411781a2SMichael Ellerman 505f8faaffaSJordan Niethe if (instr_is_branch_iform(ppc_inst_read(src))) 506f8faaffaSJordan Niethe return create_branch(instr, dest, target, 507f8faaffaSJordan Niethe ppc_inst_val(ppc_inst_read(src))); 508f8faaffaSJordan Niethe else if (instr_is_branch_bform(ppc_inst_read(src))) 509f8faaffaSJordan Niethe return create_cond_branch(instr, dest, target, 510f8faaffaSJordan Niethe ppc_inst_val(ppc_inst_read(src))); 511411781a2SMichael Ellerman 5127c95d889SJordan Niethe return 1; 513411781a2SMichael Ellerman } 514