// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

/*
 * Store @instr through @patch_addr (a writable alias of the instruction's
 * page) and make it visible to instruction fetch at @exec_addr (the address
 * the instruction actually executes at; may equal @patch_addr).
 *
 * Returns 0 on success, -EPERM if the store faulted.
 */
static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
	if (!ppc_inst_prefixed(instr)) {
		u32 val = ppc_inst_val(instr);

		/* Non-prefixed instruction: a single 32-bit word store */
		__put_kernel_nofault(patch_addr, &val, u32, failed);
	} else {
		u64 val = ppc_inst_as_ulong(instr);

		/* Prefixed instruction: written as one 64-bit doubleword */
		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	/*
	 * Flush the new instruction out of the data cache (dcbst; sync), then
	 * invalidate the stale icache line at the execution address. The final
	 * sync; isync sequence ensures completion and context synchronisation
	 * before any subsequent fetch of the patched location.
	 */
	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	mb(); /* sync */
	return -EPERM;
}

/* Patch directly through @addr, with no separate writable alias. */
int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return __patch_instruction(addr, instr, addr);
}

/*
 * Per-CPU patching state: either a reserved vmalloc area (init_mm based
 * patching) or a temporary mm (radix temp-mm patching), plus the poke
 * address and, for the vmalloc variant, its pre-allocated kernel PTE.
 */
struct patch_context {
	union {
		struct vm_struct *area;	/* used by text_area_cpu_up() path */
		struct mm_struct *mm;	/* used by text_area_cpu_up_mm() path */
	};
	unsigned long addr;
	pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

/* Temp-mm patching is only used on SMP with the Radix MMU. */
static bool mm_patch_enabled(void)
{
	return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}

/*
 * The following applies for Radix MMU. Hash MMU has different requirements,
 * and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm.
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *orig_mm = current->active_mm;

	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(orig_mm, temp_mm, current);

	/* The temp mm must never be active on another CPU */
	WARN_ON(!mm_is_thread_local(temp_mm));

	/* HW breakpoints must not fire while the writable alias is mapped */
	suspend_breakpoints();
	return orig_mm;
}

static void stop_using_temp_mm(struct mm_struct *temp_mm,
			       struct mm_struct *orig_mm)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(temp_mm, orig_mm, current);
	restore_breakpoints();
}
Riedl 10337bc3e5fSBalbir Singh static int text_area_cpu_up(unsigned int cpu) 10437bc3e5fSBalbir Singh { 10537bc3e5fSBalbir Singh struct vm_struct *area; 106591b4b26SMichael Ellerman unsigned long addr; 107591b4b26SMichael Ellerman int err; 10837bc3e5fSBalbir Singh 10937bc3e5fSBalbir Singh area = get_vm_area(PAGE_SIZE, VM_ALLOC); 11037bc3e5fSBalbir Singh if (!area) { 11137bc3e5fSBalbir Singh WARN_ONCE(1, "Failed to create text area for cpu %d\n", 11237bc3e5fSBalbir Singh cpu); 11337bc3e5fSBalbir Singh return -1; 11437bc3e5fSBalbir Singh } 115591b4b26SMichael Ellerman 116591b4b26SMichael Ellerman // Map/unmap the area to ensure all page tables are pre-allocated 117591b4b26SMichael Ellerman addr = (unsigned long)area->addr; 118591b4b26SMichael Ellerman err = map_patch_area(empty_zero_page, addr); 119591b4b26SMichael Ellerman if (err) 120591b4b26SMichael Ellerman return err; 121591b4b26SMichael Ellerman 122591b4b26SMichael Ellerman unmap_patch_area(addr); 123591b4b26SMichael Ellerman 1242f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.area, area); 1252f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.addr, addr); 1262f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr)); 12737bc3e5fSBalbir Singh 12837bc3e5fSBalbir Singh return 0; 12937bc3e5fSBalbir Singh } 13037bc3e5fSBalbir Singh 13137bc3e5fSBalbir Singh static int text_area_cpu_down(unsigned int cpu) 13237bc3e5fSBalbir Singh { 1332f228ee1SBenjamin Gray free_vm_area(this_cpu_read(cpu_patching_context.area)); 1342f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.area, NULL); 1352f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.addr, 0); 1362f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.pte, NULL); 13737bc3e5fSBalbir Singh return 0; 13837bc3e5fSBalbir Singh } 13937bc3e5fSBalbir Singh 140c28c15b6SChristopher M. Riedl static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr) 141c28c15b6SChristopher M. 
Riedl { 142c28c15b6SChristopher M. Riedl struct mmu_gather tlb; 143c28c15b6SChristopher M. Riedl 144c28c15b6SChristopher M. Riedl tlb_gather_mmu(&tlb, mm); 145c28c15b6SChristopher M. Riedl free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0); 146c28c15b6SChristopher M. Riedl mmput(mm); 147c28c15b6SChristopher M. Riedl } 148c28c15b6SChristopher M. Riedl 149c28c15b6SChristopher M. Riedl static int text_area_cpu_up_mm(unsigned int cpu) 150c28c15b6SChristopher M. Riedl { 151c28c15b6SChristopher M. Riedl struct mm_struct *mm; 152c28c15b6SChristopher M. Riedl unsigned long addr; 153c28c15b6SChristopher M. Riedl pte_t *pte; 154c28c15b6SChristopher M. Riedl spinlock_t *ptl; 155c28c15b6SChristopher M. Riedl 156c28c15b6SChristopher M. Riedl mm = mm_alloc(); 157c28c15b6SChristopher M. Riedl if (WARN_ON(!mm)) 158c28c15b6SChristopher M. Riedl goto fail_no_mm; 159c28c15b6SChristopher M. Riedl 160c28c15b6SChristopher M. Riedl /* 161c28c15b6SChristopher M. Riedl * Choose a random page-aligned address from the interval 162c28c15b6SChristopher M. Riedl * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE]. 163c28c15b6SChristopher M. Riedl * The lower address bound is PAGE_SIZE to avoid the zero-page. 164c28c15b6SChristopher M. Riedl */ 165c28c15b6SChristopher M. Riedl addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT; 166c28c15b6SChristopher M. Riedl 167c28c15b6SChristopher M. Riedl /* 168c28c15b6SChristopher M. Riedl * PTE allocation uses GFP_KERNEL which means we need to 169c28c15b6SChristopher M. Riedl * pre-allocate the PTE here because we cannot do the 170c28c15b6SChristopher M. Riedl * allocation during patching when IRQs are disabled. 171c28c15b6SChristopher M. Riedl * 172c28c15b6SChristopher M. Riedl * Using get_locked_pte() to avoid open coding, the lock 173c28c15b6SChristopher M. Riedl * is unnecessary. 174c28c15b6SChristopher M. Riedl */ 175c28c15b6SChristopher M. 
Riedl pte = get_locked_pte(mm, addr, &ptl); 176c28c15b6SChristopher M. Riedl if (!pte) 177c28c15b6SChristopher M. Riedl goto fail_no_pte; 178c28c15b6SChristopher M. Riedl pte_unmap_unlock(pte, ptl); 179c28c15b6SChristopher M. Riedl 1802f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.mm, mm); 1812f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.addr, addr); 182c28c15b6SChristopher M. Riedl 183c28c15b6SChristopher M. Riedl return 0; 184c28c15b6SChristopher M. Riedl 185c28c15b6SChristopher M. Riedl fail_no_pte: 186c28c15b6SChristopher M. Riedl put_patching_mm(mm, addr); 187c28c15b6SChristopher M. Riedl fail_no_mm: 188c28c15b6SChristopher M. Riedl return -ENOMEM; 189c28c15b6SChristopher M. Riedl } 190c28c15b6SChristopher M. Riedl 191c28c15b6SChristopher M. Riedl static int text_area_cpu_down_mm(unsigned int cpu) 192c28c15b6SChristopher M. Riedl { 1932f228ee1SBenjamin Gray put_patching_mm(this_cpu_read(cpu_patching_context.mm), 1942f228ee1SBenjamin Gray this_cpu_read(cpu_patching_context.addr)); 195c28c15b6SChristopher M. Riedl 1962f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.mm, NULL); 1972f228ee1SBenjamin Gray this_cpu_write(cpu_patching_context.addr, 0); 198c28c15b6SChristopher M. Riedl 199c28c15b6SChristopher M. Riedl return 0; 200c28c15b6SChristopher M. Riedl } 201c28c15b6SChristopher M. Riedl 20217512892SChristophe Leroy static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done); 20317512892SChristophe Leroy 20471a5b3dbSJordan Niethe void __init poking_init(void) 20537bc3e5fSBalbir Singh { 206c28c15b6SChristopher M. Riedl int ret; 207c28c15b6SChristopher M. Riedl 20884ecfe6fSChristophe Leroy if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX)) 20984ecfe6fSChristophe Leroy return; 21084ecfe6fSChristophe Leroy 211c28c15b6SChristopher M. Riedl if (mm_patch_enabled()) 212c28c15b6SChristopher M. Riedl ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 213c28c15b6SChristopher M. Riedl "powerpc/text_poke_mm:online", 214c28c15b6SChristopher M. 
Riedl text_area_cpu_up_mm, 215c28c15b6SChristopher M. Riedl text_area_cpu_down_mm); 216c28c15b6SChristopher M. Riedl else 217c28c15b6SChristopher M. Riedl ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, 218071c95c1SBenjamin Gray "powerpc/text_poke:online", 219071c95c1SBenjamin Gray text_area_cpu_up, 220071c95c1SBenjamin Gray text_area_cpu_down); 221071c95c1SBenjamin Gray 222071c95c1SBenjamin Gray /* cpuhp_setup_state returns >= 0 on success */ 223071c95c1SBenjamin Gray if (WARN_ON(ret < 0)) 224071c95c1SBenjamin Gray return; 225071c95c1SBenjamin Gray 22617512892SChristophe Leroy static_branch_enable(&poking_init_done); 22737bc3e5fSBalbir Singh } 22837bc3e5fSBalbir Singh 2298b4bb0adSChristophe Leroy static unsigned long get_patch_pfn(void *addr) 2308b4bb0adSChristophe Leroy { 2318b4bb0adSChristophe Leroy if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr)) 2328b4bb0adSChristophe Leroy return vmalloc_to_pfn(addr); 2338b4bb0adSChristophe Leroy else 2348b4bb0adSChristophe Leroy return __pa_symbol(addr) >> PAGE_SHIFT; 2358b4bb0adSChristophe Leroy } 2368b4bb0adSChristophe Leroy 23737bc3e5fSBalbir Singh /* 23837bc3e5fSBalbir Singh * This can be called for kernel text or a module. 
23937bc3e5fSBalbir Singh */ 24037bc3e5fSBalbir Singh static int map_patch_area(void *addr, unsigned long text_poke_addr) 24137bc3e5fSBalbir Singh { 2428b4bb0adSChristophe Leroy unsigned long pfn = get_patch_pfn(addr); 24337bc3e5fSBalbir Singh 244285672f9SChristophe Leroy return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL); 24537bc3e5fSBalbir Singh } 24637bc3e5fSBalbir Singh 247a3483c3dSChristophe Leroy static void unmap_patch_area(unsigned long addr) 24837bc3e5fSBalbir Singh { 24937bc3e5fSBalbir Singh pte_t *ptep; 25037bc3e5fSBalbir Singh pmd_t *pmdp; 25137bc3e5fSBalbir Singh pud_t *pudp; 2522fb47060SMike Rapoport p4d_t *p4dp; 25337bc3e5fSBalbir Singh pgd_t *pgdp; 25437bc3e5fSBalbir Singh 25537bc3e5fSBalbir Singh pgdp = pgd_offset_k(addr); 256a3483c3dSChristophe Leroy if (WARN_ON(pgd_none(*pgdp))) 257a3483c3dSChristophe Leroy return; 25837bc3e5fSBalbir Singh 2592fb47060SMike Rapoport p4dp = p4d_offset(pgdp, addr); 260a3483c3dSChristophe Leroy if (WARN_ON(p4d_none(*p4dp))) 261a3483c3dSChristophe Leroy return; 2622fb47060SMike Rapoport 2632fb47060SMike Rapoport pudp = pud_offset(p4dp, addr); 264a3483c3dSChristophe Leroy if (WARN_ON(pud_none(*pudp))) 265a3483c3dSChristophe Leroy return; 26637bc3e5fSBalbir Singh 26737bc3e5fSBalbir Singh pmdp = pmd_offset(pudp, addr); 268a3483c3dSChristophe Leroy if (WARN_ON(pmd_none(*pmdp))) 269a3483c3dSChristophe Leroy return; 27037bc3e5fSBalbir Singh 27137bc3e5fSBalbir Singh ptep = pte_offset_kernel(pmdp, addr); 272a3483c3dSChristophe Leroy if (WARN_ON(pte_none(*ptep))) 273a3483c3dSChristophe Leroy return; 27437bc3e5fSBalbir Singh 27537bc3e5fSBalbir Singh /* 27637bc3e5fSBalbir Singh * In hash, pte_clear flushes the tlb, in radix, we have to 27737bc3e5fSBalbir Singh */ 27837bc3e5fSBalbir Singh pte_clear(&init_mm, addr, ptep); 27937bc3e5fSBalbir Singh flush_tlb_kernel_range(addr, addr + PAGE_SIZE); 28037bc3e5fSBalbir Singh } 28137bc3e5fSBalbir Singh 282c28c15b6SChristopher M. 
/*
 * Patch @addr by mapping its page into the per-CPU temporary mm at the
 * pre-chosen poke address and writing through that alias. Radix-only path;
 * caller holds IRQs disabled.
 */
static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);
	struct mm_struct *patching_mm;
	struct mm_struct *orig_mm;
	spinlock_t *ptl;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
	if (!pte)
		return -ENOMEM;

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync": : :"memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_instruction(addr, instr, patch_addr);

	/* context synchronisation performed by __patch_instruction (isync or exception) */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	pte_unmap_unlock(pte, ptl);

	return err;
}

/*
 * Patch @addr through the per-CPU vmalloc poke area, using the PTE that
 * text_area_cpu_up() pre-allocated. Caller holds IRQs disabled.
 */
static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	err = __patch_instruction(addr, instr, patch_addr);

	/* Unmap the alias and flush it from the TLB before returning */
	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

/*
 * Replace the instruction at @addr with @instr, honouring
 * CONFIG_STRICT_KERNEL_RWX by writing through a temporary mapping once
 * poking_init() has run. Returns 0 on success, negative on failure.
 */
int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	unsigned long flags;

	/*
	 * During early early boot patch_instruction is called
	 * when text_poke_area is not ready, but we still need
	 * to allow patching. We just do the plain old patching
	 */
	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
	    !static_branch_likely(&poking_init_done))
		return raw_patch_instruction(addr, instr);

	local_irq_save(flags);
	if (mm_patch_enabled())
		err = __do_patch_instruction_mm(addr, instr);
	else
		err = __do_patch_instruction(addr, instr);
	local_irq_restore(flags);

	return err;
}
NOKPROBE_SYMBOL(patch_instruction);

/*
 * Build an unconditional branch from @addr to @target and patch it in.
 * Returns -ERANGE when the target is out of branch range.
 */
int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}

/*
 * Helper to check if a given instruction is a conditional branch
 * Derived from the conditional checks in analyse_instr()
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)       /* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:        /* bclr, bclrl */
		case 528:       /* bcctr, bcctrl */
		case 560:       /* bctar, bctarl */
			return true;
		}
	}
	return false;
}
Rao NOKPROBE_SYMBOL(is_conditional_branch); 40951c9c084SAnju T 410c545b9f0SChristophe Leroy int create_cond_branch(ppc_inst_t *instr, const u32 *addr, 411411781a2SMichael Ellerman unsigned long target, int flags) 412411781a2SMichael Ellerman { 413411781a2SMichael Ellerman long offset; 414411781a2SMichael Ellerman 415411781a2SMichael Ellerman offset = target; 416411781a2SMichael Ellerman if (! (flags & BRANCH_ABSOLUTE)) 417411781a2SMichael Ellerman offset = offset - (unsigned long)addr; 418411781a2SMichael Ellerman 419411781a2SMichael Ellerman /* Check we can represent the target in the instruction format */ 4204549c3eaSNaveen N. Rao if (!is_offset_in_cond_branch_range(offset)) 4217c95d889SJordan Niethe return 1; 422411781a2SMichael Ellerman 423411781a2SMichael Ellerman /* Mask out the flags and target, so they don't step on each other. */ 42494afd069SJordan Niethe *instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC)); 425411781a2SMichael Ellerman 4267c95d889SJordan Niethe return 0; 427411781a2SMichael Ellerman } 428411781a2SMichael Ellerman 429c545b9f0SChristophe Leroy int instr_is_relative_branch(ppc_inst_t instr) 430411781a2SMichael Ellerman { 431777e26f0SJordan Niethe if (ppc_inst_val(instr) & BRANCH_ABSOLUTE) 432411781a2SMichael Ellerman return 0; 433411781a2SMichael Ellerman 434411781a2SMichael Ellerman return instr_is_branch_iform(instr) || instr_is_branch_bform(instr); 435411781a2SMichael Ellerman } 436411781a2SMichael Ellerman 437c545b9f0SChristophe Leroy int instr_is_relative_link_branch(ppc_inst_t instr) 438b9eab08dSJosh Poimboeuf { 439777e26f0SJordan Niethe return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK); 440b9eab08dSJosh Poimboeuf } 441b9eab08dSJosh Poimboeuf 44269d4d6e5SChristophe Leroy static unsigned long branch_iform_target(const u32 *instr) 443411781a2SMichael Ellerman { 444411781a2SMichael Ellerman signed long imm; 445411781a2SMichael Ellerman 44618c85964SChristophe Leroy imm = 
ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC; 447411781a2SMichael Ellerman 448411781a2SMichael Ellerman /* If the top bit of the immediate value is set this is negative */ 449411781a2SMichael Ellerman if (imm & 0x2000000) 450411781a2SMichael Ellerman imm -= 0x4000000; 451411781a2SMichael Ellerman 45218c85964SChristophe Leroy if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0) 453411781a2SMichael Ellerman imm += (unsigned long)instr; 454411781a2SMichael Ellerman 455411781a2SMichael Ellerman return (unsigned long)imm; 456411781a2SMichael Ellerman } 457411781a2SMichael Ellerman 45869d4d6e5SChristophe Leroy static unsigned long branch_bform_target(const u32 *instr) 459411781a2SMichael Ellerman { 460411781a2SMichael Ellerman signed long imm; 461411781a2SMichael Ellerman 46218c85964SChristophe Leroy imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC; 463411781a2SMichael Ellerman 464411781a2SMichael Ellerman /* If the top bit of the immediate value is set this is negative */ 465411781a2SMichael Ellerman if (imm & 0x8000) 466411781a2SMichael Ellerman imm -= 0x10000; 467411781a2SMichael Ellerman 46818c85964SChristophe Leroy if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0) 469411781a2SMichael Ellerman imm += (unsigned long)instr; 470411781a2SMichael Ellerman 471411781a2SMichael Ellerman return (unsigned long)imm; 472411781a2SMichael Ellerman } 473411781a2SMichael Ellerman 47469d4d6e5SChristophe Leroy unsigned long branch_target(const u32 *instr) 475411781a2SMichael Ellerman { 476f8faaffaSJordan Niethe if (instr_is_branch_iform(ppc_inst_read(instr))) 477411781a2SMichael Ellerman return branch_iform_target(instr); 478f8faaffaSJordan Niethe else if (instr_is_branch_bform(ppc_inst_read(instr))) 479411781a2SMichael Ellerman return branch_bform_target(instr); 480411781a2SMichael Ellerman 481411781a2SMichael Ellerman return 0; 482411781a2SMichael Ellerman } 483411781a2SMichael Ellerman 484c545b9f0SChristophe Leroy int translate_branch(ppc_inst_t 
*instr, const u32 *dest, const u32 *src) 485411781a2SMichael Ellerman { 486411781a2SMichael Ellerman unsigned long target; 487411781a2SMichael Ellerman target = branch_target(src); 488411781a2SMichael Ellerman 489f8faaffaSJordan Niethe if (instr_is_branch_iform(ppc_inst_read(src))) 490f8faaffaSJordan Niethe return create_branch(instr, dest, target, 491f8faaffaSJordan Niethe ppc_inst_val(ppc_inst_read(src))); 492f8faaffaSJordan Niethe else if (instr_is_branch_bform(ppc_inst_read(src))) 493f8faaffaSJordan Niethe return create_cond_branch(instr, dest, target, 494f8faaffaSJordan Niethe ppc_inst_val(ppc_inst_read(src))); 495411781a2SMichael Ellerman 4967c95d889SJordan Niethe return 1; 497411781a2SMichael Ellerman } 498