// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
	if (!ppc_inst_prefixed(instr)) {
		u32 val = ppc_inst_val(instr);

		__put_kernel_nofault(patch_addr, &val, u32, failed);
	} else {
		u64 val = ppc_inst_as_ulong(instr);

		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	return -EPERM;
}

int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return __patch_instruction(addr, instr, addr);
}
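/*
 * Note on the barrier sequence in __patch_instruction() above: dcbst pushes
 * the modified instruction out of the data cache, the first sync makes the
 * store visible, icbi invalidates any stale copy in the instruction cache
 * (via the executable alias, which may differ from the writable alias), and
 * the trailing sync; isync prevent execution of a stale prefetched copy.
 */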
struct patch_context {
	union {
		struct vm_struct *area;
		struct mm_struct *mm;
	};
	unsigned long addr;
	pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

static bool mm_patch_enabled(void)
{
	return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}

/*
 * The following applies for Radix MMU. Hash MMU has different requirements,
 * and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm.
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *orig_mm = current->active_mm;

	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(orig_mm, temp_mm, current);

	WARN_ON(!mm_is_thread_local(temp_mm));

	suspend_breakpoints();
	return orig_mm;
}

static void stop_using_temp_mm(struct mm_struct *temp_mm,
			       struct mm_struct *orig_mm)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(temp_mm, orig_mm, current);
	restore_breakpoints();
}
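/*
 * Usage sketch: __do_patch_instruction_mm() below brackets the write with
 * this pair while IRQs are disabled:
 *
 *	orig_mm = start_using_temp_mm(patching_mm);
 *	err = __patch_instruction(addr, instr, patch_addr);
 *	stop_using_temp_mm(patching_mm, orig_mm);
 */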
static int text_area_cpu_up(unsigned int cpu)
{
	struct vm_struct *area;
	unsigned long addr;
	int err;

	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area) {
		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
			cpu);
		return -1;
	}

	// Map/unmap the area to ensure all page tables are pre-allocated
	addr = (unsigned long)area->addr;
	err = map_patch_area(empty_zero_page, addr);
	if (err)
		return err;

	unmap_patch_area(addr);

	this_cpu_write(cpu_patching_context.area, area);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));

	return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
	free_vm_area(this_cpu_read(cpu_patching_context.area));
	this_cpu_write(cpu_patching_context.area, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);
	return 0;
}
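/*
 * text_area_cpu_up()/text_area_cpu_down() above back the vmalloc-based
 * patching path; the *_mm() variants below set up the temporary-mm path
 * used when mm_patch_enabled() is true. poking_init() registers one pair
 * or the other as CPU hotplug callbacks.
 */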
static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
	mmput(mm);
}

static int text_area_cpu_up_mm(unsigned int cpu)
{
	struct mm_struct *mm;
	unsigned long addr;
	pte_t *pte;
	spinlock_t *ptl;

	mm = mm_alloc();
	if (WARN_ON(!mm))
		goto fail_no_mm;

	/*
	 * Choose a random page-aligned address from the interval
	 * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE].
	 * The lower address bound is PAGE_SIZE to avoid the zero-page.
	 */
	addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;

	/*
	 * PTE allocation uses GFP_KERNEL which means we need to
	 * pre-allocate the PTE here because we cannot do the
	 * allocation during patching when IRQs are disabled.
	 *
	 * Using get_locked_pte() to avoid open coding, the lock
	 * is unnecessary.
	 */
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto fail_no_pte;
	pte_unmap_unlock(pte, ptl);

	this_cpu_write(cpu_patching_context.mm, mm);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, pte);

	return 0;

fail_no_pte:
	put_patching_mm(mm, addr);
fail_no_mm:
	return -ENOMEM;
}

static int text_area_cpu_down_mm(unsigned int cpu)
{
	put_patching_mm(this_cpu_read(cpu_patching_context.mm),
			this_cpu_read(cpu_patching_context.addr));

	this_cpu_write(cpu_patching_context.mm, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);

	return 0;
}

static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);
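/*
 * Until poking_init() flips this key, patch_instruction() falls back to
 * raw_patch_instruction(), which writes directly through the kernel's
 * normal mapping. That keeps very early boot patching working before the
 * per-CPU patching areas exist.
 */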
void __init poking_init(void)
{
	int ret;

	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX))
		return;

	if (mm_patch_enabled())
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke_mm:online",
					text_area_cpu_up_mm,
					text_area_cpu_down_mm);
	else
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke:online",
					text_area_cpu_up,
					text_area_cpu_down);

	/* cpuhp_setup_state returns >= 0 on success */
	if (WARN_ON(ret < 0))
		return;

	static_branch_enable(&poking_init_done);
}

static unsigned long get_patch_pfn(void *addr)
{
	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
		return vmalloc_to_pfn(addr);
	else
		return __pa_symbol(addr) >> PAGE_SHIFT;
}

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
	unsigned long pfn = get_patch_pfn(addr);

	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}

static void unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	p4d_t *p4dp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (WARN_ON(pgd_none(*pgdp)))
		return;

	p4dp = p4d_offset(pgdp, addr);
	if (WARN_ON(p4d_none(*p4dp)))
		return;

	pudp = pud_offset(p4dp, addr);
	if (WARN_ON(pud_none(*pudp)))
		return;

	pmdp = pmd_offset(pudp, addr);
	if (WARN_ON(pmd_none(*pmdp)))
		return;

	ptep = pte_offset_kernel(pmdp, addr);
	if (WARN_ON(pte_none(*ptep)))
		return;

	/*
	 * In hash, pte_clear() flushes the TLB; in radix, we have to do it
	 * ourselves.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
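/*
 * Two implementations of the actual poke follow. __do_patch_instruction_mm()
 * maps the target page at a per-CPU random address in a temporary mm (Radix
 * only); __do_patch_instruction() maps it at a fixed per-CPU vmalloc address
 * in init_mm. Both run with IRQs disabled, see do_patch_instruction().
 */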
static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);
	struct mm_struct *patching_mm;
	struct mm_struct *orig_mm;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	pte = __this_cpu_read(cpu_patching_context.pte);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync": : :"memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_instruction(addr, instr, patch_addr);

	/* hwsync performed by __patch_instruction (sync) if successful */
	if (err)
		mb(); /* sync */

	/* context synchronisation performed by __patch_instruction (isync or exception) */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	return err;
}
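/*
 * The temporary mm is only ever activated on the local CPU (see the
 * mm_is_thread_local() check in start_using_temp_mm()), so a local tlbiel
 * flush is sufficient on teardown and no broadcast invalidation is needed.
 */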
static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	err = __patch_instruction(addr, instr, patch_addr);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}
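/*
 * Unlike the mm-based variant, the mapping above lives in init_mm, so
 * tearing it down requires flush_tlb_kernel_range() rather than a purely
 * local flush of a private context.
 */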
static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	unsigned long flags;

	/*
	 * During early boot patch_instruction() is called before the
	 * text_poke area is ready, but we still need to allow patching,
	 * so just do the plain old patching.
	 */
	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
	    !static_branch_likely(&poking_init_done))
		return raw_patch_instruction(addr, instr);

	local_irq_save(flags);
	if (mm_patch_enabled())
		err = __do_patch_instruction_mm(addr, instr);
	else
		err = __do_patch_instruction(addr, instr);
	local_irq_restore(flags);

	return err;
}

__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	/* Make sure we aren't patching a freed init section */
	if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
		return 0;

	return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);

int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}
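/*
 * Usage sketch (illustrative only, not part of this file): NOP out an
 * instruction, then install a relative branch over it. "site" and "target"
 * are hypothetical addresses; error handling elided.
 *
 *	patch_instruction(site, ppc_inst(PPC_RAW_NOP()));
 *	patch_branch(site, (unsigned long)target, 0);
 */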
/*
 * Helper to check if a given instruction is a conditional branch
 * Derived from the conditional checks in analyse_instr()
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);

int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags)
{
	long offset;

	offset = target;
	if (! (flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_cond_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));

	return 0;
}

int instr_is_relative_branch(ppc_inst_t instr)
{
	if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
		return 0;

	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(ppc_inst_t instr)
{
	return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

static unsigned long branch_bform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

unsigned long branch_target(const u32 *instr)
{
	if (instr_is_branch_iform(ppc_inst_read(instr)))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(ppc_inst_read(instr)))
		return branch_bform_target(instr);

	return 0;
}
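/*
 * Worked example for branch_iform_target(): "b -8" encodes its LI field as
 * 0x3FFFFF8. Bit 0x2000000 is set, so imm -= 0x4000000 sign-extends it to
 * -8, which is then added to the instruction's own address because the
 * branch is relative.
 */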
int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
	unsigned long target;
	target = branch_target(src);

	if (instr_is_branch_iform(ppc_inst_read(src)))
		return create_branch(instr, dest, target,
				     ppc_inst_val(ppc_inst_read(src)));
	else if (instr_is_branch_bform(ppc_inst_read(src)))
		return create_cond_branch(instr, dest, target,
					  ppc_inst_val(ppc_inst_read(src)));

	return 1;
}
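/*
 * translate_branch() is useful when a branch instruction is copied to a new
 * location (e.g. when relocating probed code): it rebuilds the branch at
 * "dest" so that it still resolves to the same absolute target it had at
 * "src".
 */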