// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
{
	if (!IS_ENABLED(CONFIG_PPC64) || likely(!is_dword)) {
		/* For big endian correctness: plain address would use the wrong half */
		u32 val32 = val;

		__put_kernel_nofault(patch_addr, &val32, u32, failed);
	} else {
		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	asm ("dcbst 0, %0; sync; icbi 0,%1; sync; isync" :: "r" (patch_addr),
							    "r" (exec_addr));

	return 0;

failed:
	mb();  /* sync */
	return -EPERM;
}

int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	if (ppc_inst_prefixed(instr))
		return __patch_mem(addr, ppc_inst_as_ulong(instr), addr, true);
	else
		return __patch_mem(addr, ppc_inst_val(instr), addr, false);
}

struct patch_context {
	union {
		struct vm_struct *area;
		struct mm_struct *mm;
	};
	unsigned long addr;
	pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

static bool mm_patch_enabled(void)
{
	return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}

/*
 * The following applies for Radix MMU. Hash MMU has different requirements,
 * and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm. (An illustrative caller sequence is
 * sketched after stop_using_temp_mm() below.)
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *orig_mm = current->active_mm;

	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(orig_mm, temp_mm, current);

	WARN_ON(!mm_is_thread_local(temp_mm));

	suspend_breakpoints();
	return orig_mm;
}

static void stop_using_temp_mm(struct mm_struct *temp_mm,
			       struct mm_struct *orig_mm)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(temp_mm, orig_mm, current);
	restore_breakpoints();
}
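
/*
 * Illustrative sketch only (not upstream code): the caller sequence the
 * comment above describes, as __do_patch_mem_mm() below implements it.
 * 'patching_mm' and 'patch()' stand in for the real per-cpu mm and the
 * actual write.
 *
 *	asm volatile("ptesync" ::: "memory");	// hwsync; orders prior PTE set-up
 *	isync();				// CSI before switching context
 *	orig_mm = start_using_temp_mm(patching_mm);
 *	patch();				// write via the temporary mapping;
 *						// must end with a CSI (isync) or
 *						// take an exception
 *	stop_using_temp_mm(patching_mm, orig_mm);
 */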

static int text_area_cpu_up(unsigned int cpu)
{
	struct vm_struct *area;
	unsigned long addr;
	int err;

	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area) {
		WARN_ONCE(1, "Failed to create text area for cpu %d\n",
			cpu);
		return -1;
	}

	// Map/unmap the area to ensure all page tables are pre-allocated
	addr = (unsigned long)area->addr;
	err = map_patch_area(empty_zero_page, addr);
	if (err)
		return err;

	unmap_patch_area(addr);

	this_cpu_write(cpu_patching_context.area, area);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));

	return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
	free_vm_area(this_cpu_read(cpu_patching_context.area));
	this_cpu_write(cpu_patching_context.area, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);
	return 0;
}

static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
	mmput(mm);
}

static int text_area_cpu_up_mm(unsigned int cpu)
{
	struct mm_struct *mm;
	unsigned long addr;
	pte_t *pte;
	spinlock_t *ptl;

	mm = mm_alloc();
	if (WARN_ON(!mm))
		goto fail_no_mm;

	/*
	 * Choose a random page-aligned address from the interval
	 * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE].
	 * The lower address bound is PAGE_SIZE to avoid the zero-page.
	 */
	addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;

	/*
	 * PTE allocation uses GFP_KERNEL which means we need to
	 * pre-allocate the PTE here because we cannot do the
	 * allocation during patching when IRQs are disabled.
	 *
	 * Use get_locked_pte() to avoid open coding; the lock itself is
	 * unnecessary here.
	 */
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto fail_no_pte;
	pte_unmap_unlock(pte, ptl);

	this_cpu_write(cpu_patching_context.mm, mm);
	this_cpu_write(cpu_patching_context.addr, addr);

	return 0;

fail_no_pte:
	put_patching_mm(mm, addr);
fail_no_mm:
	return -ENOMEM;
}

static int text_area_cpu_down_mm(unsigned int cpu)
{
	put_patching_mm(this_cpu_read(cpu_patching_context.mm),
			this_cpu_read(cpu_patching_context.addr));

	this_cpu_write(cpu_patching_context.mm, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);

	return 0;
}

static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);

void __init poking_init(void)
{
	int ret;

	if (mm_patch_enabled())
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke_mm:online",
					text_area_cpu_up_mm,
					text_area_cpu_down_mm);
	else
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke:online",
					text_area_cpu_up,
					text_area_cpu_down);

	/* cpuhp_setup_state returns >= 0 on success */
	if (WARN_ON(ret < 0))
		return;

	static_branch_enable(&poking_init_done);
}

static unsigned long get_patch_pfn(void *addr)
{
	if (IS_ENABLED(CONFIG_EXECMEM) && is_vmalloc_or_module_addr(addr))
		return vmalloc_to_pfn(addr);
	else
		return __pa_symbol(addr) >> PAGE_SHIFT;
}

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
	unsigned long pfn = get_patch_pfn(addr);

	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}

static void unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	p4d_t *p4dp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (WARN_ON(pgd_none(*pgdp)))
		return;

	p4dp = p4d_offset(pgdp, addr);
	if (WARN_ON(p4d_none(*p4dp)))
		return;

	pudp = pud_offset(p4dp, addr);
	if (WARN_ON(pud_none(*pudp)))
		return;

	pmdp = pmd_offset(pudp, addr);
	if (WARN_ON(pmd_none(*pmdp)))
		return;

	ptep = pte_offset_kernel(pmdp, addr);
	if (WARN_ON(pte_none(*ptep)))
		return;

	/*
	 * In hash, pte_clear flushes the tlb; in radix, we have to do it
	 * explicitly.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}

static int __do_patch_mem_mm(void *addr, unsigned long val, bool is_dword)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);
	struct mm_struct *patching_mm;
	struct mm_struct *orig_mm;
	spinlock_t *ptl;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
	if (!pte)
		return -ENOMEM;

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync": : :"memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_mem(addr, val, patch_addr, is_dword);

	/* context synchronisation performed by __patch_mem() (isync or exception) */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	pte_unmap_unlock(pte, ptl);

	return err;
}

static int __do_patch_mem(void *addr, unsigned long val, bool is_dword)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	err = __patch_mem(addr, val, patch_addr, is_dword);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

static int patch_mem(void *addr, unsigned long val, bool is_dword)
{
	int err;
	unsigned long flags;

	/*
	 * During early boot patch_instruction() is called before the
	 * patching context is ready, but we still need to allow
	 * patching. We just do the plain old patching.
	 */
	if (!IS_ENABLED(CONFIG_STRICT_KERNEL_RWX) ||
	    !static_branch_likely(&poking_init_done))
		return __patch_mem(addr, val, addr, is_dword);

	local_irq_save(flags);
	if (mm_patch_enabled())
		err = __do_patch_mem_mm(addr, val, is_dword);
	else
		err = __do_patch_mem(addr, val, is_dword);
	local_irq_restore(flags);

	return err;
}

#ifdef CONFIG_PPC64

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	if (ppc_inst_prefixed(instr))
		return patch_mem(addr, ppc_inst_as_ulong(instr), true);
	else
		return patch_mem(addr, ppc_inst_val(instr), false);
}
NOKPROBE_SYMBOL(patch_instruction);

int patch_uint(void *addr, unsigned int val)
{
	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned int)))
		return -EINVAL;

	return patch_mem(addr, val, false);
}
NOKPROBE_SYMBOL(patch_uint);

int patch_ulong(void *addr, unsigned long val)
{
	if (!IS_ALIGNED((unsigned long)addr, sizeof(unsigned long)))
		return -EINVAL;

	return patch_mem(addr, val, true);
}
NOKPROBE_SYMBOL(patch_ulong);

#else

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return patch_mem(addr, ppc_inst_val(instr), false);
}
NOKPROBE_SYMBOL(patch_instruction)

#endif
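
/*
 * Illustrative sketch only (not upstream code): typical use of the
 * patching API above. 'addr' and 'data' are hypothetical targets;
 * PPC_RAW_NOP() comes from asm/ppc-opcode.h.
 *
 *	// Replace one instruction with a nop:
 *	err = patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
 *
 *	// Patch a naturally aligned data word:
 *	err = patch_uint(data, 0x12345678);
 */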

static int patch_memset64(u64 *addr, u64 val, size_t count)
{
	for (u64 *end = addr + count; addr < end; addr++)
		__put_kernel_nofault(addr, &val, u64, failed);

	return 0;

failed:
	return -EPERM;
}

static int patch_memset32(u32 *addr, u32 val, size_t count)
{
	for (u32 *end = addr + count; addr < end; addr++)
		__put_kernel_nofault(addr, &val, u32, failed);

	return 0;

failed:
	return -EPERM;
}

static int __patch_instructions(u32 *patch_addr, u32 *code, size_t len, bool repeat_instr)
{
	unsigned long start = (unsigned long)patch_addr;
	int err;

	/* Repeat instruction */
	if (repeat_instr) {
		ppc_inst_t instr = ppc_inst_read(code);

		if (ppc_inst_prefixed(instr)) {
			u64 val = ppc_inst_as_ulong(instr);

			err = patch_memset64((u64 *)patch_addr, val, len / 8);
		} else {
			u32 val = ppc_inst_val(instr);

			err = patch_memset32(patch_addr, val, len / 4);
		}
	} else {
		err = copy_to_kernel_nofault(patch_addr, code, len);
	}

	smp_wmb();	/* smp write barrier */
	flush_icache_range(start, start + len);
	return err;
}

/*
 * A page is mapped and instructions that fit the page are patched.
 * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
 */
static int __do_patch_instructions_mm(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	struct mm_struct *patching_mm, *orig_mm;
	unsigned long pfn = get_patch_pfn(addr);
	unsigned long text_poke_addr;
	spinlock_t *ptl;
	u32 *patch_addr;
	pte_t *pte;
	int err;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = get_locked_pte(patching_mm, text_poke_addr, &ptl);
	if (!pte)
		return -ENOMEM;

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync" ::: "memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_instructions(patch_addr, code, len, repeat_instr);

	/* context synchronisation performed by __patch_instructions */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	pte_unmap_unlock(pte, ptl);

	return err;
}

/*
 * A page is mapped and instructions that fit the page are patched.
 * Assumes 'len' to be (PAGE_SIZE - offset_in_page(addr)) or below.
 */
static int __do_patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	unsigned long pfn = get_patch_pfn(addr);
	unsigned long text_poke_addr;
	u32 *patch_addr;
	pte_t *pte;
	int err;

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync" ::: "memory");

	err = __patch_instructions(patch_addr, code, len, repeat_instr);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

/*
 * Patch 'addr' with 'len' bytes of instructions from 'code'.
 *
 * If repeat_instr is true, the same instruction is filled for
 * 'len' bytes.
 */
int patch_instructions(u32 *addr, u32 *code, size_t len, bool repeat_instr)
{
	while (len > 0) {
		unsigned long flags;
		size_t plen;
		int err;

		plen = min_t(size_t, PAGE_SIZE - offset_in_page(addr), len);

		local_irq_save(flags);
		if (mm_patch_enabled())
			err = __do_patch_instructions_mm(addr, code, plen, repeat_instr);
		else
			err = __do_patch_instructions(addr, code, plen, repeat_instr);
		local_irq_restore(flags);
		if (err)
			return err;

		len -= plen;
		addr = (u32 *)((unsigned long)addr + plen);
		if (!repeat_instr)
			code = (u32 *)((unsigned long)code + plen);
	}

	return 0;
}
NOKPROBE_SYMBOL(patch_instructions);
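
/*
 * Illustrative sketch only (not upstream code): filling a (hypothetical)
 * 64-byte region at 'dst' with nops by repeating a single instruction,
 * assuming PPC_RAW_NOP() from asm/ppc-opcode.h.
 *
 *	u32 nop = PPC_RAW_NOP();
 *
 *	err = patch_instructions(dst, &nop, 64, true);
 */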

int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}
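
/*
 * Illustrative sketch only (not upstream code): redirecting a
 * (hypothetical) call site 'site' to 'new_func' with a relative "bl",
 * using ppc_function_entry() from asm/code-patching.h.
 *
 *	err = patch_branch(site, ppc_function_entry(new_func), BRANCH_SET_LINK);
 */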

/*
 * Helper to check if a given instruction is a conditional branch
 * Derived from the conditional checks in analyse_instr()
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)       /* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:        /* bclr, bclrl */
		case 528:       /* bcctr, bcctrl */
		case 560:       /* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);
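
/*
 * Illustrative sketch only (not upstream code): raw encodings, e.g. as
 * read from a probed instruction.
 *
 *	is_conditional_branch(ppc_inst(0x40820008));	// bne +8 -> true
 *	is_conditional_branch(ppc_inst(0x48000008));	// b   +8 -> false
 */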

int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags)
{
	long offset;

	offset = target;
	if (! (flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_cond_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));

	return 0;
}

int instr_is_relative_branch(ppc_inst_t instr)
{
	if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
		return 0;

	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(ppc_inst_t instr)
{
	return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

static unsigned long branch_bform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

unsigned long branch_target(const u32 *instr)
{
	if (instr_is_branch_iform(ppc_inst_read(instr)))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(ppc_inst_read(instr)))
		return branch_bform_target(instr);

	return 0;
}

int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
	unsigned long target;
	target = branch_target(src);

	if (instr_is_branch_iform(ppc_inst_read(src)))
		return create_branch(instr, dest, target,
				     ppc_inst_val(ppc_inst_read(src)));
	else if (instr_is_branch_bform(ppc_inst_read(src)))
		return create_cond_branch(instr, dest, target,
					  ppc_inst_val(ppc_inst_read(src)));

	return 1;
}