xref: /linux/arch/arm64/include/asm/assembler.h (revision da1d9caf95def6f0320819cf941c9fd1069ba9e1)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
4  *
5  * Copyright (C) 1996-2000 Russell King
6  * Copyright (C) 2012 ARM Ltd.
7  */
8 #ifndef __ASSEMBLY__
9 #error "Only include this from assembly code"
10 #endif
11 
12 #ifndef __ASM_ASSEMBLER_H
13 #define __ASM_ASSEMBLER_H
14 
15 #include <asm-generic/export.h>
16 
17 #include <asm/alternative.h>
18 #include <asm/asm-bug.h>
19 #include <asm/asm-extable.h>
20 #include <asm/asm-offsets.h>
21 #include <asm/cpufeature.h>
22 #include <asm/cputype.h>
23 #include <asm/debug-monitors.h>
24 #include <asm/page.h>
25 #include <asm/pgtable-hwdef.h>
26 #include <asm/ptrace.h>
27 #include <asm/thread_info.h>
28 
29 	/*
30 	 * Provide a wxN alias for each wN register so that we can paste an xN
31 	 * reference after a 'w' to obtain the 32-bit version.
32 	 */
33 	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
34 	wx\n	.req	w\n
35 	.endr
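
	/*
	 * Illustrative sketch (hypothetical macro, not used elsewhere): with a
	 * macro argument \xreg naming an x register, "w\xreg" pastes to e.g.
	 * "wx3", which the aliases above resolve to w3:
	 *
	 *	.macro	trunc_to_32, xreg
	 *	mov	w\xreg, w\xreg		// "mov wx3, wx3" -> "mov w3, w3", zero-extends x3
	 *	.endm
	 */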
36 
37 	.macro save_and_disable_daif, flags
38 	mrs	\flags, daif
39 	msr	daifset, #0xf
40 	.endm
41 
42 	.macro disable_daif
43 	msr	daifset, #0xf
44 	.endm
45 
46 	.macro enable_daif
47 	msr	daifclr, #0xf
48 	.endm
49 
50 	.macro	restore_daif, flags:req
51 	msr	daif, \flags
52 	.endm
53 
54 	/* IRQ/FIQ are the lowest priority flags; unconditionally unmask the rest. */
55 	.macro enable_da
56 	msr	daifclr, #(8 | 4)
57 	.endm
58 
59 /*
60  * Save/restore interrupts.
61  */
62 	.macro	save_and_disable_irq, flags
63 	mrs	\flags, daif
64 	msr	daifset, #3
65 	.endm
66 
67 	.macro	restore_irq, flags
68 	msr	daif, \flags
69 	.endm
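
	/*
	 * Illustrative pairing sketch (scratch register choice is hypothetical):
	 *
	 *	save_and_disable_irq	x9	// x9 := DAIF, then mask IRQ/FIQ
	 *	...				// IRQ/FIQ-masked critical section
	 *	restore_irq		x9	// put the saved DAIF bits back
	 */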
70 
71 	.macro	enable_dbg
72 	msr	daifclr, #8
73 	.endm
74 
75 	.macro	disable_step_tsk, flgs, tmp
76 	tbz	\flgs, #TIF_SINGLESTEP, 9990f
77 	mrs	\tmp, mdscr_el1
78 	bic	\tmp, \tmp, #DBG_MDSCR_SS
79 	msr	mdscr_el1, \tmp
80 	isb	// Synchronise with enable_dbg
81 9990:
82 	.endm
83 
84 	/* call with daif masked */
85 	.macro	enable_step_tsk, flgs, tmp
86 	tbz	\flgs, #TIF_SINGLESTEP, 9990f
87 	mrs	\tmp, mdscr_el1
88 	orr	\tmp, \tmp, #DBG_MDSCR_SS
89 	msr	mdscr_el1, \tmp
90 9990:
91 	.endm
92 
93 /*
94  * RAS Error Synchronization barrier
95  */
96 	.macro  esb
97 #ifdef CONFIG_ARM64_RAS_EXTN
98 	hint    #16
99 #else
100 	nop
101 #endif
102 	.endm
103 
104 /*
105  * Value prediction barrier
106  */
107 	.macro	csdb
108 	hint	#20
109 	.endm
110 
111 /*
112  * Clear Branch History instruction
113  */
114 	.macro clearbhb
115 	hint	#22
116 	.endm
117 
118 /*
119  * Speculation barrier
120  */
121 	.macro	sb
122 alternative_if_not ARM64_HAS_SB
123 	dsb	nsh
124 	isb
125 alternative_else
126 	SB_BARRIER_INSN
127 	nop
128 alternative_endif
129 	.endm
130 
131 /*
132  * NOP sequence
133  */
134 	.macro	nops, num
135 	.rept	\num
136 	nop
137 	.endr
138 	.endm
139 
140 /*
141  * Register aliases.
142  */
143 lr	.req	x30		// link register
144 
145 /*
146  * Vector entry
147  */
148 	.macro	ventry	label
149 	.align	7
150 	b	\label
151 	.endm
152 
153 /*
154  * Select code when configured for BE.
155  */
156 #ifdef CONFIG_CPU_BIG_ENDIAN
157 #define CPU_BE(code...) code
158 #else
159 #define CPU_BE(code...)
160 #endif
161 
162 /*
163  * Select code when configured for LE.
164  */
165 #ifdef CONFIG_CPU_BIG_ENDIAN
166 #define CPU_LE(code...)
167 #else
168 #define CPU_LE(code...) code
169 #endif
170 
171 /*
172  * Define a macro that constructs a 64-bit value by concatenating two
173  * 32-bit registers. Note that on big endian systems the order of the
174  * registers is swapped.
175  */
176 #ifndef CONFIG_CPU_BIG_ENDIAN
177 	.macro	regs_to_64, rd, lbits, hbits
178 #else
179 	.macro	regs_to_64, rd, hbits, lbits
180 #endif
181 	orr	\rd, \lbits, \hbits, lsl #32
182 	.endm
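
	/*
	 * Illustrative use (register choice is hypothetical): with the
	 * little-endian definition above, x2 supplies bits [31:0] (it must hold
	 * a zero-extended 32-bit value) and x3 supplies bits [63:32] of the
	 * result in x0; the big-endian definition swaps which argument is
	 * which, so the call site stays the same:
	 *
	 *	regs_to_64	x0, x2, x3
	 */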
183 
184 /*
185  * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
186  * <symbol> is within the range +/- 4 GB of the PC.
187  */
188 	/*
189 	 * @dst: destination register (64 bit wide)
190 	 * @sym: name of the symbol
191 	 */
192 	.macro	adr_l, dst, sym
193 	adrp	\dst, \sym
194 	add	\dst, \dst, :lo12:\sym
195 	.endm
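
	/*
	 * Illustrative use (hypothetical symbol name):
	 *
	 *	adr_l	x0, some_symbol		// x0 = runtime address of some_symbol
	 */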
196 
197 	/*
198 	 * @dst: destination register (32 or 64 bit wide)
199 	 * @sym: name of the symbol
200 	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
201 	 *       32-bit wide register, in which case it cannot be used to hold
202 	 *       the address
203 	 */
204 	.macro	ldr_l, dst, sym, tmp=
205 	.ifb	\tmp
206 	adrp	\dst, \sym
207 	ldr	\dst, [\dst, :lo12:\sym]
208 	.else
209 	adrp	\tmp, \sym
210 	ldr	\dst, [\tmp, :lo12:\sym]
211 	.endif
212 	.endm
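
	/*
	 * Illustrative uses (hypothetical symbol names):
	 *
	 *	ldr_l	x0, some_u64_var	// 64-bit dst doubles as the address register
	 *	ldr_l	w1, some_u32_var, x2	// 32-bit dst, so x2 holds the address
	 */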
213 
214 	/*
215 	 * @src: source register (32 or 64 bit wide)
216 	 * @sym: name of the symbol
217 	 * @tmp: mandatory 64-bit scratch register to calculate the address
218 	 *       so that <src> is preserved.
219 	 */
220 	.macro	str_l, src, sym, tmp
221 	adrp	\tmp, \sym
222 	str	\src, [\tmp, :lo12:\sym]
223 	.endm
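
	/*
	 * Illustrative use (hypothetical symbol name):
	 *
	 *	str_l	w0, some_u32_var, x1	// store w0; x1 is clobbered with the address
	 */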
224 
225 	/*
226 	 * @dst: destination register
227 	 */
228 #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
229 	.macro	get_this_cpu_offset, dst
230 	mrs	\dst, tpidr_el2
231 	.endm
232 #else
233 	.macro	get_this_cpu_offset, dst
234 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
235 	mrs	\dst, tpidr_el1
236 alternative_else
237 	mrs	\dst, tpidr_el2
238 alternative_endif
239 	.endm
240 
241 	.macro	set_this_cpu_offset, src
242 alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
243 	msr	tpidr_el1, \src
244 alternative_else
245 	msr	tpidr_el2, \src
246 alternative_endif
247 	.endm
248 #endif
249 
250 	/*
251 	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
252 	 * @sym: The name of the per-cpu variable
253 	 * @tmp: scratch register
254 	 */
255 	.macro adr_this_cpu, dst, sym, tmp
256 	adrp	\tmp, \sym
257 	add	\dst, \tmp, #:lo12:\sym
258 	get_this_cpu_offset \tmp
259 	add	\dst, \dst, \tmp
260 	.endm
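
	/*
	 * Illustrative use (hypothetical per-cpu symbol):
	 *
	 *	adr_this_cpu	x0, some_percpu_var, x1	// x0 = this CPU's &some_percpu_var
	 */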
261 
262 	/*
263 	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
264 	 * @sym: The name of the per-cpu variable
265 	 * @tmp: scratch register
266 	 */
267 	.macro ldr_this_cpu dst, sym, tmp
268 	adr_l	\dst, \sym
269 	get_this_cpu_offset \tmp
270 	ldr	\dst, [\dst, \tmp]
271 	.endm
272 
273 /*
274  * vma_vm_mm - get mm pointer from vma pointer (vma->vm_mm)
275  */
276 	.macro	vma_vm_mm, rd, rn
277 	ldr	\rd, [\rn, #VMA_VM_MM]
278 	.endm
279 
280 /*
281  * read_ctr - read CTR_EL0. If the system has mismatched register fields,
282  * provide the system-wide safe value from arm64_ftr_reg_ctrel0.sys_val
283  */
284 	.macro	read_ctr, reg
285 #ifndef __KVM_NVHE_HYPERVISOR__
286 alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
287 	mrs	\reg, ctr_el0			// read CTR
288 	nop
289 alternative_else
290 	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
291 alternative_endif
292 #else
293 alternative_if_not ARM64_KVM_PROTECTED_MODE
294 	ASM_BUG()
295 alternative_else_nop_endif
296 alternative_cb kvm_compute_final_ctr_el0
297 	movz	\reg, #0
298 	movk	\reg, #0, lsl #16
299 	movk	\reg, #0, lsl #32
300 	movk	\reg, #0, lsl #48
301 alternative_cb_end
302 #endif
303 	.endm
304 
305 
306 /*
307  * raw_dcache_line_size - get the minimum D-cache line size on this CPU
308  * from the CTR register.
309  */
310 	.macro	raw_dcache_line_size, reg, tmp
311 	mrs	\tmp, ctr_el0			// read CTR
312 	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
313 	mov	\reg, #4			// bytes per word
314 	lsl	\reg, \reg, \tmp		// actual cache line size
315 	.endm
316 
317 /*
318  * dcache_line_size - get the safe D-cache line size across all CPUs
319  */
320 	.macro	dcache_line_size, reg, tmp
321 	read_ctr	\tmp
322 	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
323 	mov		\reg, #4		// bytes per word
324 	lsl		\reg, \reg, \tmp	// actual cache line size
325 	.endm
326 
327 /*
328  * raw_icache_line_size - get the minimum I-cache line size on this CPU
329  * from the CTR register.
330  */
331 	.macro	raw_icache_line_size, reg, tmp
332 	mrs	\tmp, ctr_el0			// read CTR
333 	and	\tmp, \tmp, #0xf		// cache line size encoding
334 	mov	\reg, #4			// bytes per word
335 	lsl	\reg, \reg, \tmp		// actual cache line size
336 	.endm
337 
338 /*
339  * icache_line_size - get the safe I-cache line size across all CPUs
340  */
341 	.macro	icache_line_size, reg, tmp
342 	read_ctr	\tmp
343 	and		\tmp, \tmp, #0xf	// cache line size encoding
344 	mov		\reg, #4		// bytes per word
345 	lsl		\reg, \reg, \tmp	// actual cache line size
346 	.endm
347 
348 /*
349  * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
350  */
351 	.macro	tcr_set_t0sz, valreg, t0sz
352 	bfi	\valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
353 	.endm
354 
355 /*
356  * tcr_set_t1sz - update TCR.T1SZ
357  */
358 	.macro	tcr_set_t1sz, valreg, t1sz
359 	bfi	\valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH
360 	.endm
361 
362 /*
363  * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
364  * ID_AA64MMFR0_EL1.PARange value
365  *
366  *	tcr:		register with the TCR_ELx value to be updated
367  *	pos:		IPS or PS bitfield position
368  *	tmp{0,1}:	temporary registers
369  */
370 	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
371 	mrs	\tmp0, ID_AA64MMFR0_EL1
372 	// Narrow PARange to fit the PS field in TCR_ELx
373 	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_PARANGE_SHIFT, #3
374 	mov	\tmp1, #ID_AA64MMFR0_PARANGE_MAX
375 	cmp	\tmp0, \tmp1
376 	csel	\tmp0, \tmp1, \tmp0, hi
377 	bfi	\tcr, \tmp0, \pos, #3
378 	.endm
379 
380 	.macro __dcache_op_workaround_clean_cache, op, addr
381 alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
382 	dc	\op, \addr
383 alternative_else
384 	dc	civac, \addr
385 alternative_endif
386 	.endm
387 
388 /*
389  * Macro to perform a data cache maintenance operation for the interval
390  * [start, end) with dcache line size explicitly provided.
391  *
392  * 	op:		operation passed to dc instruction
393  * 	domain:		domain used in dsb instruction
394  * 	start:          starting virtual address of the region
395  * 	end:            end virtual address of the region
396  *	linesz:		dcache line size
397  * 	fixup:		optional label to branch to on user fault
398  * 	Corrupts:       start, end, tmp
399  */
400 	.macro dcache_by_myline_op op, domain, start, end, linesz, tmp, fixup
401 	sub	\tmp, \linesz, #1
402 	bic	\start, \start, \tmp
403 .Ldcache_op\@:
404 	.ifc	\op, cvau
405 	__dcache_op_workaround_clean_cache \op, \start
406 	.else
407 	.ifc	\op, cvac
408 	__dcache_op_workaround_clean_cache \op, \start
409 	.else
410 	.ifc	\op, cvap
411 	sys	3, c7, c12, 1, \start	// dc cvap
412 	.else
413 	.ifc	\op, cvadp
414 	sys	3, c7, c13, 1, \start	// dc cvadp
415 	.else
416 	dc	\op, \start
417 	.endif
418 	.endif
419 	.endif
420 	.endif
421 	add	\start, \start, \linesz
422 	cmp	\start, \end
423 	b.lo	.Ldcache_op\@
424 	dsb	\domain
425 
426 	_cond_extable .Ldcache_op\@, \fixup
427 	.endm
428 
429 /*
430  * Macro to perform a data cache maintenance operation for the interval
431  * [start, end)
432  *
433  * 	op:		operation passed to dc instruction
434  * 	domain:		domain used in dsb instruction
435  * 	start:          starting virtual address of the region
436  * 	end:            end virtual address of the region
437  * 	fixup:		optional label to branch to on user fault
438  * 	Corrupts:       start, end, tmp1, tmp2
439  */
440 	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
441 	dcache_line_size \tmp1, \tmp2
442 	dcache_by_myline_op \op, \domain, \start, \end, \tmp1, \tmp2, \fixup
443 	.endm
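
	/*
	 * Illustrative use (register choice is hypothetical): clean and
	 * invalidate to the PoC over [x0, x1) with no user-fault fixup label:
	 *
	 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
	 */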
444 
445 /*
446  * Macro to perform an instruction cache maintenance operation for the interval
447  * [start, end)
448  *
449  * 	start, end:	virtual addresses describing the region
450  *	fixup:		optional label to branch to on user fault
451  * 	Corrupts:	tmp1, tmp2
452  */
453 	.macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
454 	icache_line_size \tmp1, \tmp2
455 	sub	\tmp2, \tmp1, #1
456 	bic	\tmp2, \start, \tmp2
457 .Licache_op\@:
458 	ic	ivau, \tmp2			// invalidate I line PoU
459 	add	\tmp2, \tmp2, \tmp1
460 	cmp	\tmp2, \end
461 	b.lo	.Licache_op\@
462 	dsb	ish
463 	isb
464 
465 	_cond_extable .Licache_op\@, \fixup
466 	.endm
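
	/*
	 * Illustrative use (register choice is hypothetical): invalidate the
	 * I-cache over [x0, x1):
	 *
	 *	invalidate_icache_by_line x0, x1, x2, x3
	 */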
467 
468 /*
469  * To prevent the possibility of old and new partial table walks being visible
470  * in the tlb, switch the ttbr to a zero page when we invalidate the old
471  * records. See D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i.
472  * Even switching to our copied tables will cause a changed output address at
473  * each stage of the walk.
474  */
475 	.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
476 	phys_to_ttbr \tmp, \zero_page
477 	msr	ttbr1_el1, \tmp
478 	isb
479 	tlbi	vmalle1
480 	dsb	nsh
481 	phys_to_ttbr \tmp, \page_table
482 	offset_ttbr1 \tmp, \tmp2
483 	msr	ttbr1_el1, \tmp
484 	isb
485 	.endm
486 
487 /*
488  * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
489  */
490 	.macro	reset_pmuserenr_el0, tmpreg
491 	mrs	\tmpreg, id_aa64dfr0_el1
492 	sbfx	\tmpreg, \tmpreg, #ID_AA64DFR0_PMUVER_SHIFT, #4
493 	cmp	\tmpreg, #1			// Skip if no PMU present
494 	b.lt	9000f
495 	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
496 9000:
497 	.endm
498 
499 /*
500  * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
501  */
502 	.macro	reset_amuserenr_el0, tmpreg
503 	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
504 	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_AMU_SHIFT, #4
505 	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
506 	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
507 .Lskip_\@:
508 	.endm
509 /*
510  * copy_page - copy src to dest using temp registers t1-t8
511  */
512 	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
513 9998:	ldp	\t1, \t2, [\src]
514 	ldp	\t3, \t4, [\src, #16]
515 	ldp	\t5, \t6, [\src, #32]
516 	ldp	\t7, \t8, [\src, #48]
517 	add	\src, \src, #64
518 	stnp	\t1, \t2, [\dest]
519 	stnp	\t3, \t4, [\dest, #16]
520 	stnp	\t5, \t6, [\dest, #32]
521 	stnp	\t7, \t8, [\dest, #48]
522 	add	\dest, \dest, #64
523 	tst	\src, #(PAGE_SIZE - 1)
524 	b.ne	9998b
525 	.endm
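
	/*
	 * Illustrative use (register choice is hypothetical): both addresses
	 * must be PAGE_SIZE aligned, as the loop stops when \src reaches the
	 * next page boundary:
	 *
	 *	copy_page	x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
	 */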
526 
527 /*
528  * Annotate a function as being unsuitable for kprobes.
529  */
530 #ifdef CONFIG_KPROBES
531 #define NOKPROBE(x)				\
532 	.pushsection "_kprobe_blacklist", "aw";	\
533 	.quad	x;				\
534 	.popsection;
535 #else
536 #define NOKPROBE(x)
537 #endif
538 
539 #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
540 #define EXPORT_SYMBOL_NOKASAN(name)
541 #else
542 #define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
543 #endif
544 
545 	/*
546 	 * Emit a 64-bit absolute little endian symbol reference in a way that
547 	 * ensures that it will be resolved at build time, even when building a
548 	 * PIE binary. This requires cooperation from the linker script, which
549 	 * must emit the lo32/hi32 halves individually.
550 	 */
551 	.macro	le64sym, sym
552 	.long	\sym\()_lo32
553 	.long	\sym\()_hi32
554 	.endm
555 
556 	/*
557 	 * mov_q - move an immediate constant into a 64-bit register using
558 	 *         between 2 and 4 movz/movk instructions (depending on the
559 	 *         magnitude and sign of the operand)
560 	 */
561 	.macro	mov_q, reg, val
562 	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
563 	movz	\reg, :abs_g1_s:\val
564 	.else
565 	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
566 	movz	\reg, :abs_g2_s:\val
567 	.else
568 	movz	\reg, :abs_g3:\val
569 	movk	\reg, :abs_g2_nc:\val
570 	.endif
571 	movk	\reg, :abs_g1_nc:\val
572 	.endif
573 	movk	\reg, :abs_g0_nc:\val
574 	.endm
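
	/*
	 * Illustrative uses:
	 *
	 *	mov_q	x0, 0x7fff0000		// short form: movz + one movk
	 *	mov_q	x1, 0xdead0000beef0000	// full form: movz + three movk
	 */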
575 
576 /*
577  * Return the current task_struct.
578  */
579 	.macro	get_current_task, rd
580 	mrs	\rd, sp_el0
581 	.endm
582 
583 /*
584  * Offset ttbr1 to allow for 48-bit kernel VAs set with 52-bit PTRS_PER_PGD.
585  * orr is used as it can cover the immediate value (and is idempotent).
586  * In future this may be nop'ed out when dealing with 52-bit kernel VAs.
587  * 	ttbr: Value of ttbr to set, modified.
588  */
589 	.macro	offset_ttbr1, ttbr, tmp
590 #ifdef CONFIG_ARM64_VA_BITS_52
591 	mrs_s	\tmp, SYS_ID_AA64MMFR2_EL1
592 	and	\tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT)
593 	cbnz	\tmp, .Lskipoffs_\@
594 	orr	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
595 .Lskipoffs_\@ :
596 #endif
597 	.endm
598 
599 /*
600  * Perform the reverse of offset_ttbr1.
601  * bic is used as it can cover the immediate value and, in future, won't need
602  * to be nop'ed out when dealing with 52-bit kernel VAs.
603  */
604 	.macro	restore_ttbr1, ttbr
605 #ifdef CONFIG_ARM64_VA_BITS_52
606 	bic	\ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET
607 #endif
608 	.endm
609 
610 /*
611  * Arrange a physical address in a TTBR register, taking care of 52-bit
612  * addresses.
613  *
614  * 	phys:	physical address, preserved
615  * 	ttbr:	returns the TTBR value
616  */
617 	.macro	phys_to_ttbr, ttbr, phys
618 #ifdef CONFIG_ARM64_PA_BITS_52
619 	orr	\ttbr, \phys, \phys, lsr #46
620 	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
621 #else
622 	mov	\ttbr, \phys
623 #endif
624 	.endm
625 
626 	.macro	phys_to_pte, pte, phys
627 #ifdef CONFIG_ARM64_PA_BITS_52
628 	/*
629 	 * We assume \phys is 64K aligned and this is guaranteed by only
630 	 * supporting this configuration with 64K pages.
631 	 */
632 	orr	\pte, \phys, \phys, lsr #36
633 	and	\pte, \pte, #PTE_ADDR_MASK
634 #else
635 	mov	\pte, \phys
636 #endif
637 	.endm
638 
639 	.macro	pte_to_phys, phys, pte
640 #ifdef CONFIG_ARM64_PA_BITS_52
641 	ubfiz	\phys, \pte, #(48 - 16 - 12), #16
642 	bfxil	\phys, \pte, #16, #32
643 	lsl	\phys, \phys, #16
644 #else
645 	and	\phys, \pte, #PTE_ADDR_MASK
646 #endif
647 	.endm
648 
649 /*
650  * tcr_clear_errata_bits - Clear TCR bits that trigger an errata on this CPU.
651  */
652 	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
653 #ifdef CONFIG_FUJITSU_ERRATUM_010001
654 	mrs	\tmp1, midr_el1
655 
656 	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
657 	and	\tmp1, \tmp1, \tmp2
658 	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
659 	cmp	\tmp1, \tmp2
660 	b.ne	10f
661 
662 	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
663 	bic	\tcr, \tcr, \tmp2
664 10:
665 #endif /* CONFIG_FUJITSU_ERRATUM_010001 */
666 	.endm
667 
668 /**
669  * Errata workaround prior to disabling the MMU. Insert an ISB immediately prior
670  * to executing the MSR that will change SCTLR_ELn[M] from a value of 1 to 0.
671  */
672 	.macro pre_disable_mmu_workaround
673 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
674 	isb
675 #endif
676 	.endm
677 
678 	/*
679 	 * frame_push - Push @regcount callee saved registers to the stack,
680 	 *              starting at x19, as well as x29/x30, and set x29 to
681 	 *              the new value of sp. Add @extra bytes of stack space
682 	 *              for locals.
683 	 */
684 	.macro		frame_push, regcount:req, extra
685 	__frame		st, \regcount, \extra
686 	.endm
687 
688 	/*
689 	 * frame_pop  - Pop the callee saved registers from the stack that were
690 	 *              pushed in the most recent call to frame_push, as well
691 	 *              as x29/x30 and any extra stack space that may have been
692 	 *              allocated.
693 	 */
694 	.macro		frame_pop
695 	__frame		ld
696 	.endm
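
	/*
	 * Illustrative pairing sketch (hypothetical function; SYM_FUNC_START/END
	 * come from <linux/linkage.h>):
	 *
	 * SYM_FUNC_START(example_func)
	 *	frame_push	4, 16		// x29/x30 + x19-x22, 16 bytes of locals
	 *	...
	 *	frame_pop			// undoes the matching frame_push
	 *	ret
	 * SYM_FUNC_END(example_func)
	 */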
697 
698 	.macro		__frame_regs, reg1, reg2, op, num
699 	.if		.Lframe_regcount == \num
700 	\op\()r		\reg1, [sp, #(\num + 1) * 8]
701 	.elseif		.Lframe_regcount > \num
702 	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
703 	.endif
704 	.endm
705 
706 	.macro		__frame, op, regcount, extra=0
707 	.ifc		\op, st
708 	.if		(\regcount) < 0 || (\regcount) > 10
709 	.error		"regcount should be in the range [0 ... 10]"
710 	.endif
711 	.if		((\extra) % 16) != 0
712 	.error		"extra should be a multiple of 16 bytes"
713 	.endif
714 	.ifdef		.Lframe_regcount
715 	.if		.Lframe_regcount != -1
716 	.error		"frame_push/frame_pop may not be nested"
717 	.endif
718 	.endif
719 	.set		.Lframe_regcount, \regcount
720 	.set		.Lframe_extra, \extra
721 	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
722 	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
723 	mov		x29, sp
724 	.endif
725 
726 	__frame_regs	x19, x20, \op, 1
727 	__frame_regs	x21, x22, \op, 3
728 	__frame_regs	x23, x24, \op, 5
729 	__frame_regs	x25, x26, \op, 7
730 	__frame_regs	x27, x28, \op, 9
731 
732 	.ifc		\op, ld
733 	.if		.Lframe_regcount == -1
734 	.error		"frame_push/frame_pop may not be nested"
735 	.endif
736 	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
737 	.set		.Lframe_regcount, -1
738 	.endif
739 	.endm
740 
741 /*
742  * Set SCTLR_ELx to the @reg value, and invalidate the local icache
743  * in the process. This is called when setting the MMU on.
744  */
745 .macro set_sctlr, sreg, reg
746 	msr	\sreg, \reg
747 	isb
748 	/*
749 	 * Invalidate the local I-cache so that any instructions fetched
750 	 * speculatively from the PoC are discarded, since they may have
751 	 * been dynamically patched at the PoU.
752 	 */
753 	ic	iallu
754 	dsb	nsh
755 	isb
756 .endm
757 
758 .macro set_sctlr_el1, reg
759 	set_sctlr sctlr_el1, \reg
760 .endm
761 
762 .macro set_sctlr_el2, reg
763 	set_sctlr sctlr_el2, \reg
764 .endm
765 
766 	/*
767 	 * Check whether preempt/bh-disabled asm code should yield as soon as
768 	 * it is able. This is the case if we are currently running in task
769 	 * context, and either a softirq is pending, or the TIF_NEED_RESCHED
770 	 * flag is set and re-enabling preemption a single time would result in
771 	 * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
772 	 * stored negated in the top word of the thread_info::preempt_count
773 	 * field)
774 	 */
775 	.macro		cond_yield, lbl:req, tmp:req, tmp2:req
776 	get_current_task \tmp
777 	ldr		\tmp, [\tmp, #TSK_TI_PREEMPT]
778 	/*
779 	 * If we are serving a softirq, there is no point in yielding: the
780 	 * softirq will not be preempted no matter what we do, so we should
781 	 * run to completion as quickly as we can.
782 	 */
783 	tbnz		\tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
784 #ifdef CONFIG_PREEMPTION
785 	sub		\tmp, \tmp, #PREEMPT_DISABLE_OFFSET
786 	cbz		\tmp, \lbl
787 #endif
788 	adr_l		\tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
789 	get_this_cpu_offset	\tmp2
790 	ldr		w\tmp, [\tmp, \tmp2]
791 	cbnz		w\tmp, \lbl	// yield on pending softirq in task context
792 .Lnoyield_\@:
793 	.endm
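
	/*
	 * Illustrative use (hypothetical label and scratch registers): yield by
	 * branching to code that re-enables preemption and reschedules:
	 *
	 *	cond_yield	3f, x8, x9
	 */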
794 
795 /*
796  * Branch Target Identifier (BTI)
797  */
798 	.macro  bti, targets
799 	.equ	.L__bti_targets_c, 34
800 	.equ	.L__bti_targets_j, 36
801 	.equ	.L__bti_targets_jc, 38
802 	hint	#.L__bti_targets_\targets
803 	.endm
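
	/*
	 * Illustrative use:
	 *
	 *	bti	c		// "hint #34": landing pad for indirect calls
	 */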
804 
805 /*
806  * This macro emits a program property note section identifying
807  * architecture features which require special handling, mainly for
808  * use in assembly files included in the VDSO.
809  */
810 
811 #define NT_GNU_PROPERTY_TYPE_0  5
812 #define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000
813 
814 #define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
815 #define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)
816 
817 #ifdef CONFIG_ARM64_BTI_KERNEL
818 #define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
819 		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
820 		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
821 #endif
822 
823 #ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
824 .macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
825 	.pushsection .note.gnu.property, "a"
826 	.align  3
827 	.long   2f - 1f
828 	.long   6f - 3f
829 	.long   NT_GNU_PROPERTY_TYPE_0
830 1:      .string "GNU"
831 2:
832 	.align  3
833 3:      .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
834 	.long   5f - 4f
835 4:
836 	/*
837 	 * This is described with an array of char in the Linux API
838 	 * spec but the text and all other usage (including binutils,
839 	 * clang and GCC) treat this as a 32 bit value so no swizzling
840 	 * clang and GCC) treat this as a 32-bit value, so no swizzling
841 	 */
842 	.long   \feat
843 5:
844 	.align  3
845 6:
846 	.popsection
847 .endm
848 
849 #else
850 .macro emit_aarch64_feature_1_and, feat=0
851 .endm
852 
853 #endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
854 
855 	.macro __mitigate_spectre_bhb_loop      tmp
856 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
857 alternative_cb  spectre_bhb_patch_loop_iter
858 	mov	\tmp, #32		// Patched to correct the immediate
859 alternative_cb_end
860 .Lspectre_bhb_loop\@:
861 	b	. + 4
862 	subs	\tmp, \tmp, #1
863 	b.ne	.Lspectre_bhb_loop\@
864 	sb
865 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
866 	.endm
867 
868 	.macro mitigate_spectre_bhb_loop	tmp
869 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
870 alternative_cb	spectre_bhb_patch_loop_mitigation_enable
871 	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
872 alternative_cb_end
873 	__mitigate_spectre_bhb_loop	\tmp
874 .L_spectre_bhb_loop_done\@:
875 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
876 	.endm
877 
878 	/* Saves/restores x0-x3 to the stack */
879 	.macro __mitigate_spectre_bhb_fw
880 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
881 	stp	x0, x1, [sp, #-16]!
882 	stp	x2, x3, [sp, #-16]!
883 	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
884 alternative_cb	smccc_patch_fw_mitigation_conduit
885 	nop					// Patched to SMC/HVC #0
886 alternative_cb_end
887 	ldp	x2, x3, [sp], #16
888 	ldp	x0, x1, [sp], #16
889 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
890 	.endm
891 
892 	.macro mitigate_spectre_bhb_clear_insn
893 #ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
894 alternative_cb	spectre_bhb_patch_clearbhb
895 	/* Patched to NOP when not supported */
896 	clearbhb
897 	isb
898 alternative_cb_end
899 #endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
900 	.endm
901 #endif	/* __ASM_ASSEMBLER_H */
902