/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Based on arch/arm/include/asm/assembler.h, arch/arm/mm/proc-macros.S
 *
 * Copyright (C) 1996-2000 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASSEMBLER__
#error "Only include this from assembly code"
#endif

#ifndef __ASM_ASSEMBLER_H
#define __ASM_ASSEMBLER_H

#include <linux/export.h>

#include <asm/alternative.h>
#include <asm/asm-bug.h>
#include <asm/asm-extable.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>

	/*
	 * Provide a wxN alias for each wN register so that we can paste an xN
	 * reference after a 'w' to obtain the 32-bit version.
	 */
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30
	wx\n	.req	w\n
	.endr
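	/*
	 * Illustrative (hypothetical) use of the wxN aliases above: in a macro
	 * whose argument names an x register, prefixing the argument with 'w'
	 * yields the corresponding 32-bit view, e.g.:
	 *
	 *	.macro	add32, rx, imm		// \rx is "x0" ... "x30"
	 *	add	w\rx, w\rx, #\imm	// e.g. "add wx5, wx5, #1" -> w5
	 *	.endm
	 */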

	.macro disable_daif
	msr	daifset, #0xf
	.endm

/*
 * Save/restore interrupts.
 */
	.macro save_and_disable_daif, flags
	mrs	\flags, daif
	msr	daifset, #0xf
	.endm

	.macro	save_and_disable_irq, flags
	mrs	\flags, daif
	msr	daifset, #3
	.endm

	.macro	restore_irq, flags
	msr	daif, \flags
	.endm
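	/*
	 * Typical pairing of the macros above (illustrative sketch only):
	 *
	 *	save_and_disable_irq	x3	// mask IRQ/FIQ, old DAIF in x3
	 *	...				// critical section
	 *	restore_irq		x3	// restore the saved DAIF flags
	 */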

	.macro	disable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	bic	\tmp, \tmp, #MDSCR_EL1_SS
	msr	mdscr_el1, \tmp
	isb	// Take effect before a subsequent clear of DAIF.D
9990:
	.endm

	/* call with daif masked */
	.macro	enable_step_tsk, flgs, tmp
	tbz	\flgs, #TIF_SINGLESTEP, 9990f
	mrs	\tmp, mdscr_el1
	orr	\tmp, \tmp, #MDSCR_EL1_SS
	msr	mdscr_el1, \tmp
9990:
	.endm

/*
 * RAS Error Synchronization barrier
 */
	.macro  esb
#ifdef CONFIG_ARM64_RAS_EXTN
	hint    #16
#else
	nop
#endif
	.endm

/*
 * Value prediction barrier
 */
	.macro	csdb
	hint	#20
	.endm

/*
 * Clear Branch History instruction
 */
	.macro clearbhb
	hint	#22
	.endm
/*
 * Speculation barrier
 */
	.macro	sb
alternative_if_not ARM64_HAS_SB
	dsb	nsh
	isb
alternative_else
	SB_BARRIER_INSN
	nop
alternative_endif
	.endm

/*
 * NOP sequence
 */
	.macro	nops, num
	.rept	\num
	nop
	.endr
	.endm

/*
 * Register aliases.
 */
lr	.req	x30		// link register

/*
 * Vector entry
 */
	.macro	ventry	label
	.align	7
	b	\label
	.endm

/*
 * Select code when configured for BE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_BE(code...) code
#else
#define CPU_BE(code...)
#endif

/*
 * Select code when configured for LE.
 */
#ifdef CONFIG_CPU_BIG_ENDIAN
#define CPU_LE(code...)
#else
#define CPU_LE(code...) code
#endif

/*
 * Define a macro that constructs a 64-bit value by concatenating two
 * 32-bit registers. Note that on big endian systems the order of the
 * registers is swapped.
 */
#ifndef CONFIG_CPU_BIG_ENDIAN
	.macro	regs_to_64, rd, lbits, hbits
#else
	.macro	regs_to_64, rd, hbits, lbits
#endif
	orr	\rd, \lbits, \hbits, lsl #32
	.endm
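/*
 * Illustrative use: "regs_to_64 x0, x1, x2" builds a 64-bit value in x0 from
 * the two 32-bit values held in x1 and x2. As noted above, which argument is
 * taken as the low half is swapped on big-endian builds, so a call site
 * passes the pair in one fixed textual order.
 */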

/*
 * Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
 * <symbol> is within the range +/- 4 GB of the PC.
 */
	/*
	 * @dst: destination register (64 bit wide)
	 * @sym: name of the symbol
	 */
	.macro	adr_l, dst, sym
	adrp	\dst, \sym
	add	\dst, \dst, :lo12:\sym
	.endm

	/*
	 * @dst: destination register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: optional 64-bit scratch register to be used if <dst> is a
	 *       32-bit wide register, in which case it cannot be used to hold
	 *       the address
	 */
	.macro	ldr_l, dst, sym, tmp=
	.ifb	\tmp
	adrp	\dst, \sym
	ldr	\dst, [\dst, :lo12:\sym]
	.else
	adrp	\tmp, \sym
	ldr	\dst, [\tmp, :lo12:\sym]
	.endif
	.endm

	/*
	 * @src: source register (32 or 64 bit wide)
	 * @sym: name of the symbol
	 * @tmp: mandatory 64-bit scratch register to calculate the address
	 *       while <src> needs to be preserved.
	 */
	.macro	str_l, src, sym, tmp
	adrp	\tmp, \sym
	str	\src, [\tmp, :lo12:\sym]
	.endm
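	/*
	 * Illustrative use of the helpers above; "foo" is a hypothetical
	 * symbol within +/- 4 GB of the PC:
	 *
	 *	adr_l	x0, foo		// x0 = address of foo
	 *	ldr_l	w1, foo, x2	// w1 = foo, using x2 for the address
	 *	str_l	w1, foo, x2	// foo = w1, using x2 as scratch
	 */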

	/*
	 * @dst: destination register
	 */
#if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
	.macro	get_this_cpu_offset, dst
	mrs	\dst, tpidr_el2
	.endm
#else
	.macro	get_this_cpu_offset, dst
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	\dst, tpidr_el1
alternative_else
	mrs	\dst, tpidr_el2
alternative_endif
	.endm

	.macro	set_this_cpu_offset, src
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	msr	tpidr_el1, \src
alternative_else
	msr	tpidr_el2, \src
alternative_endif
	.endm
#endif

	/*
	 * @dst: Result of per_cpu(sym, smp_processor_id()) (can be SP)
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro adr_this_cpu, dst, sym, tmp
	adrp	\tmp, \sym
	add	\dst, \tmp, #:lo12:\sym
	get_this_cpu_offset \tmp
	add	\dst, \dst, \tmp
	.endm

	/*
	 * @dst: Result of READ_ONCE(per_cpu(sym, smp_processor_id()))
	 * @sym: The name of the per-cpu variable
	 * @tmp: scratch register
	 */
	.macro ldr_this_cpu dst, sym, tmp
	adr_l	\dst, \sym
	get_this_cpu_offset \tmp
	ldr	\dst, [\dst, \tmp]
	.endm
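	/*
	 * Illustrative use, with "foo" a hypothetical per-cpu variable:
	 *
	 *	adr_this_cpu	x0, foo, x1	// x0 = this CPU's address of foo
	 *	ldr_this_cpu	x0, foo, x1	// x0 = this CPU's value of foo
	 */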

/*
 * read_ctr - read CTR_EL0. If the system has mismatched register fields,
 * provide the system-wide safe value from arm64_ftr_reg_ctrel0.sys_val
 */
	.macro	read_ctr, reg
#ifndef __KVM_NVHE_HYPERVISOR__
alternative_if_not ARM64_MISMATCHED_CACHE_TYPE
	mrs	\reg, ctr_el0			// read CTR
	nop
alternative_else
	ldr_l	\reg, arm64_ftr_reg_ctrel0 + ARM64_FTR_SYSVAL
alternative_endif
#else
alternative_if_not ARM64_KVM_PROTECTED_MODE
	ASM_BUG()
alternative_else_nop_endif
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_compute_final_ctr_el0
	movz	\reg, #0
	movk	\reg, #0, lsl #16
	movk	\reg, #0, lsl #32
	movk	\reg, #0, lsl #48
alternative_cb_end
#endif
	.endm

/*
 * raw_dcache_line_size - get the minimum D-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_dcache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	ubfm	\tmp, \tmp, #16, #19		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * dcache_line_size - get the safe D-cache line size across all CPUs
 */
	.macro	dcache_line_size, reg, tmp
	read_ctr	\tmp
	ubfm		\tmp, \tmp, #16, #19	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * raw_icache_line_size - get the minimum I-cache line size on this CPU
 * from the CTR register.
 */
	.macro	raw_icache_line_size, reg, tmp
	mrs	\tmp, ctr_el0			// read CTR
	and	\tmp, \tmp, #0xf		// cache line size encoding
	mov	\reg, #4			// bytes per word
	lsl	\reg, \reg, \tmp		// actual cache line size
	.endm

/*
 * icache_line_size - get the safe I-cache line size across all CPUs
 */
	.macro	icache_line_size, reg, tmp
	read_ctr	\tmp
	and		\tmp, \tmp, #0xf	// cache line size encoding
	mov		\reg, #4		// bytes per word
	lsl		\reg, \reg, \tmp	// actual cache line size
	.endm

/*
 * tcr_set_t0sz - update TCR.T0SZ so that we can load the ID map
 */
	.macro	tcr_set_t0sz, valreg, t0sz
	bfi	\valreg, \t0sz, #TCR_EL1_T0SZ_SHIFT, #TCR_EL1_T0SZ_WIDTH
	.endm

/*
 * tcr_set_t1sz - update TCR.T1SZ
 */
	.macro	tcr_set_t1sz, valreg, t1sz
	bfi	\valreg, \t1sz, #TCR_EL1_T1SZ_SHIFT, #TCR_EL1_T1SZ_WIDTH
	.endm

/*
 * tcr_compute_pa_size - set TCR.(I)PS to the highest supported
 * ID_AA64MMFR0_EL1.PARange value
 *
 *	tcr:		register with the TCR_ELx value to be updated
 *	pos:		IPS or PS bitfield position
 *	tmp{0,1}:	temporary registers
 */
	.macro	tcr_compute_pa_size, tcr, pos, tmp0, tmp1
	mrs	\tmp0, ID_AA64MMFR0_EL1
	// Narrow PARange to fit the PS field in TCR_ELx
	ubfx	\tmp0, \tmp0, #ID_AA64MMFR0_EL1_PARANGE_SHIFT, #3
	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_MAX
#ifdef CONFIG_ARM64_LPA2
alternative_if_not ARM64_HAS_VA52
	mov	\tmp1, #ID_AA64MMFR0_EL1_PARANGE_48
alternative_else_nop_endif
#endif
	cmp	\tmp0, \tmp1
	csel	\tmp0, \tmp1, \tmp0, hi
	bfi	\tcr, \tmp0, \pos, #3
	.endm

	.macro __dcache_op_workaround_clean_cache, op, addr
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \addr
alternative_else
	dc	civac, \addr
alternative_endif
	.endm

/*
 * Macro to perform data cache maintenance for the interval
 * [start, end) with the dcache line size explicitly provided.
 *
 *	op:		operation passed to dc instruction
 *	start:          starting virtual address of the region
 *	end:            end virtual address of the region
 *	linesz:		dcache line size
 *	fixup:		optional label to branch to on user fault
 *	Corrupts:       start, end, tmp
 */
	.macro dcache_by_myline_op_nosync op, start, end, linesz, tmp, fixup
	sub	\tmp, \linesz, #1
	bic	\start, \start, \tmp
alternative_if ARM64_WORKAROUND_4311569
	mov	\tmp, \start
alternative_else_nop_endif
.Ldcache_op\@:
	.ifc	\op, cvau
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvac
	__dcache_op_workaround_clean_cache \op, \start
	.else
	.ifc	\op, cvap
	sys	3, c7, c12, 1, \start	// dc cvap
	.else
	.ifc	\op, cvadp
	sys	3, c7, c13, 1, \start	// dc cvadp
	.else
	dc	\op, \start
	.endif
	.endif
	.endif
	.endif
	add	\start, \start, \linesz
	cmp	\start, \end
	b.lo	.Ldcache_op\@
alternative_if ARM64_WORKAROUND_4311569
	.ifnc	\op, cvau
	mov	\start, \tmp
	mov	\tmp, xzr
	cbnz	\start, .Ldcache_op\@
	.endif
alternative_else_nop_endif

	_cond_uaccess_extable .Ldcache_op\@, \fixup
	.endm

/*
 * Macro to perform data cache maintenance for the interval
 * [start, end) without waiting for completion
 *
 *	op:		operation passed to dc instruction
 *	start:          starting virtual address of the region
 *	end:            end virtual address of the region
 *	fixup:		optional label to branch to on user fault
 *	Corrupts:       start, end, tmp1, tmp2
 */
	.macro dcache_by_line_op_nosync op, start, end, tmp1, tmp2, fixup
	dcache_line_size \tmp1, \tmp2
	dcache_by_myline_op_nosync \op, \start, \end, \tmp1, \tmp2, \fixup
	.endm

/*
 * Macro to perform data cache maintenance for the interval
 * [start, end) and wait for completion
 *
 *	op:		operation passed to dc instruction
 *	domain:		domain used in dsb instruction
 *	start:          starting virtual address of the region
 *	end:            end virtual address of the region
 *	fixup:		optional label to branch to on user fault
 *	Corrupts:       start, end, tmp1, tmp2
 */
	.macro dcache_by_line_op op, domain, start, end, tmp1, tmp2, fixup
	dcache_by_line_op_nosync \op, \start, \end, \tmp1, \tmp2, \fixup
	dsb \domain
	.endm
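/*
 * Illustrative call: clean and invalidate [x0, x1) to the PoC and complete
 * the maintenance with a full-system DSB, using x2/x3 as scratch and no
 * fixup label (a user fault is not expected):
 *
 *	dcache_by_line_op civac, sy, x0, x1, x2, x3
 */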

/*
 * Macro to perform instruction cache maintenance for the interval
 * [start, end)
 *
 *	start, end:	virtual addresses describing the region
 *	fixup:		optional label to branch to on user fault
 *	Corrupts:	tmp1, tmp2
 */
	.macro invalidate_icache_by_line start, end, tmp1, tmp2, fixup
	icache_line_size \tmp1, \tmp2
	sub	\tmp2, \tmp1, #1
	bic	\tmp2, \start, \tmp2
.Licache_op\@:
	ic	ivau, \tmp2			// invalidate I line PoU
	add	\tmp2, \tmp2, \tmp1
	cmp	\tmp2, \end
	b.lo	.Licache_op\@
	dsb	ish
	isb

	_cond_uaccess_extable .Licache_op\@, \fixup
	.endm
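/*
 * Illustrative call, e.g. after new instructions have been written to
 * [x0, x1) and cleaned to the PoU (x2/x3 are clobbered):
 *
 *	invalidate_icache_by_line x0, x1, x2, x3
 */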

/*
 * load_ttbr1 - install @pgtbl as a TTBR1 page table
 * pgtbl preserved
 * tmp1/tmp2 clobbered, either may overlap with pgtbl
 */
	.macro		load_ttbr1, pgtbl, tmp1, tmp2
	phys_to_ttbr	\tmp1, \pgtbl
	offset_ttbr1	\tmp1, \tmp2
	msr		ttbr1_el1, \tmp1
	isb
	.endm

/*
 * To prevent the possibility of old and new partial table walks being visible
 * in the TLB, switch the TTBR to a zero page when we invalidate the old
 * entries. See D4.7.1 'General TLB maintenance requirements' in ARM DDI 0487A.i.
 * Even switching to our copied tables will cause a changed output address at
 * each stage of the walk.
 */
	.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2
	phys_to_ttbr \tmp, \zero_page
	msr	ttbr1_el1, \tmp
	isb
	tlbi	vmalle1
	dsb	nsh
	load_ttbr1 \page_table, \tmp, \tmp2
	.endm

/*
 * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
 */
	.macro	reset_pmuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64dfr0_el1
	ubfx	\tmpreg, \tmpreg, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	\tmpreg, #ID_AA64DFR0_EL1_PMUVer_NI
	ccmp	\tmpreg, #ID_AA64DFR0_EL1_PMUVer_IMP_DEF, #4, ne
	b.eq	9000f				// Skip if no PMU present or IMP_DEF
	msr	pmuserenr_el0, xzr		// Disable PMU access from EL0
9000:
	.endm

/*
 * reset_amuserenr_el0 - reset AMUSERENR_EL0 if AMUv1 present
 */
	.macro	reset_amuserenr_el0, tmpreg
	mrs	\tmpreg, id_aa64pfr0_el1	// Check ID_AA64PFR0_EL1
	ubfx	\tmpreg, \tmpreg, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	\tmpreg, .Lskip_\@		// Skip if no AMU present
	msr_s	SYS_AMUSERENR_EL0, xzr		// Disable AMU access from EL0
.Lskip_\@:
	.endm

/*
 * copy_page - copy src to dest using temp registers t1-t8
 */
	.macro copy_page dest:req src:req t1:req t2:req t3:req t4:req t5:req t6:req t7:req t8:req
9998:	ldp	\t1, \t2, [\src]
	ldp	\t3, \t4, [\src, #16]
	ldp	\t5, \t6, [\src, #32]
	ldp	\t7, \t8, [\src, #48]
	add	\src, \src, #64
	stnp	\t1, \t2, [\dest]
	stnp	\t3, \t4, [\dest, #16]
	stnp	\t5, \t6, [\dest, #32]
	stnp	\t7, \t8, [\dest, #48]
	add	\dest, \dest, #64
	tst	\src, #(PAGE_SIZE - 1)
	b.ne	9998b
	.endm
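/*
 * Illustrative call: copy one page from [x1] to [x0] using x2-x9 as
 * temporaries. Both pointers are assumed page-aligned and are left advanced
 * by PAGE_SIZE:
 *
 *	copy_page x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */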

/*
 * Annotate a function as being unsuitable for kprobes.
 */
#ifdef CONFIG_KPROBES
#define NOKPROBE(x)				\
	.pushsection "_kprobe_blacklist", "aw";	\
	.quad	x;				\
	.popsection;
#else
#define NOKPROBE(x)
#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define EXPORT_SYMBOL_NOKASAN(name)
#else
#define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
#endif

	/*
	 * Emit a 64-bit absolute little endian symbol reference in a way that
	 * ensures that it will be resolved at build time, even when building a
	 * PIE binary. This requires cooperation from the linker script, which
	 * must emit the lo32/hi32 halves individually.
	 */
	.macro	le64sym, sym
	.long	\sym\()_lo32
	.long	\sym\()_hi32
	.endm

	/*
	 * mov_q - move an immediate constant into a 64-bit register using
	 *         between 2 and 4 movz/movk instructions (depending on the
	 *         magnitude and sign of the operand)
	 */
	.macro	mov_q, reg, val
	.if (((\val) >> 31) == 0 || ((\val) >> 31) == 0x1ffffffff)
	movz	\reg, :abs_g1_s:\val
	.else
	.if (((\val) >> 47) == 0 || ((\val) >> 47) == 0x1ffff)
	movz	\reg, :abs_g2_s:\val
	.else
	movz	\reg, :abs_g3:\val
	movk	\reg, :abs_g2_nc:\val
	.endif
	movk	\reg, :abs_g1_nc:\val
	.endif
	movk	\reg, :abs_g0_nc:\val
	.endm
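	/*
	 * Illustrative use, with an arbitrary wide constant:
	 *
	 *	mov_q	x0, 0x123456789abcdef0	// expands to movz + 3 x movk
	 */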

/*
 * Return the current task_struct.
 */
	.macro	get_current_task, rd
	mrs	\rd, sp_el0
	.endm
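/*
 * Illustrative use: load the current task's thread flags (TSK_TI_FLAGS is an
 * asm-offsets constant):
 *
 *	get_current_task x1
 *	ldr	x2, [x1, #TSK_TI_FLAGS]
 */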

/*
 * If the kernel is built for 52-bit virtual addressing but the hardware only
 * supports 48 bits, we cannot program the pgdir address into TTBR1 directly,
 * but we have to add an offset so that the TTBR1 address corresponds with the
 * pgdir entry that covers the lowest 48-bit addressable VA.
 *
 * Note that this trick is only used for LVA/64k pages - LPA2/4k pages use an
 * additional paging level, and on LPA2/16k pages, we would end up with a root
 * level table with only 2 entries, which is suboptimal in terms of TLB
 * utilization, so there we fall back to 47 bits of translation if LPA2 is not
 * supported.
 *
 * orr is used as it can cover the immediate value (and is idempotent).
 *	ttbr: Value of ttbr to set, modified.
 */
	.macro	offset_ttbr1, ttbr, tmp
#if defined(CONFIG_ARM64_VA_BITS_52) && !defined(CONFIG_ARM64_LPA2)
	mrs	\tmp, tcr_el1
	and	\tmp, \tmp, #TCR_EL1_T1SZ_MASK
	cmp	\tmp, #TCR_T1SZ(VA_BITS_MIN)
	orr	\tmp, \ttbr, #TTBR1_BADDR_4852_OFFSET
	csel	\ttbr, \tmp, \ttbr, eq
#endif
	.endm

/*
 * Arrange a physical address in a TTBR register, taking care of 52-bit
 * addresses.
 *
 *	phys:	physical address, preserved
 *	ttbr:	returns the TTBR value
 */
	.macro	phys_to_ttbr, ttbr, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\ttbr, \phys, \phys, lsr #46
	and	\ttbr, \ttbr, #TTBR_BADDR_MASK_52
#else
	mov	\ttbr, \phys
#endif
	.endm

	.macro	phys_to_pte, pte, phys
#ifdef CONFIG_ARM64_PA_BITS_52
	orr	\pte, \phys, \phys, lsr #PTE_ADDR_HIGH_SHIFT
	and	\pte, \pte, #PHYS_TO_PTE_ADDR_MASK
#else
	mov	\pte, \phys
#endif
	.endm

/*
 * tcr_clear_errata_bits - Clear TCR bits that trigger an erratum on this CPU.
 */
	.macro	tcr_clear_errata_bits, tcr, tmp1, tmp2
#ifdef CONFIG_FUJITSU_ERRATUM_010001
	mrs	\tmp1, midr_el1

	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001_MASK
	and	\tmp1, \tmp1, \tmp2
	mov_q	\tmp2, MIDR_FUJITSU_ERRATUM_010001
	cmp	\tmp1, \tmp2
	b.ne	10f

	mov_q	\tmp2, TCR_CLEAR_FUJITSU_ERRATUM_010001
	bic	\tcr, \tcr, \tmp2
10:
#endif /* CONFIG_FUJITSU_ERRATUM_010001 */
	.endm

/*
 * Errata workaround prior to disabling the MMU. Insert an ISB immediately
 * prior to executing the MSR that will change SCTLR_ELn[M] from a value of 1
 * to 0.
 */
	.macro pre_disable_mmu_workaround
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_E1041
	isb
#endif
	.endm

	/*
	 * frame_push - Push @regcount callee saved registers to the stack,
	 *              starting at x19, as well as x29/x30, and set x29 to
	 *              the new value of sp. Add @extra bytes of stack space
	 *              for locals.
	 */
	.macro		frame_push, regcount:req, extra
	__frame		st, \regcount, \extra
	.endm

	/*
	 * frame_pop  - Pop the callee saved registers from the stack that were
	 *              pushed in the most recent call to frame_push, as well
	 *              as x29/x30 and any extra stack space that may have been
	 *              allocated.
	 */
	.macro		frame_pop
	__frame		ld
	.endm

	.macro		__frame_regs, reg1, reg2, op, num
	.if		.Lframe_regcount == \num
	\op\()r		\reg1, [sp, #(\num + 1) * 8]
	.elseif		.Lframe_regcount > \num
	\op\()p		\reg1, \reg2, [sp, #(\num + 1) * 8]
	.endif
	.endm

	.macro		__frame, op, regcount, extra=0
	.ifc		\op, st
	.if		(\regcount) < 0 || (\regcount) > 10
	.error		"regcount should be in the range [0 ... 10]"
	.endif
	.if		((\extra) % 16) != 0
	.error		"extra should be a multiple of 16 bytes"
	.endif
	.ifdef		.Lframe_regcount
	.if		.Lframe_regcount != -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	.endif
	.set		.Lframe_regcount, \regcount
	.set		.Lframe_extra, \extra
	.set		.Lframe_local_offset, ((\regcount + 3) / 2) * 16
	stp		x29, x30, [sp, #-.Lframe_local_offset - .Lframe_extra]!
	mov		x29, sp
	.endif

	__frame_regs	x19, x20, \op, 1
	__frame_regs	x21, x22, \op, 3
	__frame_regs	x23, x24, \op, 5
	__frame_regs	x25, x26, \op, 7
	__frame_regs	x27, x28, \op, 9

	.ifc		\op, ld
	.if		.Lframe_regcount == -1
	.error		"frame_push/frame_pop may not be nested"
	.endif
	ldp		x29, x30, [sp], #.Lframe_local_offset + .Lframe_extra
	.set		.Lframe_regcount, -1
	.endif
	.endm
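	/*
	 * Illustrative pairing in a hypothetical function: save x19-x21 plus
	 * x29/x30, reserve 16 bytes for locals, then unwind before returning:
	 *
	 *	frame_push	3, 16
	 *	...
	 *	frame_pop
	 *	ret
	 */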

/*
 * Set SCTLR_ELx to the @reg value, and invalidate the local icache
 * in the process. This is called when setting the MMU on.
 */
.macro set_sctlr, sreg, reg
	msr	\sreg, \reg
	isb
	/*
	 * Invalidate the local I-cache so that any instructions fetched
	 * speculatively from the PoC are discarded, since they may have
	 * been dynamically patched at the PoU.
	 */
	ic	iallu
	dsb	nsh
	isb
.endm

.macro set_sctlr_el1, reg
	set_sctlr sctlr_el1, \reg
.endm

.macro set_sctlr_el2, reg
	set_sctlr sctlr_el2, \reg
.endm

/*
 * Branch Target Identifier (BTI)
 */
	.macro  bti, targets
	.equ	.L__bti_targets_c, 34
	.equ	.L__bti_targets_j, 36
	.equ	.L__bti_targets_jc, 38
	hint	#.L__bti_targets_\targets
	.endm
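/*
 * Illustrative use: mark a valid landing pad for indirect calls ("bti c"
 * emits hint #34, which is a NOP on cores without BTI):
 *
 *	bti	c
 */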

/*
 * This macro emits a program property note section identifying
 * architecture features which require special handling, mainly for
 * use in assembly files included in the VDSO.
 */

#define NT_GNU_PROPERTY_TYPE_0  5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND      0xc0000000

#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI      (1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC      (1U << 1)

#ifdef CONFIG_ARM64_BTI_KERNEL
#define GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT		\
		((GNU_PROPERTY_AARCH64_FEATURE_1_BTI |	\
		  GNU_PROPERTY_AARCH64_FEATURE_1_PAC))
#endif

#ifdef GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
.macro emit_aarch64_feature_1_and, feat=GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT
	.pushsection .note.gnu.property, "a"
	.align  3
	.long   2f - 1f
	.long   6f - 3f
	.long   NT_GNU_PROPERTY_TYPE_0
1:      .string "GNU"
2:
	.align  3
3:      .long   GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long   5f - 4f
4:
	/*
	 * This is described with an array of char in the Linux API
	 * spec but the text and all other usage (including binutils,
	 * clang and GCC) treat this as a 32-bit value so no swizzling
	 * is required for big-endian.
	 */
	.long   \feat
5:
	.align  3
6:
	.popsection
.endm

#else
.macro emit_aarch64_feature_1_and, feat=0
.endm

#endif /* GNU_PROPERTY_AARCH64_FEATURE_1_DEFAULT */
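/*
 * Illustrative use: placed at the end of a .S file (for instance one that is
 * also built into the vDSO) to emit the default BTI/PAC property note, or a
 * no-op when the note is not configured:
 *
 *	emit_aarch64_feature_1_and
 */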

	.macro __mitigate_spectre_bhb_loop      tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_iter
	mov	\tmp, #32		// Patched to correct the immediate
alternative_cb_end
.Lspectre_bhb_loop\@:
	b	. + 4
	subs	\tmp, \tmp, #1
	b.ne	.Lspectre_bhb_loop\@
	sb
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_loop	tmp
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_loop_mitigation_enable
	b	.L_spectre_bhb_loop_done\@	// Patched to NOP
alternative_cb_end
	__mitigate_spectre_bhb_loop	\tmp
.L_spectre_bhb_loop_done\@:
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	/* Saves/restores x0-x3 to the stack */
	.macro __mitigate_spectre_bhb_fw
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	stp	x0, x1, [sp, #-16]!
	stp	x2, x3, [sp, #-16]!
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_3
alternative_cb ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm

	.macro mitigate_spectre_bhb_clear_insn
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
alternative_cb ARM64_ALWAYS_SYSTEM, spectre_bhb_patch_clearbhb
	/* Patched to NOP when not supported */
	clearbhb
	isb
alternative_cb_end
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	.endm
#endif	/* __ASM_ASSEMBLER_H */