xref: /linux/arch/powerpc/include/asm/ppc_asm.h (revision dfc4ae3372182a168146745def03d877f31fcf2f)
1 /*
2  * Copyright (C) 1995-1999 Gary Thomas, Paul Mackerras, Cort Dougan.
3  */
4 #ifndef _ASM_POWERPC_PPC_ASM_H
5 #define _ASM_POWERPC_PPC_ASM_H
6 
7 #include <linux/stringify.h>
8 #include <asm/asm-compat.h>
9 #include <asm/processor.h>
10 #include <asm/ppc-opcode.h>
11 #include <asm/firmware.h>
12 #include <asm/feature-fixups.h>
13 
14 #ifdef __ASSEMBLY__
15 
16 #define SZL			(BITS_PER_LONG/8)
17 
18 /*
19  * Stuff for accurate CPU time accounting.
20  * These macros handle transitions between user and system state
21  * in exception entry and exit and accumulate time to the
22  * user_time and system_time fields in the paca.
23  */
24 
25 #ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
26 #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)
27 #else
28 #define ACCOUNT_CPU_USER_EXIT(ptr, ra, rb)				\
29 	MFTB(ra);			/* get timebase */		\
30 	PPC_LL	rb, ACCOUNT_STARTTIME(ptr);				\
31 	PPC_STL	ra, ACCOUNT_STARTTIME_USER(ptr);			\
32 	subf	rb,rb,ra;		/* subtract start value */	\
33 	PPC_LL	ra, ACCOUNT_SYSTEM_TIME(ptr);				\
34 	add	ra,ra,rb;		/* add on to system time */	\
35 	PPC_STL	ra, ACCOUNT_SYSTEM_TIME(ptr)
36 #endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
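
/*
 * A minimal usage sketch (assuming, as on the 64-bit exception exit
 * path, that the paca pointer is in r13 and r11/r12 are free scratch
 * registers; the register choice is illustrative, not mandated here):
 *
 *	ACCOUNT_CPU_USER_EXIT(r13, r11, r12)
 *
 * With CONFIG_VIRT_CPU_ACCOUNTING_NATIVE this reads the timebase,
 * records it as the new user start time and adds the elapsed ticks to
 * the accumulated system time; otherwise it expands to nothing.
 */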
37 
38 /*
39  * Macros for storing registers into and loading registers from
40  * exception frames.
41  */
42 #ifdef __powerpc64__
43 #define SAVE_GPR(n, base)	std	n,GPR0+8*(n)(base)
44 #define REST_GPR(n, base)	ld	n,GPR0+8*(n)(base)
45 #define SAVE_NVGPRS(base)	SAVE_8GPRS(14, base); SAVE_10GPRS(22, base)
46 #define REST_NVGPRS(base)	REST_8GPRS(14, base); REST_10GPRS(22, base)
47 #else
48 #define SAVE_GPR(n, base)	stw	n,GPR0+4*(n)(base)
49 #define REST_GPR(n, base)	lwz	n,GPR0+4*(n)(base)
50 #define SAVE_NVGPRS(base)	stmw	13, GPR0+4*13(base)
51 #define REST_NVGPRS(base)	lmw	13, GPR0+4*13(base)
52 #endif
53 
54 #define SAVE_2GPRS(n, base)	SAVE_GPR(n, base); SAVE_GPR(n+1, base)
55 #define SAVE_4GPRS(n, base)	SAVE_2GPRS(n, base); SAVE_2GPRS(n+2, base)
56 #define SAVE_8GPRS(n, base)	SAVE_4GPRS(n, base); SAVE_4GPRS(n+4, base)
57 #define SAVE_10GPRS(n, base)	SAVE_8GPRS(n, base); SAVE_2GPRS(n+8, base)
58 #define REST_2GPRS(n, base)	REST_GPR(n, base); REST_GPR(n+1, base)
59 #define REST_4GPRS(n, base)	REST_2GPRS(n, base); REST_2GPRS(n+2, base)
60 #define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
61 #define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
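
/*
 * An illustrative expansion (assuming the exception frame base is in
 * r1): SAVE_2GPRS(3, r1) emits one store per register, putting r3 at
 * GPR0+8*3(r1) and r4 at GPR0+8*4(r1) on 64-bit (std), or the same
 * layout with a stride of 4 on 32-bit (stw).
 */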
62 
63 #define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
64 #define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
65 #define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
66 #define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
67 #define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
68 #define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
69 #define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
70 #define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
71 #define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
72 #define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
73 #define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
74 #define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)
75 
76 #define SAVE_VR(n,b,base)	li b,16*(n);  stvx n,base,b
77 #define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
78 #define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
79 #define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
80 #define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
81 #define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
82 #define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
83 #define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
84 #define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
85 #define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
86 #define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
87 #define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
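
/*
 * For example (a sketch, with r3 holding the save area address and r4
 * used as the scratch index register):
 *
 *	SAVE_32VRS(0, r4, r3)
 *
 * stores v0-v31 at 16-byte intervals from r3, loading each offset into
 * r4 for the indexed stvx form; REST_32VRS mirrors it with lvx.
 */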
88 
89 #ifdef __BIG_ENDIAN__
90 #define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
91 #define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
92 #else
93 #define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
94 					STXVD2X(n,b,base);	\
95 					XXSWAPD(n,n)
96 
97 #define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
98 					XXSWAPD(n,n)
99 #endif
100 /* Save the lower 32 VSRs in the thread VSR region */
101 #define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
102 #define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
103 #define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
104 #define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
105 #define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
106 #define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
107 #define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
108 #define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
109 #define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
110 #define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
111 #define REST_16VSRS(n,b,base)	REST_8VSRS(n,b,base); REST_8VSRS(n+8,b,base)
112 #define REST_32VSRS(n,b,base)	REST_16VSRS(n,b,base); REST_16VSRS(n+16,b,base)
113 
114 /*
115  * b = base register for addressing, o = base offset from register of 1st EVR
116  * n = first EVR, s = scratch
117  */
118 #define SAVE_EVR(n,s,b,o)	evmergehi s,s,n; stw s,o+4*(n)(b)
119 #define SAVE_2EVRS(n,s,b,o)	SAVE_EVR(n,s,b,o); SAVE_EVR(n+1,s,b,o)
120 #define SAVE_4EVRS(n,s,b,o)	SAVE_2EVRS(n,s,b,o); SAVE_2EVRS(n+2,s,b,o)
121 #define SAVE_8EVRS(n,s,b,o)	SAVE_4EVRS(n,s,b,o); SAVE_4EVRS(n+4,s,b,o)
122 #define SAVE_16EVRS(n,s,b,o)	SAVE_8EVRS(n,s,b,o); SAVE_8EVRS(n+8,s,b,o)
123 #define SAVE_32EVRS(n,s,b,o)	SAVE_16EVRS(n,s,b,o); SAVE_16EVRS(n+16,s,b,o)
124 #define REST_EVR(n,s,b,o)	lwz s,o+4*(n)(b); evmergelo n,s,n
125 #define REST_2EVRS(n,s,b,o)	REST_EVR(n,s,b,o); REST_EVR(n+1,s,b,o)
126 #define REST_4EVRS(n,s,b,o)	REST_2EVRS(n,s,b,o); REST_2EVRS(n+2,s,b,o)
127 #define REST_8EVRS(n,s,b,o)	REST_4EVRS(n,s,b,o); REST_4EVRS(n+4,s,b,o)
128 #define REST_16EVRS(n,s,b,o)	REST_8EVRS(n,s,b,o); REST_8EVRS(n+8,s,b,o)
129 #define REST_32EVRS(n,s,b,o)	REST_16EVRS(n,s,b,o); REST_16EVRS(n+16,s,b,o)
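
/*
 * For instance (illustrative; THREAD_EVR0 is assumed to be the
 * asm-offsets constant for thread_struct.evr[], r4 is a scratch
 * register and r11 stands in for a pointer to the thread_struct):
 *
 *	SAVE_32EVRS(0, r4, r11, THREAD_EVR0)
 *
 * merges the high half of each of evr0-evr31 into r4 and stores it at
 * THREAD_EVR0 + 4*n off r11.
 */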
130 
131 /* Macros to adjust thread priority for hardware multithreading */
132 #define HMT_VERY_LOW	or	31,31,31	# very low priority
133 #define HMT_LOW		or	1,1,1
134 #define HMT_MEDIUM_LOW  or	6,6,6		# medium low priority
135 #define HMT_MEDIUM	or	2,2,2
136 #define HMT_MEDIUM_HIGH or	5,5,5		# medium high priority
137 #define HMT_HIGH	or	3,3,3
138 #define HMT_EXTRA_HIGH	or	7,7,7		# power7 only
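
/*
 * These are no-op "or rN,rN,rN" forms that encode SMT thread priority
 * hints. A typical (illustrative) pattern, assuming r3 holds the
 * address of a lock word and r6 is scratch, is to drop priority while
 * spinning and restore it once the lock looks free:
 *
 * 1:	HMT_LOW
 *	lwzx	r6,0,r3
 *	cmpwi	r6,0
 *	bne	1b
 *	HMT_MEDIUM
 */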
139 
140 #ifdef CONFIG_PPC64
141 #define ULONG_SIZE 	8
142 #else
143 #define ULONG_SIZE	4
144 #endif
145 #define __VCPU_GPR(n)	(VCPU_GPRS + (n * ULONG_SIZE))
146 #define VCPU_GPR(n)	__VCPU_GPR(__REG_##n)
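
/*
 * For example (assuming __REG_R4 from ppc-opcode.h expands to 4, and
 * with the vcpu pointer in r9 as in the HV interrupt handlers),
 * VCPU_GPR(R4) evaluates to VCPU_GPRS + 4 * ULONG_SIZE and can be used
 * directly as a load/store displacement:
 *
 *	PPC_STL	r4, VCPU_GPR(R4)(r9)
 */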
147 
148 #ifdef __KERNEL__
149 
150 /*
151  * We use __powerpc64__ here because we want the compat VDSO to use the 32-bit
152  * version below in the else case of the ifdef.
153  */
154 #ifdef __powerpc64__
155 
156 #define STACKFRAMESIZE 256
157 #define __STK_REG(i)   (112 + ((i)-14)*8)
158 #define STK_REG(i)     __STK_REG(__REG_##i)
159 
160 #ifdef PPC64_ELF_ABI_v2
161 #define STK_GOT		24
162 #define __STK_PARAM(i)	(32 + ((i)-3)*8)
163 #else
164 #define STK_GOT		40
165 #define __STK_PARAM(i)	(48 + ((i)-3)*8)
166 #endif
167 #define STK_PARAM(i)	__STK_PARAM(__REG_##i)
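
/*
 * A usage sketch: an assembly routine that allocates its own
 * STACKFRAMESIZE frame can spill non-volatile registers into their
 * dedicated slots (register choice illustrative):
 *
 *	stdu	r1,-STACKFRAMESIZE(r1)
 *	std	r31,STK_REG(R31)(r1)
 *	...
 *	ld	r31,STK_REG(R31)(r1)
 *	addi	r1,r1,STACKFRAMESIZE
 */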
168 
169 #ifdef PPC64_ELF_ABI_v2
170 
171 #define _GLOBAL(name) \
172 	.align 2 ; \
173 	.type name,@function; \
174 	.globl name; \
175 name:
176 
177 #define _GLOBAL_TOC(name) \
178 	.align 2 ; \
179 	.type name,@function; \
180 	.globl name; \
181 name: \
182 0:	addis r2,r12,(.TOC.-0b)@ha; \
183 	addi r2,r2,(.TOC.-0b)@l; \
184 	.localentry name,.-name
185 
186 #define DOTSYM(a)	a
187 
188 #else
189 
190 #define XGLUE(a,b) a##b
191 #define GLUE(a,b) XGLUE(a,b)
192 
193 #define _GLOBAL(name) \
194 	.align 2 ; \
195 	.globl name; \
196 	.globl GLUE(.,name); \
197 	.pushsection ".opd","aw"; \
198 name: \
199 	.quad GLUE(.,name); \
200 	.quad .TOC.@tocbase; \
201 	.quad 0; \
202 	.popsection; \
203 	.type GLUE(.,name),@function; \
204 GLUE(.,name):
205 
206 #define _GLOBAL_TOC(name) _GLOBAL(name)
207 
208 #define DOTSYM(a)	GLUE(.,a)
209 
210 #endif
211 
212 #else /* 32-bit */
213 
214 #define _ENTRY(n)	\
215 	.globl n;	\
216 n:
217 
218 #define _GLOBAL(n)	\
219 	.stabs __stringify(n:F-1),N_FUN,0,0,n;\
220 	.globl n;	\
221 n:
222 
223 #define _GLOBAL_TOC(name) _GLOBAL(name)
224 
225 #define DOTSYM(a)	a
226 
227 #endif
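
/*
 * A usage sketch (the symbol name below is hypothetical): an assembly
 * routine meant to be called from C is introduced with _GLOBAL(), or
 * with _GLOBAL_TOC() when it needs r2 (the TOC pointer) established at
 * its ELFv2 global entry point:
 *
 * _GLOBAL(my_helper)
 *	blr
 */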
228 
229 /*
230  * __kprobes (the C annotation) puts the symbol into the .kprobes.text
231  * section, which gets emitted at the end of regular text.
232  *
233  * _ASM_NOKPROBE_SYMBOL and NOKPROBE_SYMBOL just add the symbol to
234  * a blacklist. The former is for core kprobe functions/data, the
235  * latter is for those that incidentally must be excluded from probing
236  * and allows them to be linked at a more optimal location within text.
237  */
238 #ifdef CONFIG_KPROBES
239 #define _ASM_NOKPROBE_SYMBOL(entry)			\
240 	.pushsection "_kprobe_blacklist","aw";		\
241 	PPC_LONG (entry) ;				\
242 	.popsection
243 #else
244 #define _ASM_NOKPROBE_SYMBOL(entry)
245 #endif
246 
247 #define FUNC_START(name)	_GLOBAL(name)
248 #define FUNC_END(name)
249 
250 /*
251  * LOAD_REG_IMMEDIATE(rn, expr)
252  *   Loads the value of the constant expression 'expr' into register 'rn'
253  *   using immediate instructions only.  Use this when it's important not
254  *   to reference other data (i.e. on ppc64 when the TOC pointer is not
255  *   valid) and when 'expr' is a constant or absolute address.
256  *
257  * LOAD_REG_ADDR(rn, name)
258  *   Loads the address of label 'name' into register 'rn'.  Use this when
259  *   you don't particularly need immediate instructions only, but you need
260  *   the whole address in one register (e.g. it's a structure address and
261  *   you want to access various offsets within it).  On ppc32 this is
262  *   identical to LOAD_REG_IMMEDIATE.
263  *
264  * LOAD_REG_ADDR_PIC(rn, name)
265  *   Loads the address of label 'name' into register 'rn'. Use this when
266  *   the kernel doesn't run at the linked or relocated address. Please
267  *   note that this macro will clobber the lr register.
268  *
269  * LOAD_REG_ADDRBASE(rn, name)
270  * ADDROFF(name)
271  *   LOAD_REG_ADDRBASE loads part of the address of label 'name' into
272  *   register 'rn'.  ADDROFF(name) returns the remainder of the address as
273  *   a constant expression.  ADDROFF(name) is a signed expression < 16 bits
274  *   in size, so is suitable for use directly as an offset in load and store
275  *   instructions.  Use this when loading/storing a single word or less as:
276  *      LOAD_REG_ADDRBASE(rX, name)
277  *      ld	rY,ADDROFF(name)(rX)
278  */
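
/*
 * For instance (illustrative only), loading the address of the kernel
 * symbol init_task into r3:
 *
 *      LOAD_REG_ADDR(r3, init_task)
 *
 * goes via the TOC/GOT on 64-bit and a lis/addi pair on 32-bit,
 * whereas LOAD_REG_IMMEDIATE(r3, 0x4000) builds the constant purely
 * from immediate instructions.
 */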
279 
280 /* Be careful, this will clobber the lr register. */
281 #define LOAD_REG_ADDR_PIC(reg, name)		\
282 	bl	0f;				\
283 0:	mflr	reg;				\
284 	addis	reg,reg,(name - 0b)@ha;		\
285 	addi	reg,reg,(name - 0b)@l;
286 
287 #if defined(__powerpc64__) && defined(HAVE_AS_ATHIGH)
288 #define __AS_ATHIGH high
289 #else
290 #define __AS_ATHIGH h
291 #endif
292 
293 .macro __LOAD_REG_IMMEDIATE_32 r, x
294 	.if (\x) >= 0x8000 || (\x) < -0x8000
295 		lis \r, (\x)@__AS_ATHIGH
296 		.if (\x) & 0xffff != 0
297 			ori \r, \r, (\x)@l
298 		.endif
299 	.else
300 		li \r, (\x)@l
301 	.endif
302 .endm
303 
304 .macro __LOAD_REG_IMMEDIATE r, x
305 	.if (\x) >= 0x80000000 || (\x) < -0x80000000
306 		__LOAD_REG_IMMEDIATE_32 \r, (\x) >> 32
307 		sldi	\r, \r, 32
308 		.if (\x) & 0xffff0000 != 0
309 			oris \r, \r, (\x)@__AS_ATHIGH
310 		.endif
311 		.if (\x) & 0xffff != 0
312 			ori \r, \r, (\x)@l
313 		.endif
314 	.else
315 		__LOAD_REG_IMMEDIATE_32 \r, \x
316 	.endif
317 .endm
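
/*
 * A worked illustration (64-bit case, constant chosen arbitrarily):
 *
 *	__LOAD_REG_IMMEDIATE r4, 0x123456789
 *
 * emits roughly
 *
 *	li	r4,1
 *	sldi	r4,r4,32
 *	oris	r4,r4,0x2345
 *	ori	r4,r4,0x6789
 *
 * i.e. the value is built from immediates only; the oris/ori steps are
 * skipped when the corresponding halfword of the constant is zero.
 */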
318 
319 #ifdef __powerpc64__
320 
321 #define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE reg, expr
322 
323 #define LOAD_REG_IMMEDIATE_SYM(reg, tmp, expr)	\
324 	lis	tmp, (expr)@highest;		\
325 	lis	reg, (expr)@__AS_ATHIGH;	\
326 	ori	tmp, tmp, (expr)@higher;	\
327 	ori	reg, reg, (expr)@l;		\
328 	rldimi	reg, tmp, 32, 0
329 
330 #define LOAD_REG_ADDR(reg,name)			\
331 	ld	reg,name@got(r2)
332 
333 #define LOAD_REG_ADDRBASE(reg,name)	LOAD_REG_ADDR(reg,name)
334 #define ADDROFF(name)			0
335 
336 /* offsets for stack frame layout */
337 #define LRSAVE	16
338 
339 #else /* 32-bit */
340 
341 #define LOAD_REG_IMMEDIATE(reg, expr) __LOAD_REG_IMMEDIATE_32 reg, expr
342 
343 #define LOAD_REG_IMMEDIATE_SYM(reg,expr)		\
344 	lis	reg,(expr)@ha;		\
345 	addi	reg,reg,(expr)@l;
346 
347 #define LOAD_REG_ADDR(reg,name)		LOAD_REG_IMMEDIATE_SYM(reg, name)
348 
349 #define LOAD_REG_ADDRBASE(reg, name)	lis	reg,name@ha
350 #define ADDROFF(name)			name@l
351 
352 /* offsets for stack frame layout */
353 #define LRSAVE	4
354 
355 #endif
356 
357 /* various errata or part fixups */
358 #if defined(CONFIG_PPC_CELL) || defined(CONFIG_PPC_FSL_BOOK3E)
359 #define MFTB(dest)			\
360 90:	mfspr dest, SPRN_TBRL;		\
361 BEGIN_FTR_SECTION_NESTED(96);		\
362 	cmpwi dest,0;			\
363 	beq-  90b;			\
364 END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
365 #else
366 #define MFTB(dest)			MFTBL(dest)
367 #endif
368 
369 #ifdef CONFIG_PPC_8xx
370 #define MFTBL(dest)			mftb dest
371 #define MFTBU(dest)			mftbu dest
372 #else
373 #define MFTBL(dest)			mfspr dest, SPRN_TBRL
374 #define MFTBU(dest)			mfspr dest, SPRN_TBRU
375 #endif
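
/*
 * On 32-bit, a coherent 64-bit timebase read conventionally uses the
 * standard retry loop (a sketch; r3/r4/r5 are arbitrary scratch
 * registers here):
 *
 * 1:	MFTBU(r3)
 *	MFTBL(r4)
 *	MFTBU(r5)
 *	cmpw	r5,r3
 *	bne	1b
 */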
376 
377 #ifndef CONFIG_SMP
378 #define TLBSYNC
379 #else
380 #define TLBSYNC		tlbsync; sync
381 #endif
382 
383 #ifdef CONFIG_PPC64
384 #define MTOCRF(FXM, RS)			\
385 	BEGIN_FTR_SECTION_NESTED(848);	\
386 	mtcrf	(FXM), RS;		\
387 	FTR_SECTION_ELSE_NESTED(848);	\
388 	mtocrf (FXM), RS;		\
389 	ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_NOEXECUTE, 848)
390 #endif
391 
392 /*
393  * The tlbia instruction is not implemented on the PPC 603 or 601; however, on
394  * the 403GCX and 405GP tlbia IS defined and tlbie is not.
395  * All of these instructions exist in the 8xx, they have magical powers,
396  * and they must be used.
397  */
398 
399 #if !defined(CONFIG_4xx) && !defined(CONFIG_PPC_8xx)
400 #define tlbia					\
401 	li	r4,1024;			\
402 	mtctr	r4;				\
403 	lis	r4,KERNELBASE@h;		\
404 	.machine push;				\
405 	.machine "power4";			\
406 0:	tlbie	r4;				\
407 	.machine pop;				\
408 	addi	r4,r4,0x1000;			\
409 	bdnz	0b
410 #endif
411 
412 
413 #ifdef CONFIG_IBM440EP_ERR42
414 #define PPC440EP_ERR42 isync
415 #else
416 #define PPC440EP_ERR42
417 #endif
418 
419 /* The following stops all load and store data streams associated with stream
420  * ID (i.e. streams created explicitly).  The embedded and server mnemonics for
421  * dcbt are different, so this must only be used for server.
422  */
423 #define DCBT_BOOK3S_STOP_ALL_STREAM_IDS(scratch)	\
424        lis     scratch,0x60000000@h;			\
425        dcbt    0,scratch,0b01010
426 
427 /*
428  * toreal/fromreal/tophys/tovirt macros. 32-bit BookE makes them
429  * keep the address intact to be compatible with code shared with
430  * 32-bit classic.
431  *
432  * On the other hand, I find it useful to have them behave as expected
433  * by their names (i.e. always do the addition) on 64-bit BookE.
434  */
435 #if defined(CONFIG_BOOKE) && !defined(CONFIG_PPC64)
436 #define toreal(rd)
437 #define fromreal(rd)
438 
439 /*
440  * We use addis to ensure compatibility with the "classic" ppc versions of
441  * these macros, which use rs = 0 to get the tophys offset in rd, rather than
442  * converting the address in r0, and so this version has to do that too
443  * (i.e. set register rd to 0 when rs == 0).
444  */
445 #define tophys(rd,rs)				\
446 	addis	rd,rs,0
447 
448 #define tovirt(rd,rs)				\
449 	addis	rd,rs,0
450 
451 #elif defined(CONFIG_PPC64)
452 #define toreal(rd)		/* we can access c000... in real mode */
453 #define fromreal(rd)
454 
455 #define tophys(rd,rs)                           \
456 	clrldi	rd,rs,2
457 
458 #define tovirt(rd,rs)                           \
459 	rotldi	rd,rs,16;			\
460 	ori	rd,rd,((KERNELBASE>>48)&0xFFFF);\
461 	rotldi	rd,rd,48
462 #else
463 #define toreal(rd)	tophys(rd,rd)
464 #define fromreal(rd)	tovirt(rd,rd)
465 
466 #define tophys(rd, rs)	addis	rd, rs, -PAGE_OFFSET@h
467 #define tovirt(rd, rs)	addis	rd, rs, PAGE_OFFSET@h
468 #endif
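
/*
 * For example, on 64-bit tophys(r8,r3) just clears the top two address
 * bits, turning a 0xc000... linear-map address in r3 into its real
 * address in r8, while tovirt() reinstates the 0xc000... prefix
 * (register choice illustrative).
 */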
469 
470 #ifdef CONFIG_PPC_BOOK3S_64
471 #define MTMSRD(r)	mtmsrd	r
472 #define MTMSR_EERI(reg)	mtmsrd	reg,1
473 #else
474 #define MTMSRD(r)	mtmsr	r
475 #define MTMSR_EERI(reg)	mtmsr	reg
476 #endif
477 
478 #endif /* __KERNEL__ */
479 
480 /* The boring bits... */
481 
482 /* Condition Register Bit Fields */
483 
484 #define	cr0	0
485 #define	cr1	1
486 #define	cr2	2
487 #define	cr3	3
488 #define	cr4	4
489 #define	cr5	5
490 #define	cr6	6
491 #define	cr7	7
492 
493 
494 /*
495  * General Purpose Registers (GPRs)
496  *
497  * The lower case r0-r31 should be used in preference to the upper
498  * case R0-R31 as they provide more error checking in the assembler.
499  * Use R0-R31 only when really necessary.
500  */
501 
502 #define	r0	%r0
503 #define	r1	%r1
504 #define	r2	%r2
505 #define	r3	%r3
506 #define	r4	%r4
507 #define	r5	%r5
508 #define	r6	%r6
509 #define	r7	%r7
510 #define	r8	%r8
511 #define	r9	%r9
512 #define	r10	%r10
513 #define	r11	%r11
514 #define	r12	%r12
515 #define	r13	%r13
516 #define	r14	%r14
517 #define	r15	%r15
518 #define	r16	%r16
519 #define	r17	%r17
520 #define	r18	%r18
521 #define	r19	%r19
522 #define	r20	%r20
523 #define	r21	%r21
524 #define	r22	%r22
525 #define	r23	%r23
526 #define	r24	%r24
527 #define	r25	%r25
528 #define	r26	%r26
529 #define	r27	%r27
530 #define	r28	%r28
531 #define	r29	%r29
532 #define	r30	%r30
533 #define	r31	%r31
534 
535 
536 /* Floating Point Registers (FPRs) */
537 
538 #define	fr0	0
539 #define	fr1	1
540 #define	fr2	2
541 #define	fr3	3
542 #define	fr4	4
543 #define	fr5	5
544 #define	fr6	6
545 #define	fr7	7
546 #define	fr8	8
547 #define	fr9	9
548 #define	fr10	10
549 #define	fr11	11
550 #define	fr12	12
551 #define	fr13	13
552 #define	fr14	14
553 #define	fr15	15
554 #define	fr16	16
555 #define	fr17	17
556 #define	fr18	18
557 #define	fr19	19
558 #define	fr20	20
559 #define	fr21	21
560 #define	fr22	22
561 #define	fr23	23
562 #define	fr24	24
563 #define	fr25	25
564 #define	fr26	26
565 #define	fr27	27
566 #define	fr28	28
567 #define	fr29	29
568 #define	fr30	30
569 #define	fr31	31
570 
571 /* AltiVec Registers (VPRs) */
572 
573 #define	v0	0
574 #define	v1	1
575 #define	v2	2
576 #define	v3	3
577 #define	v4	4
578 #define	v5	5
579 #define	v6	6
580 #define	v7	7
581 #define	v8	8
582 #define	v9	9
583 #define	v10	10
584 #define	v11	11
585 #define	v12	12
586 #define	v13	13
587 #define	v14	14
588 #define	v15	15
589 #define	v16	16
590 #define	v17	17
591 #define	v18	18
592 #define	v19	19
593 #define	v20	20
594 #define	v21	21
595 #define	v22	22
596 #define	v23	23
597 #define	v24	24
598 #define	v25	25
599 #define	v26	26
600 #define	v27	27
601 #define	v28	28
602 #define	v29	29
603 #define	v30	30
604 #define	v31	31
605 
606 /* VSX Registers (VSRs) */
607 
608 #define	vs0	0
609 #define	vs1	1
610 #define	vs2	2
611 #define	vs3	3
612 #define	vs4	4
613 #define	vs5	5
614 #define	vs6	6
615 #define	vs7	7
616 #define	vs8	8
617 #define	vs9	9
618 #define	vs10	10
619 #define	vs11	11
620 #define	vs12	12
621 #define	vs13	13
622 #define	vs14	14
623 #define	vs15	15
624 #define	vs16	16
625 #define	vs17	17
626 #define	vs18	18
627 #define	vs19	19
628 #define	vs20	20
629 #define	vs21	21
630 #define	vs22	22
631 #define	vs23	23
632 #define	vs24	24
633 #define	vs25	25
634 #define	vs26	26
635 #define	vs27	27
636 #define	vs28	28
637 #define	vs29	29
638 #define	vs30	30
639 #define	vs31	31
640 #define	vs32	32
641 #define	vs33	33
642 #define	vs34	34
643 #define	vs35	35
644 #define	vs36	36
645 #define	vs37	37
646 #define	vs38	38
647 #define	vs39	39
648 #define	vs40	40
649 #define	vs41	41
650 #define	vs42	42
651 #define	vs43	43
652 #define	vs44	44
653 #define	vs45	45
654 #define	vs46	46
655 #define	vs47	47
656 #define	vs48	48
657 #define	vs49	49
658 #define	vs50	50
659 #define	vs51	51
660 #define	vs52	52
661 #define	vs53	53
662 #define	vs54	54
663 #define	vs55	55
664 #define	vs56	56
665 #define	vs57	57
666 #define	vs58	58
667 #define	vs59	59
668 #define	vs60	60
669 #define	vs61	61
670 #define	vs62	62
671 #define	vs63	63
672 
673 /* SPE Registers (EVPRs) */
674 
675 #define	evr0	0
676 #define	evr1	1
677 #define	evr2	2
678 #define	evr3	3
679 #define	evr4	4
680 #define	evr5	5
681 #define	evr6	6
682 #define	evr7	7
683 #define	evr8	8
684 #define	evr9	9
685 #define	evr10	10
686 #define	evr11	11
687 #define	evr12	12
688 #define	evr13	13
689 #define	evr14	14
690 #define	evr15	15
691 #define	evr16	16
692 #define	evr17	17
693 #define	evr18	18
694 #define	evr19	19
695 #define	evr20	20
696 #define	evr21	21
697 #define	evr22	22
698 #define	evr23	23
699 #define	evr24	24
700 #define	evr25	25
701 #define	evr26	26
702 #define	evr27	27
703 #define	evr28	28
704 #define	evr29	29
705 #define	evr30	30
706 #define	evr31	31
707 
708 /* some stab codes */
709 #define N_FUN	36
710 #define N_RSYM	64
711 #define N_SLINE	68
712 #define N_SO	100
713 
714 #define RFSCV	.long 0x4c0000a4
715 
716 /*
717  * Create an endian fixup trampoline
718  *
719  * This starts with a "tdi 0,0,0x48" instruction which is
720  * essentially a "trap never", and thus akin to a nop.
721  *
722  * When read with the wrong endianness, however, the same opcode
723  * decodes as a b . + 8, skipping the following instruction.
724  *
725  * So essentially we use that trick to execute the following
726  * trampoline in "reverse endian" if we are running with the
727  * MSR_LE bit set the "wrong" way for whatever endianness the
728  * kernel is built for.
729  */
730 
731 #ifdef CONFIG_PPC_BOOK3E
732 #define FIXUP_ENDIAN
733 #else
734 /*
735  * This version may be used in HV or non-HV context.
736  * MSR[EE] must be disabled.
737  */
738 #define FIXUP_ENDIAN						   \
739 	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
740 	b     191f;	  /* Skip trampoline if endian is good	*/ \
741 	.long 0xa600607d; /* mfmsr r11				*/ \
742 	.long 0x01006b69; /* xori r11,r11,1			*/ \
743 	.long 0x00004039; /* li r10,0				*/ \
744 	.long 0x6401417d; /* mtmsrd r10,1			*/ \
745 	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
746 	.long 0xa602487d; /* mflr r10				*/ \
747 	.long 0x14004a39; /* addi r10,r10,20			*/ \
748 	.long 0xa6035a7d; /* mtsrr0 r10				*/ \
749 	.long 0xa6037b7d; /* mtsrr1 r11				*/ \
750 	.long 0x2400004c; /* rfid				*/ \
751 191:
752 
753 /*
754  * This version may only be used with MSR[HV]=1
755  * - Does not clear MSR[RI], so more robust.
756  * - Slightly smaller and faster.
757  */
758 #define FIXUP_ENDIAN_HV						   \
759 	tdi   0,0,0x48;	  /* Reverse endian of b . + 8		*/ \
760 	b     191f;	  /* Skip trampoline if endian is good	*/ \
761 	.long 0xa600607d; /* mfmsr r11				*/ \
762 	.long 0x01006b69; /* xori r11,r11,1			*/ \
763 	.long 0x05009f42; /* bcl 20,31,$+4			*/ \
764 	.long 0xa602487d; /* mflr r10				*/ \
765 	.long 0x14004a39; /* addi r10,r10,20			*/ \
766 	.long 0xa64b5a7d; /* mthsrr0 r10			*/ \
767 	.long 0xa64b7b7d; /* mthsrr1 r11			*/ \
768 	.long 0x2402004c; /* hrfid				*/ \
769 191:
770 
771 #endif /* !CONFIG_PPC_BOOK3E */
772 
773 #endif /*  __ASSEMBLY__ */
774 
775 /*
776  * Helper macro for exception table entries
777  */
778 #define EX_TABLE(_fault, _target)		\
779 	stringify_in_c(.section __ex_table,"a";)\
780 	stringify_in_c(.balign 4;)		\
781 	stringify_in_c(.long (_fault) - . ;)	\
782 	stringify_in_c(.long (_target) - . ;)	\
783 	stringify_in_c(.previous)
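
/*
 * A usage sketch (labels and the fixup body are hypothetical): mark an
 * instruction that may take an exception, together with the local
 * label to branch to if it faults:
 *
 * 1:	lwz	r0,0(r3)
 *	...
 * 2:	li	r3,-EFAULT
 *	blr
 *	EX_TABLE(1b, 2b)
 */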
784 
785 #ifdef CONFIG_PPC_FSL_BOOK3E
786 #define BTB_FLUSH(reg)			\
787 	lis reg,BUCSR_INIT@h;		\
788 	ori reg,reg,BUCSR_INIT@l;	\
789 	mtspr SPRN_BUCSR,reg;		\
790 	isync;
791 #else
792 #define BTB_FLUSH(reg)
793 #endif /* CONFIG_PPC_FSL_BOOK3E */
794 
795 #endif /* _ASM_POWERPC_PPC_ASM_H */
796