xref: /linux/arch/sparc/kernel/helpers.S (revision a33f32244d8550da8b4a26e277ce07d5c6d158b5)
1	.align	32
2	.globl	__flushw_user
3	.type	__flushw_user,#function
4__flushw_user:
5	rdpr	%otherwin, %g1
6	brz,pn	%g1, 2f
7	 clr	%g2
81:	save	%sp, -128, %sp
9	rdpr	%otherwin, %g1
10	brnz,pt	%g1, 1b
11	 add	%g2, 1, %g2
121:	sub	%g2, 1, %g2
13	brnz,pt	%g2, 1b
14	 restore %g0, %g0, %g0
152:	retl
16	 nop
17	.size	__flushw_user,.-__flushw_user
18
19	/* Flush %fp and %i7 to the stack for all register
20	 * windows active inside of the cpu.  This allows
21	 * show_stack_trace() to avoid using an expensive
22	 * 'flushw'.
23	 */
24	.globl		stack_trace_flush
25	.type		stack_trace_flush,#function
26stack_trace_flush:
27	rdpr		%pstate, %o0
28	wrpr		%o0, PSTATE_IE, %pstate
29
30	rdpr		%cwp, %g1
31	rdpr		%canrestore, %g2
32	sub		%g1, 1, %g3
33
341:	brz,pn		%g2, 2f
35	 sub		%g2, 1, %g2
36	wrpr		%g3, %cwp
37	stx		%fp, [%sp + STACK_BIAS + RW_V9_I6]
38	stx		%i7, [%sp + STACK_BIAS + RW_V9_I7]
39	ba,pt		%xcc, 1b
40	 sub		%g3, 1, %g3
41
422:	wrpr		%g1, %cwp
43	wrpr		%o0, %pstate
44
45	retl
46	 nop
47	.size		stack_trace_flush,.-stack_trace_flush
48
#ifdef CONFIG_PERF_EVENTS
	/* perf_arch_fetch_caller_regs: synthesize a minimal pt_regs
	 * snapshot (tstate/tpc/tnpc and frame pointer) describing the
	 * caller 'skip' frames above us, for the perf callchain code.
	 *
	 * In:    %o0 = struct pt_regs * to fill
	 *        %o2 = number of frames to skip
	 *        (%o1 is never referenced below; presumably the 'ip'
	 *         argument of the C prototype -- confirm against the
	 *         declaration in the perf headers.)
	 * Out:   PT_V9_FP, PT_V9_TSTATE, PT_V9_TPC, PT_V9_TNPC stored.
	 * Clobbers: %o5, %g1, %g2, %g3, %g7.
	 */
	.globl		perf_arch_fetch_caller_regs
	.type		perf_arch_fetch_caller_regs,#function
perf_arch_fetch_caller_regs:
	/* We always read the %pstate into %o5 since we will use
	 * that to construct a fake %tstate to store into the regs.
	 */
	rdpr		%pstate, %o5
	brz,pn		%o2, 50f		! skip == 0: our own %fp/%i7 suffice
	 mov		%o2, %g7		! (delay slot) %g7 = frames to skip

	/* Turn off interrupts while we walk around the register
	 * window by hand.  (wrpr xors, toggling PSTATE_IE off.)
	 */
	wrpr		%o5, PSTATE_IE, %pstate

	/* The %canrestore tells us how many register windows are
	 * still live in the chip above us, past that we have to
	 * walk the frame as saved on the stack.   We stash away
	 * the %cwp in %g1 so we can return back to the original
	 * register window.
	 */
	rdpr		%cwp, %g1		! %g1 = original window
	rdpr		%canrestore, %g2	! %g2 = live windows remaining
	sub		%g1, 1, %g3		! %g3 = next window to inspect

	/* We have the skip count in %g7, if it hits zero then
	 * %fp/%i7 are the registers we need.  Otherwise if our
	 * %canrestore count maintained in %g2 hits zero we have
	 * to start traversing the stack.
	 */
10:	brz,pn		%g2, 4f			! chip windows exhausted -> walk stack
	 sub		%g2, 1, %g2		! (delay slot) consume one window
	wrpr		%g3, %cwp		! rotate to the window below
	subcc		%g7, 1, %g7		! one more frame skipped
	bne,pt		%xcc, 10b
	 sub		%g3, 1, %g3		! (delay slot) step down a window

	/* We found the values we need in the cpu's register
	 * windows.
	 */
	mov		%fp, %g3
	ba,pt		%xcc, 3f		! rejoin common path (restores %cwp/IE)
	 mov		%i7, %g2		! (delay slot)

	/* skip == 0 fast path: interrupts were never disabled and
	 * %cwp never moved, so branch past the restore at 3: below.
	 */
50:	mov		%fp, %g3
	ba,pt		%xcc, 2f
	 mov		%i7, %g2		! (delay slot)

	/* We hit the end of the valid register windows in the
	 * cpu, start traversing the stack frame.
	 */
4:	mov		%fp, %g3

20:	ldx		[%g3 + STACK_BIAS + RW_V9_I7], %g2	! saved %i7
	subcc		%g7, 1, %g7
	bne,pn		%xcc, 20b
	 ldx		[%g3 + STACK_BIAS + RW_V9_I6], %g3	! (delay slot) saved %fp

	/* Restore the current register window position and
	 * re-enable interrupts.
	 */
3:	wrpr		%g1, %cwp
	wrpr		%o5, %pstate

2:	stx		%g3, [%o0 + PT_V9_FP]		! caller's frame pointer
	sllx		%o5, 8, %o5			! place %pstate into the
	stx		%o5, [%o0 + PT_V9_TSTATE]	! TSTATE.PSTATE field (fake tstate)
	stx		%g2, [%o0 + PT_V9_TPC]		! pc  = caller's %i7
	add		%g2, 4, %g2
	retl
	 stx		%g2, [%o0 + PT_V9_TNPC]		! (delay slot) npc = pc + 4
	.size		perf_arch_fetch_caller_regs,.-perf_arch_fetch_caller_regs
#endif /* CONFIG_PERF_EVENTS */
123
#ifdef CONFIG_SMP
	/* On SMP, hard_smp_processor_id is an alias for the entry
	 * point below: both return the physical cpu id in %o0.
	 */
	.globl		hard_smp_processor_id
	.type		hard_smp_processor_id,#function
hard_smp_processor_id:
#endif
	.globl		real_hard_smp_processor_id
	.type		real_hard_smp_processor_id,#function
real_hard_smp_processor_id:
	__GET_CPUID(%o0)		! macro (defined elsewhere) leaves id in %o0
	retl
	 nop
#ifdef CONFIG_SMP
	.size		hard_smp_processor_id,.-hard_smp_processor_id
#endif
	.size		real_hard_smp_processor_id,.-real_hard_smp_processor_id
139