xref: /titanic_52/usr/src/uts/sun4v/vm/mach_sfmmu_asm.s (revision 03831d35f7499c87d51205817c93e9a8d42c4bae)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

34#if defined(lint)
35#include <sys/types.h>
36#else	/* lint */
37#include "assym.h"
38#endif	/* lint */
39
40#include <sys/asm_linkage.h>
41#include <sys/machtrap.h>
42#include <sys/machasi.h>
43#include <sys/sun4asi.h>
44#include <sys/pte.h>
45#include <sys/mmu.h>
46#include <vm/hat_sfmmu.h>
47#include <vm/seg_spt.h>
48#include <sys/machparam.h>
49#include <sys/privregs.h>
50#include <sys/scb.h>
51#include <sys/intreg.h>
52#include <sys/machthread.h>
53#include <sys/clock.h>
54#include <sys/trapstat.h>
55
/*
 * sfmmu related subroutines
 */

#if defined (lint)

/*
 * Lint stubs for the assembly routines in the #else branch below.
 * They give lint C prototypes to check callers against; they are
 * never compiled into the kernel proper.
 */

/* ARGSUSED */
void
sfmmu_ctx_steal_tl1(uint64_t sctx, uint64_t rctx)
{}

/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sctx, uint64_t rctx)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(int ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}
90
91#else	/* lint */
92
/*
 * Steal an MMU context at trap level 1, and/or raise a TSB-miss
 * exception on this CPU.
 *
 * 1. If stealing ctx, flush all TLB entries whose ctx is ctx-being-stolen.
 * 2. If processor is running in the ctx-being-stolen, set the
 *    context to the resv context. That is
 *    If processor in User-mode - pri/sec-ctx both set to ctx-being-stolen,
 *		change both pri/sec-ctx registers to resv ctx.
 *    If processor in Kernel-mode - pri-ctx is 0, sec-ctx is ctx-being-stolen,
 *		just change sec-ctx register to resv ctx. When it returns to
 *		kernel-mode, user_rtt will change pri-ctx.
 *
 * Note: For multiple page size TLB, no need to set page sizes for
 *       DEMAP context.
 *
 * This runs at TL>0: only the %g registers are ours to use freely;
 * the %o registers belong to the interrupted context and are saved
 * into globals around each hypervisor fast trap below.
 *
 * %g1 = ctx being stolen (victim)
 * %g2 = invalid ctx to replace victim with
 */
	ENTRY(sfmmu_ctx_steal_tl1)
	/*
	 * Flush TLBs.
	 */

	/*
	 * Flush the victim context from the TLB via the MMU_DEMAP_CTX
	 * hypervisor call.  Save the interrupted context's outs in
	 * %g3-%g7 first, since the hcall consumes %o0-%o3 and %o5.
	 * NOTE(review): %o4 is not saved/restored around the fast trap -
	 * presumably the hypervisor leaves it intact; confirm against
	 * the sun4v hypervisor API specification.
	 */
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o2, %g5
	mov	%o3, %g6
	mov	%o5, %g7

	mov	%g1, %o2	! ctx#
	mov	%g0, %o0	! Current CPU only (use NULL)
	mov	%g0, %o1	! Current CPU only (use NULL)
	mov	MAP_ITLB | MAP_DTLB, %o3	! demap both I- and D-TLB
	mov	MMU_DEMAP_CTX, %o5		! hcall function number
	ta	FAST_TRAP
	brnz,a,pn %o0, ptl1_panic		! non-zero %o0 = hcall failed
	  mov	PTL1_BAD_HCALL, %g1		! annulled: only on panic path

	mov	%g3, %o0			! restore interrupted %o regs
	mov	%g4, %o1
	mov	%g5, %o2
	mov	%g6, %o3
	mov	%g7, %o5

	/* fall through to the code below */

	/*
	 * We enter here if we're just raising a TSB miss
	 * exception, without switching MMU contexts.  In
	 * this case, there is no need to flush the TLB.
	 */
	ALTENTRY(sfmmu_raise_tsb_exception)
	!
	! %g1 = ctx being stolen (victim)
	! %g2 = invalid ctx to replace victim with
	!
	! if (sec-ctx != victim) {
	!	return
	! } else {
	!	if (pri-ctx == victim) {
	!		write INVALID_CONTEXT to sec-ctx
	!		write INVALID_CONTEXT to pri-ctx
	!	} else {
	!		write INVALID_CONTEXT to sec-ctx
	!	}
	! }
	!
	cmp	%g1, NUM_LOCKED_CTXS
	blt,a,pn %icc, ptl1_panic		/* can't steal locked ctx */
	  mov	PTL1_BAD_CTX_STEAL, %g1
	set	CTXREG_CTX_MASK, %g6
	set	MMU_SCONTEXT, %g3
	ldxa	[%g3]ASI_MMU_CTX, %g5		/* get sec-ctx */
	and	%g5, %g6, %g5			/* isolate the ctx# field */
	cmp	%g5, %g1			/* is it the victim? */
	bne,pn	%icc, 2f			/* was our sec-ctx a victim? */
	  mov	MMU_PCONTEXT, %g7		/* delay: harmless either way */
	ldxa	[%g7]ASI_MMU_CTX, %g4		/* get pri-ctx */
	and	%g4, %g6, %g4			/* isolate the ctx# field */
	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid ctx */
	membar	#Sync
	cmp	%g1, %g4			/* is it the victim? */
	bne 	%icc, 3f			/* nope, no need to change it */
	  nop
	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid ctx */
	/* next instruction is retry so no membar sync */
3:
	membar	#Sync
	/* TSB program must be cleared - walkers do not check a context. */
	/*
	 * Tell the hypervisor there are no non-context-0 TSBs for this
	 * CPU: MMU_TSB_CTXNON0 with ntsb = 0 and a NULL TSB-info list.
	 * Save/restore the interrupted context's %o regs around it.
	 */
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o5, %g7
	clr	%o0				! ntsb = 0
	clr	%o1				! TSB-info list = NULL
	mov	MMU_TSB_CTXNON0, %o5		! hcall function number
	ta	FAST_TRAP
	brnz,a,pn %o0, ptl1_panic		! non-zero %o0 = hcall failed
	  mov	PTL1_BAD_HCALL, %g1		! annulled: only on panic path
	mov	%g3, %o0			! restore interrupted %o regs
	mov	%g4, %o1
	mov	%g7, %o5
2:
	retry					! return from trap handler
	SET_SIZE(sfmmu_ctx_steal_tl1)
196
197	ENTRY_NP(sfmmu_getctx_pri)
198	set	MMU_PCONTEXT, %o0
199	set	CTXREG_CTX_MASK, %o1
200	ldxa	[%o0]ASI_MMU_CTX, %o0
201	retl
202	and	%o0, %o1, %o0
203	SET_SIZE(sfmmu_getctx_pri)
204
205	ENTRY_NP(sfmmu_getctx_sec)
206	set	MMU_SCONTEXT, %o0
207	set	CTXREG_CTX_MASK, %o1
208	ldxa	[%o0]ASI_MMU_CTX, %o0
209	retl
210	and	%o0, %o1, %o0
211	SET_SIZE(sfmmu_getctx_sec)
212
	/*
	 * Set the secondary context register for this process.
	 * %o0 = context number for this process.
	 *
	 * The write is done with interrupts disabled; if they were
	 * enabled on entry they are re-enabled before returning.
	 */
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first. Also, resume saves state in %o3 and %o5
	 * so we can't use those registers here.
	 */

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1			! %g1 = saved pstate
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f			! annulled: wrpr only if IE set
	wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
1:
	mov	MMU_SCONTEXT, %o1		! VA of secondary ctx register
	sethi	%hi(FLUSH_ADDR), %o4
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	flush	%o4				! synchronize the ASI store

	/* Re-enable interrupts iff we disabled them above */
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f			! annulled: wrpr only if IE set
	wrpr	%g0, %g1, %pstate		/* enable interrupts */
1:	retl
	nop
	SET_SIZE(sfmmu_setctx_sec)
242
243	/*
244	 * set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
245	 * returns the detection value in %o0.
246	 */
247	ENTRY_NP(sfmmu_setup_4lp)
248	set	ktsb_phys, %o2
249	mov	1, %o1
250	st	%o1, [%o2]
251	retl
252	mov	%o1, %o0
253	SET_SIZE(sfmmu_setup_4lp)
254
	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 *
	 * Summary:
	 *  - kernel hat: nothing to do, return immediately;
	 *  - otherwise, with interrupts disabled:
	 *	- program the two UTSBREG scratchpad registers from the
	 *	  hat's tsbinfo list (-1 when there is no second TSB);
	 *	- describe the user TSBs to the hypervisor via the
	 *	  MMU_TSB_CTXNON0 fast trap, registering zero TSBs when
	 *	  TLB statistics are being gathered or the context is
	 *	  invalid;
	 *	- copy sfmmu_ismblkpa, the hat pointer, and the hat flags
	 *	  into this CPU's tsbmiss area for the TSB-miss handlers.
	 */
	ENTRY_NP(sfmmu_load_mmustate)
	/*
	 * From resume we call sfmmu_load_mmustate with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first. Also, resume saves state in %o5 and we
	 * can't use this register here.
	 */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3	! %o3 = kernel hat
	cmp	%o3, %o0
	be,pn	%xcc, 3f			! if kernel as, do nothing
	  nop

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1			! %g1 = saved pstate
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f			! annulled: wrpr only if IE set
	wrpr	%g1, PSTATE_IE, %pstate		! disable interrupts
1:
	/*
	 * We need to set up the TSB base register, tsbmiss
	 * area, and pass the TSB information into the hypervisor
	 */
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo

	/* create/set first UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
	SET_UTSBREG(SCRATCHPAD_UTSBREG1, %o2, %o3)

	brz,pt	%g2, 2f				! skip if no second TSB
	  mov	-1, %o2				! use -1 if no second TSB
						! (delay slot always executes;
						! overwritten below otherwise)

	/* make 2nd UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
2:
	SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)

#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 5f				! skip the hcall if disabled
	nop
#endif /* DEBUG */
	CPU_ADDR(%o2, %o4)	! load CPU struct addr to %o2 using %o4
	ldub    [%o2 + CPU_TSTAT_FLAGS], %o1	! load cpu_tstat_flag to %o1
	lduh	[%o0 + SFMMU_CNUM], %o2		! %o2 = sfmmu context number
	mov	%o5, %o4			! preserve %o5 for resume
	mov	%o0, %o3			! preserve %o0
	btst	TSTAT_TLB_STATS, %o1
	bnz,a,pn %icc, 4f			! ntsb = 0 if TLB stats enabled
	  clr	%o0
	cmp	%o2, INVALID_CONTEXT
	be,a,pn	%icc, 4f
	  clr	%o0				! ntsb = 0 for invalid ctx
	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_CNT], %o0
4:
	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_PA], %o1
	mov	MMU_TSB_CTXNON0, %o5		! hcall function number
	ta	FAST_TRAP			! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall		! non-zero %o0 = hcall failed
	mov	MMU_TSB_CTXNON0, %o1		! annulled: only on panic path
	mov	%o3, %o0			! restore %o0
	mov	%o4, %o5			! restore %o5
5:
	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
	CPU_TSBMISS_AREA(%o2, %o3)		! we need to access from
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	lduh	[%o0 + SFMMU_FLAGS], %o3	! per-CPU tsbmiss area.
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stuh	%o3, [%o2 + TSBMISS_HATFLAGS]

	/* Re-enable interrupts iff we disabled them above */
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 3f			! annulled: wrpr only if IE set
	wrpr	%g0, %g1, %pstate		! enable interrupts
3:	retl
	nop
	SET_SIZE(sfmmu_load_mmustate)
342
343#endif /* lint */
344
#if defined(lint)

/*
 * Lint stubs for the prefetch routines below; never compiled into
 * the kernel proper.
 */

/* Prefetch "struct tsbe" while walking TSBs */
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}

/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}
359#else /* lint */
360
	/*
	 * void prefetch_tsbe_read(struct tsbe *tsbep)
	 *
	 * Deliberate no-op on this platform: no prefetch is issued
	 * before reading a TSB entry.  The entry point exists so that
	 * callers need not be platform-conditional.
	 * NOTE(review): presumably other sfmmu ports implement a real
	 * prefetch here - confirm against the callers.
	 */
	ENTRY(prefetch_tsbe_read)
	retl
	nop
	SET_SIZE(prefetch_tsbe_read)
365
	/*
	 * void prefetch_tsbe_write(struct tsbe *tsbep)
	 *
	 * Deliberate no-op on this platform: no prefetch is issued
	 * before writing a TSB entry.  The entry point exists so that
	 * callers need not be platform-conditional.
	 */
	ENTRY(prefetch_tsbe_write)
	retl
	nop
	SET_SIZE(prefetch_tsbe_write)
370#endif /* lint */
371