/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>

/*
 * sfmmu related subroutines
 */

#if defined(lint)

/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(uint_t ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{
}

#else	/* lint */

/*
 * Invalidate either the context of a specific victim or any process
 * currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being stolen (victim)
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT.
 * Note %g1 is the only input argument used by this xcall handler.
 */

	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT) {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	!
	! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
	!	return
	! } else {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	! }
	!

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn	%xcc, ptl1_panic	/* can't invalidate kernel ctx */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2

	cmp	%g1, INVALID_CONTEXT
	bne,pt	%xcc, 1f			/* called from wrap_around? */
	  mov	MMU_SCONTEXT, %g3

	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 0f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g5		/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 6f			/* yes, no need to change */
	  nop

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	membar	#Sync

6:	/* flushall tlb */
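	!
	! Demap all TLB entries for all contexts via the MMU_DEMAP_ALL
	! hypervisor fast trap.  As an xcall handler we may clobber only
	! the global registers, so the %o registers the hcall consumes
	! are stashed in globals and restored afterwards.
	!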
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o2, %g6
	mov	%o5, %g7

	mov	%g0, %o0	! XXX no cpu list yet
	mov	%g0, %o1	! XXX no cpu list yet
	mov	MAP_ITLB | MAP_DTLB, %o2
	mov	MMU_DEMAP_ALL, %o5
	ta	FAST_TRAP
	brz,pt	%o0, 5f
	  nop
	ba	ptl1_panic		/* bad HV call */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1
5:
	mov	%g3, %o0
	mov	%g4, %o1
	mov	%g6, %o2
	mov	%g7, %o5

	ba	3f
	  nop
1:
	/*
	 * %g1 = sfmmup
	 * %g2 = INVALID_CONTEXT
	 * %g3 = MMU_SCONTEXT
	 */
	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5	/* load usfmmup */

	cmp	%g5, %g1			/* is it the victim? */
	bne,pt	%xcc, 2f			/* no, nothing to do here */
	  nop

	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 0f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g4		/* %g4 = pri-ctx */
	cmp	%g4, INVALID_CONTEXT		/* is pri-ctx the victim? */
	ble	%icc, 3f			/* no need to change pri-ctx */
	  nop
	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	membar	#Sync

3:
	/* The TSB config must be cleared - HW walkers do not check ctx. */
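	!
	! MMU_TSB_CTXNON0 with ntsbs = 0 and a null TSB description pointer
	! unconfigures the TSBs used for user (non-zero) contexts, so the
	! hypervisor stops walking the victim's TSBs on this CPU.
	!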
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o5, %g7
	clr	%o0				! ntsbs = 0
	clr	%o1				! TSB description pa = 0
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP
	brnz,a,pn %o0, ptl1_panic		/* bad HV call */
	  mov	PTL1_BAD_HCALL, %g1
	mov	%g3, %o0
	mov	%g4, %o1
	mov	%g7, %o5
2:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)

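	/*
	 * Return the current primary (sfmmu_getctx_pri) or secondary
	 * (sfmmu_getctx_sec) context number; the ldxa that reads the
	 * context register via ASI_MMU_CTX sits in the retl delay slot.
	 */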
	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_sec)

	/*
	 * Set the secondary context register for this process.
	 * %o0 = context number
	 */
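	!
	! Roughly equivalent C logic (a sketch only; the helpers named
	! here are illustrative, not real kernel interfaces):
	!
	!	uint64_t pstate = get_pstate();
	!	if (pstate & PSTATE_IE)			! intrs enabled?
	!		set_pstate(pstate & ~PSTATE_IE);	! disable them
	!	stxa(ctx, MMU_SCONTEXT, ASI_MMU_CTX);	! set sec-ctx reg
	!	membar_sync();
	!	if (pstate & PSTATE_IE)
	!		set_pstate(pstate);		! restore intr state
	!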
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * From resume we call sfmmu_setctx_sec with interrupts disabled.
	 * But we can also get called from C with interrupts enabled. So,
	 * we need to check first.
	 */

	/* If interrupts are not disabled, then disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
1:
	mov	MMU_SCONTEXT, %o1
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	membar	#Sync
	/*
	 * If the routine was entered with interrupts enabled, re-enable
	 * them now; otherwise keep them disabled and return.
	 * %g1 - old interrupt state
	 */
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 2f
	wrpr	%g0, %g1, %pstate		/* enable interrupts */
2:	retl
	nop
	SET_SIZE(sfmmu_setctx_sec)

	/*
	 * Set ktsb_phys to 1, enabling TSB access via physical addresses
	 * (ASI_QUAD_LDD_PHYS); on sun4v this is done unconditionally.
	 * Returns 1 in %o0.
	 */
	ENTRY_NP(sfmmu_setup_4lp)
	set	ktsb_phys, %o2
	mov	1, %o1
	st	%o1, [%o2]
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)

	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 */
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l1, %g1)
#endif /* DEBUG */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o0
	be,pn	%xcc, 7f			! if kernel as, do nothing
	  nop

	set	MMU_SCONTEXT, %o3
	ldxa	[%o3]ASI_MMU_CTX, %o5

	cmp	%o5, INVALID_CONTEXT		! ctx is invalid?
	bne,pt	%icc, 1f
	  nop

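	!
	! The secondary context is invalid: record the hat in the per-CPU
	! tsbmiss area and unconfigure the user TSBs; they are programmed
	! again once the process runs with a valid context.
	!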
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stx	%g0, [%o2 + TSBMISS_SHARED_UHATID]
#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 0f
	  nop
#endif /* DEBUG */
	clr	%o0				! ntsb = 0 for invalid ctx
	clr	%o1				! HV_TSB_INFO_PA = 0 if inv ctx
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP			! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall
	  mov	MMU_TSB_CTXNON0, %o1
0:
	retl
	  nop
1:
	/*
	 * We need to set up the TSB base register, tsbmiss
	 * area, and pass the TSB information into the hypervisor.
	 */
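	!
	! MAKE_UTSBREG encodes a tsbinfo into the UTSBREG format and
	! SET_UTSBREG stores it in a hypervisor scratchpad register,
	! where the TL>0 TSB-miss handlers can pick it up.
	!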
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo

	/* create/set first UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
	SET_UTSBREG(SCRATCHPAD_UTSBREG1, %o2, %o3)

	brz,pt	%g2, 2f
	  mov	-1, %o2				! use -1 if no second TSB

	/* make 2nd UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
2:
	SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)

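	!
	! The third and fourth TSBs belong to the SCD (shared context
	! domain) hat, if this process participates in one.  Their
	! pointers live in the per-CPU tsbmiss area rather than in
	! scratchpad registers.
	!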
	/* make 3rd and 4th TSB */
	CPU_TSBMISS_AREA(%o4, %o3)		! %o4 = tsbmiss area

	ldx	[%o0 + SFMMU_SCDP], %g2		! %g2 = sfmmu_scd
	brz,pt	%g2, 3f
	  mov	-1, %o2				! use -1 if no third TSB

	ldx	[%g2 + SCD_SFMMUP], %g3		! %g3 = scdp->scd_sfmmup
	ldx	[%g3 + SFMMU_TSB], %o1		! %o1 = first scd tsbinfo
	brz,pn	%o1, 9f
	  nop					! panic if no third TSB

	/* make 3rd UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
3:
	SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR, %o2)

	brz,pt	%g2, 4f
	  mov	-1, %o2				! use -1 if no 3rd or 4th TSB

	brz,pt	%o1, 4f
	  mov	-1, %o2				! use -1 if no 3rd or 4th TSB
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second scd tsbinfo
	brz,pt	%g2, 4f
	  mov	-1, %o2				! use -1 if no 4th TSB

	/* make 4th UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
4:
	SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR4M, %o2)

#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 6f
	  nop
#endif /* DEBUG */
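	!
	! Hand the user TSB configuration to the hypervisor.  When trapstat
	! is collecting TLB statistics, pass ntsbs = 0 so that misses are
	! not satisfied from the TSBs and remain visible to the guest miss
	! handlers for counting.
	!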
	CPU_ADDR(%o2, %o4)	! load CPU struct addr to %o2 using %o4
	ldub	[%o2 + CPU_TSTAT_FLAGS], %o1	! load cpu_tstat_flag to %o1

	mov	%o0, %o3			! preserve %o0
	btst	TSTAT_TLB_STATS, %o1
	bnz,a,pn %icc, 5f			! ntsb = 0 if TLB stats enabled
	  clr	%o0

	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_CNT], %o0
5:
	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_PA], %o1
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP			! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall
	  mov	MMU_TSB_CTXNON0, %o1
	mov	%o3, %o0			! restore %o0
6:
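	!
	! Cache the hat fields the TSB-miss handlers need (ISM block,
	! TTE flags, shared-region hat) in the per-CPU tsbmiss area,
	! and load the region bitmaps for shared (SRD/SCD) mappings.
	!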
	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	ldub	[%o0 + SFMMU_TTEFLAGS], %o3	! per-CPU tsbmiss area.
	ldub	[%o0 + SFMMU_RTTEFLAGS], %o4
	ldx	[%o0 + SFMMU_SRDP], %o1
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stub	%o3, [%o2 + TSBMISS_UTTEFLAGS]
	stub	%o4, [%o2 + TSBMISS_URTTEFLAGS]
	stx	%o1, [%o2 + TSBMISS_SHARED_UHATID]
	brz,pn	%o1, 7f				! check for sfmmu_srdp
	  add	%o0, SFMMU_HMERMAP, %o1
	add	%o2, TSBMISS_SHMERMAP, %o2
	mov	SFMMU_HMERGNMAP_WORDS, %o3
						! set tsbmiss shmermap
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)

	ldx	[%o0 + SFMMU_SCDP], %o4		! %o4 = sfmmu_scd
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	mov	SFMMU_HMERGNMAP_WORDS, %o3
	brnz,pt	%o4, 8f				! check for sfmmu_scdp else
	  add	%o2, TSBMISS_SCDSHMERMAP, %o2	! zero tsbmiss scd_shmermap
	ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
7:
	retl
	nop
8:						! set tsbmiss scd_shmermap
	add	%o4, SCD_HMERMAP, %o1
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)
	retl
	  nop
9:
	sethi	%hi(panicstr), %g1		! panic if no 3rd TSB
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1

	bnz,pn	%xcc, 7b			! already panicking, return
	  nop

	sethi	%hi(sfmmu_panic10), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic10), %o0

	SET_SIZE(sfmmu_load_mmustate)

#endif /* lint */

#if defined(lint)

/* Prefetch "struct tsbe" while walking TSBs */
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}

/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}

#else /* lint */

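	/* On sun4v these prefetch routines are no-ops. */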
	ENTRY(prefetch_tsbe_read)
	retl
	nop
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	nop
	SET_SIZE(prefetch_tsbe_write)
#endif /* lint */