/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * SFMMU primitives.  These primitives should only be used by sfmmu
 * routines.
 */

#if defined(lint)
#include <sys/types.h>
#else	/* lint */
#include "assym.h"
#endif	/* lint */

#include <sys/asm_linkage.h>
#include <sys/machtrap.h>
#include <sys/machasi.h>
#include <sys/sun4asi.h>
#include <sys/pte.h>
#include <sys/mmu.h>
#include <vm/hat_sfmmu.h>
#include <vm/mach_sfmmu.h>
#include <vm/seg_spt.h>
#include <sys/machparam.h>
#include <sys/privregs.h>
#include <sys/scb.h>
#include <sys/intreg.h>
#include <sys/machthread.h>
#include <sys/clock.h>
#include <sys/trapstat.h>
#include <sys/rock_hypervisor_api.h>

/*
 * sfmmu related subroutines
 */

#if defined (lint)

/* ARGSUSED */
void
sfmmu_raise_tsb_exception(uint64_t sfmmup, uint64_t rctx)
{}

int
sfmmu_getctx_pri()
{ return(0); }

int
sfmmu_getctx_sec()
{ return(0); }

/* ARGSUSED */
void
sfmmu_setctx_sec(uint_t ctx)
{}

/* ARGSUSED */
void
sfmmu_load_mmustate(sfmmu_t *sfmmup)
{}

#else	/* lint */

/*
 * Invalidate the context of a specific victim process, or of any
 * user process currently running on this CPU.
 *
 * %g1 = sfmmup whose ctx is being stolen (victim)
 *	 when called from sfmmu_wrap_around, %g1 == INVALID_CONTEXT.
 * Note %g1 is the only input argument used by this xcall handler.
 */

	ENTRY(sfmmu_raise_tsb_exception)
	!
	! if (victim == INVALID_CONTEXT) {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	!
	! } else if (current CPU tsbmiss->usfmmup != victim sfmmup) {
	!	return
	! } else {
	!	if (sec-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to sec-ctx
	!
	!	if (pri-ctx > INVALID_CONTEXT)
	!		write INVALID_CONTEXT to pri-ctx
	! }
	!

	sethi	%hi(ksfmmup), %g3
	ldx	[%g3 + %lo(ksfmmup)], %g3
	cmp	%g1, %g3
	be,a,pn	%xcc, ptl1_panic	/* can't invalidate kernel ctx */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1

	set	INVALID_CONTEXT, %g2

	cmp	%g1, INVALID_CONTEXT
	bne,pt	%xcc, 1f			/* called from wrap_around? */
	  mov	MMU_SCONTEXT, %g3

	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 0f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set invalid ctx */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g5		/* %g5 = pri-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 6f			/* yes, no need to change */
	  nop

	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	membar	#Sync

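	/*
	 * Flush the entire TLB via the MMU_DEMAP_ALL fast trap.  As with
	 * the other sun4v hypervisor fast traps in this file, the function
	 * number goes in %o5, arguments in the remaining %o registers
	 * (here %o0-%o2), and the status comes back in %o0 (zero on
	 * success).  %o0/%o1 would name a CPU list, which is not yet
	 * supported, so both are zero.  Roughly, in C terms (where
	 * hv_mmu_demap_all() is a hypothetical wrapper for the trap):
	 *
	 *	if (hv_mmu_demap_all(0, 0, MAP_ITLB | MAP_DTLB) != 0)
	 *		ptl1_panic(PTL1_BAD_RAISE_TSBEXCP);
	 */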
6:	/* flushall tlb */
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o2, %g6
	mov	%o5, %g7

	mov	%g0, %o0	! XXX no cpu list yet
	mov	%g0, %o1	! XXX no cpu list yet
	mov	MAP_ITLB | MAP_DTLB, %o2
	mov	MMU_DEMAP_ALL, %o5
	ta	FAST_TRAP
	brz,pt	%o0, 5f
	  nop
	ba	ptl1_panic		/* bad HV call */
	  mov	PTL1_BAD_RAISE_TSBEXCP, %g1
5:
	mov	%g3, %o0
	mov	%g4, %o1
	mov	%g6, %o2
	mov	%g7, %o5

	ba	3f
	  nop
1:
	/*
	 * %g1 = sfmmup
	 * %g2 = INVALID_CONTEXT
	 * %g3 = MMU_SCONTEXT
	 */
	CPU_TSBMISS_AREA(%g5, %g6)		/* load cpu tsbmiss area */
	ldx	[%g5 + TSBMISS_UHATID], %g5	/* load usfmmup */

	cmp	%g5, %g1			/* is it the victim? */
	bne,pt	%xcc, 2f			/* no, return */
	  nop

	ldxa	[%g3]ASI_MMU_CTX, %g5		/* %g5 = sec-ctx */
	cmp	%g5, INVALID_CONTEXT		/* kernel or invalid ctx? */
	ble,pn	%xcc, 0f			/* yes, no need to change */
	  mov	MMU_PCONTEXT, %g7

	stxa	%g2, [%g3]ASI_MMU_CTX		/* set sec-ctx to invalid */
	membar	#Sync

0:
	ldxa	[%g7]ASI_MMU_CTX, %g4		/* %g4 = pri-ctx */
	cmp	%g4, INVALID_CONTEXT		/* is pri-ctx the victim? */
	ble	%icc, 3f			/* no need to change pri-ctx */
	  nop
	stxa	%g2, [%g7]ASI_MMU_CTX		/* set pri-ctx to invalid */
	membar	#Sync

3:
	/*
	 * The TSB registration must be cleared - the HV TSB walkers do
	 * not check the context.  Passing ntsb = 0 to MMU_TSB_CTXNON0
	 * deconfigures all user TSBs on this CPU.
	 */
	mov	%o0, %g3
	mov	%o1, %g4
	mov	%o5, %g7
	clr	%o0
	clr	%o1
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP
	brnz,a,pn %o0, ptl1_panic
	  mov	PTL1_BAD_HCALL, %g1
	mov	%g3, %o0
	mov	%g4, %o1
	mov	%g7, %o5
2:
	retry
	SET_SIZE(sfmmu_raise_tsb_exception)

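	/*
	 * Return the current primary/secondary context number.  The MMU
	 * context register is read through ASI_MMU_CTX; the ldxa sits in
	 * the delay slot of the retl.
	 */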
	ENTRY_NP(sfmmu_getctx_pri)
	set	MMU_PCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_pri)

	ENTRY_NP(sfmmu_getctx_sec)
	set	MMU_SCONTEXT, %o0
	retl
	ldxa	[%o0]ASI_MMU_CTX, %o0
	SET_SIZE(sfmmu_getctx_sec)

	/*
	 * Set the secondary context register for this process.
	 * %o0 = context number
	 */
	ENTRY_NP(sfmmu_setctx_sec)
	/*
	 * resume calls sfmmu_setctx_sec with interrupts disabled, but we
	 * can also be called from C code with interrupts enabled, so the
	 * current interrupt state must be checked first.
	 */

	/* If interrupts are enabled, disable them */
	rdpr	%pstate, %g1
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 1f
	wrpr	%g1, PSTATE_IE, %pstate		/* disable interrupts */
1:
	mov	MMU_SCONTEXT, %o1
	stxa	%o0, [%o1]ASI_MMU_CTX		/* set 2nd context reg. */
	membar	#Sync
	/*
	 * If the routine was entered with interrupts enabled, re-enable
	 * them now; otherwise keep them disabled and return without
	 * enabling interrupts.
	 * %g1 - old interrupt state
	 */
	btst	PSTATE_IE, %g1
	bnz,a,pt %icc, 2f
	wrpr	%g0, %g1, %pstate		/* enable interrupts */
2:	retl
	nop
	SET_SIZE(sfmmu_setctx_sec)
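
	/*
	 * A minimal C sketch of the save/restore pattern above (the helper
	 * names are hypothetical, for illustration only):
	 *
	 *	uint64_t old = rdpstate();
	 *	if (old & PSTATE_IE)
	 *		wrpstate(old & ~PSTATE_IE);	-- disable
	 *	set_sec_ctx(ctx);			-- stxa + membar
	 *	if (old & PSTATE_IE)
	 *		wrpstate(old);			-- restore
	 */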

	/*
	 * Set ktsb_phys to 1 if the processor supports ASI_QUAD_LDD_PHYS.
	 * This sun4v version sets the flag unconditionally; the value
	 * written is returned in %o0.
	 */
	ENTRY_NP(sfmmu_setup_4lp)
	set	ktsb_phys, %o2
	mov	1, %o1
	st	%o1, [%o2]
	retl
	mov	%o1, %o0
	SET_SIZE(sfmmu_setup_4lp)

	/*
	 * Called to load MMU registers and tsbmiss area
	 * for the active process.  This function should
	 * only be called from TL=0.
	 *
	 * %o0 - hat pointer
	 */
	ENTRY_NP(sfmmu_load_mmustate)

#ifdef DEBUG
	PANIC_IF_INTR_ENABLED_PSTR(msfmmu_ei_l1, %g1)
#endif /* DEBUG */

	sethi	%hi(ksfmmup), %o3
	ldx	[%o3 + %lo(ksfmmup)], %o3
	cmp	%o3, %o0
	be,pn	%xcc, 8f			! if kernel as, do nothing
	  nop

	set	MMU_SCONTEXT, %o3
	ldxa	[%o3]ASI_MMU_CTX, %o5

	cmp	%o5, INVALID_CONTEXT		! ctx is invalid?
	bne,pt	%icc, 1f
	  nop

	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stx	%g0, [%o2 + TSBMISS_SHARED_UHATID]
#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 0f
	  nop
#endif /* DEBUG */
	clr	%o0				! ntsb = 0 for invalid ctx
	clr	%o1				! HV_TSB_INFO_PA = 0 if inv ctx
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP			! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall
	  mov	MMU_TSB_CTXNON0, %o1
0:
	retl
	  nop
1:
	/*
	 * Set up the TSB base registers and the tsbmiss area, and pass
	 * the TSB information to the hypervisor.
	 */
	ldx	[%o0 + SFMMU_TSB], %o1		! %o1 = first tsbinfo
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second tsbinfo

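	/*
	 * The first two UTSBREG values are cached in per-CPU scratchpad
	 * registers (SCRATCHPAD_UTSBREG1/2) so that the TL>0 TSB-miss
	 * handlers can locate the user TSBs without dereferencing the
	 * hat structure.
	 */
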
	/* create/set first UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
	SET_UTSBREG(SCRATCHPAD_UTSBREG1, %o2, %o3)

	brz,pt	%g2, 2f
	  mov	-1, %o2				! use -1 if no second TSB

	/* make 2nd UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
2:
	SET_UTSBREG(SCRATCHPAD_UTSBREG2, %o2, %o3)

	/* make 3rd and 4th TSB */
	CPU_TSBMISS_AREA(%o4, %o3)		! %o4 = tsbmiss area

	ldx	[%o0 + SFMMU_SCDP], %g2		! %g2 = sfmmu_scd
	brz,pt	%g2, 3f
	  mov	-1, %o2				! use -1 if no third TSB

	ldx	[%g2 + SCD_SFMMUP], %g3		! %g3 = scdp->scd_sfmmup
	ldx	[%g3 + SFMMU_TSB], %o1		! %o1 = first scd tsbinfo
	brz,pn	%o1, 1f
	  nop					! panic if no third TSB

	/* make 3rd UTSBREG */
	MAKE_UTSBREG(%o1, %o2, %o3)		! %o2 = user tsbreg
3:
	SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR, %o2)

	brz,pt	%g2, 4f
	  mov	-1, %o2				! use -1 if no 3rd or 4th TSB

	brz,pt	%o1, 4f
	  mov	-1, %o2				! use -1 if no 3rd or 4th TSB
	ldx	[%o1 + TSBINFO_NEXTPTR], %g2	! %g2 = second scd tsbinfo
	brz,pt	%g2, 4f
	  mov	-1, %o2				! use -1 if no 4th TSB

	/* make 4th UTSBREG */
	MAKE_UTSBREG(%g2, %o2, %o3)		! %o2 = user tsbreg
4:
	SET_UTSBREG_SHCTX(%o4, TSBMISS_TSBSCDPTR4M, %o2)

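	/*
	 * Unlike the first two, the 3rd and 4th TSB pointers describe
	 * shared-context (SCD) TSBs and are stashed in the per-CPU
	 * tsbmiss area (TSBMISS_TSBSCDPTR/TSBMISS_TSBSCDPTR4M) rather
	 * than in scratchpad registers.
	 */
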
#ifdef DEBUG
	/* check if hypervisor/hardware should handle user TSB */
	sethi	%hi(hv_use_non0_tsb), %o2
	ld	[%o2 + %lo(hv_use_non0_tsb)], %o2
	brz,pn	%o2, 6f
	  nop
#endif /* DEBUG */
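	/*
	 * Register the user TSBs with the hypervisor: %o0 = number of
	 * TSB descriptions, %o1 = real address of the description array
	 * (both kept in the hat's HV block), %o5 = MMU_TSB_CTXNON0.
	 * When TLB statistics are being collected, ntsb is forced to 0
	 * so that TSB misses come to the guest instead of being handled
	 * by the hypervisor.
	 */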
	CPU_ADDR(%o2, %o4)	! load CPU struct addr to %o2 using %o4
	ldub	[%o2 + CPU_TSTAT_FLAGS], %o1	! load cpu_tstat_flag to %o1

	mov	%o0, %o3			! preserve %o0
	btst	TSTAT_TLB_STATS, %o1
	bnz,a,pn %icc, 5f			! ntsb = 0 if TLB stats enabled
	  clr	%o0

	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_CNT], %o0
5:
	ldx	[%o3 + SFMMU_HVBLOCK + HV_TSB_INFO_PA], %o1
	mov	MMU_TSB_CTXNON0, %o5
	ta	FAST_TRAP			! set TSB info for user process
	brnz,a,pn %o0, panic_bad_hcall
	  mov	MMU_TSB_CTXNON0, %o1
	mov	%o3, %o0			! restore saved sfmmup to %o0
6:
	/*
	 * If the TLB pagesize register is supported and pgsz_search_on is
	 * set, then we patch out the following branch instruction.
	 */
	.global sfmmu_pgsz_load_mmustate_patch
sfmmu_pgsz_load_mmustate_patch:
	ba,a	7f				! branch around pgsz search hcall
	mov	%o0, %o3			! preserve sfmmup in %o3
	ldx	[%o3 + SFMMU_PGSZ_ORDER + HV_PGSZ_ORDER_PA], %o0
	mov	TLB_SO_ID, %o1			! flags apply to I and D
	mov	MMU_SET_NONPRIV_SEARCH, %o5
	ta	FAST_TRAP			! set page size search order
	brnz,a,pn %o0, panic_bad_hcall
	  mov	MMU_SET_NONPRIV_SEARCH, %o1
	mov	%o3, %o0			! restore saved sfmmup to %o0
7:
	mov	%o1, %o5			! preserve pgsz_search_on
	ldx	[%o0 + SFMMU_ISMBLKPA], %o1	! copy members of sfmmu
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	stx	%o1, [%o2 + TSBMISS_ISMBLKPA]	! sfmmu_tsb_miss into the
	ldub	[%o0 + SFMMU_TTEFLAGS], %o3	! per-CPU tsbmiss area.
	ldub	[%o0 + SFMMU_RTTEFLAGS], %o4
	ldx	[%o0 + SFMMU_SRDP], %o1
	stx	%o0, [%o2 + TSBMISS_UHATID]
	stub	%o3, [%o2 + TSBMISS_UTTEFLAGS]
	stub	%o4, [%o2 + TSBMISS_URTTEFLAGS]
	stx	%o1, [%o2 + TSBMISS_SHARED_UHATID]
	brz,pn	%o1, 8f				! check for sfmmu_srdp
	  add	%o0, SFMMU_HMERMAP, %o1
	add	%o2, TSBMISS_SHMERMAP, %o2
	mov	SFMMU_HMERGNMAP_WORDS, %o3
						! set tsbmiss shmermap
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_shme_mmustate)

	ldx	[%o0 + SFMMU_SCDP], %o4		! %o4 = sfmmu_scd
	CPU_TSBMISS_AREA(%o2, %o3)		! %o2 = tsbmiss area
	mov	SFMMU_HMERGNMAP_WORDS, %o3
	brnz,pt	%o4, 9f				! check for sfmmu_scdp else
	  nop
	add	%o2, TSBMISS_SCDSHMERMAP, %o2	! zero tsbmiss scd_shmermap
	ZERO_REGION_MAP(%o2, %o3, zero_scd_mmustate)
8:
	retl
	nop
9:
	brz,a	%o5, 0f				! test pgsz_search_on
	  or	%g0, TLB_ALL_SHARED_PGSZ, %o1	! enable all page sizes
	ldub	[%o0 + SFMMU_PGSZ_MAP], %o1
0:
	stub	%o1, [%o2 + TSBMISS_PGSZ_BITMAP] ! set tsbmiss pgsz bitmap
	add	%o2, TSBMISS_SCDSHMERMAP, %o2	! set tsbmiss scd_shmermap
	add	%o4, SCD_HMERMAP, %o1
	SET_REGION_MAP(%o1, %o2, %o3, %o4, load_scd_mmustate)

	retl
	  nop
1:
	sethi	%hi(panicstr), %g1		! panic if no 3rd TSB
	ldx	[%g1 + %lo(panicstr)], %g1
	tst	%g1

	bnz,pn	%xcc, 8b
	  nop

	sethi	%hi(sfmmu_panic10), %o0
	call	panic
	  or	%o0, %lo(sfmmu_panic10), %o0
	SET_SIZE(sfmmu_load_mmustate)

#endif /* lint */

#if defined(lint)

/* Prefetch "struct tsbe" while walking TSBs */
/*ARGSUSED*/
void
prefetch_tsbe_read(struct tsbe *tsbep)
{}

/* Prefetch the tsbe that we are about to write */
/*ARGSUSED*/
void
prefetch_tsbe_write(struct tsbe *tsbep)
{}

#else /* lint */

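	/*
	 * Both routines are no-ops on sun4v; the entry points are kept
	 * so that common sfmmu code can call them unconditionally.
	 */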
	ENTRY(prefetch_tsbe_read)
	retl
	nop
	SET_SIZE(prefetch_tsbe_read)

	ENTRY(prefetch_tsbe_write)
	retl
	nop
	SET_SIZE(prefetch_tsbe_write)
#endif /* lint */
