/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * VM - Hardware Address Translation management.
 *
 * This file describes the contents of the sun-reference-mmu(sfmmu)-
 * specific hat data structures and the sfmmu-specific hat procedures.
 * The machine-independent interface is described in <vm/hat.h>.
 */

#ifndef	_VM_HAT_SFMMU_H
#define	_VM_HAT_SFMMU_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif

#ifndef _ASM

#include <sys/types.h>

#endif /* _ASM */

#ifdef	_KERNEL

#include <sys/pte.h>
#include <vm/mach_sfmmu.h>
#include <sys/mmu.h>

/*
 * Don't alter these without considering changes to ism_map_t.
 */
#define	DEFAULT_ISM_PAGESIZE		MMU_PAGESIZE4M
#define	ISM_PG_SIZE(ism_vbshift)	(1 << ism_vbshift)
#define	ISM_SZ_MASK(ism_vbshift)	(ISM_PG_SIZE(ism_vbshift) - 1)
#define	ISM_MAP_SLOTS	8	/* Change this carefully. */

#ifndef _ASM

#include <sys/t_lock.h>
#include <vm/hat.h>
#include <vm/seg.h>
#include <sys/machparam.h>
#include <sys/systm.h>
#include <sys/x_call.h>
#include <vm/page.h>
#include <sys/ksynch.h>

typedef struct hat sfmmu_t;

/*
 * SFMMU attributes for hat_memload/hat_devload
 */
#define	SFMMU_UNCACHEPTTE	0x01000000	/* uncache in physical $ */
#define	SFMMU_UNCACHEVTTE	0x02000000	/* uncache in virtual $ */
#define	SFMMU_SIDEFFECT		0x04000000	/* set side effect bit */
#define	SFMMU_LOAD_ALLATTR	(HAT_PROT_MASK | HAT_ORDER_MASK |	\
		HAT_ENDIAN_MASK | HAT_NOFAULT | HAT_NOSYNC |		\
		SFMMU_UNCACHEPTTE | SFMMU_UNCACHEVTTE | SFMMU_SIDEFFECT)


/*
 * sfmmu flags for hat_memload/hat_devload
 */
#define	SFMMU_NO_TSBLOAD	0x08000000	/* do not preload tsb */
#define	SFMMU_LOAD_ALLFLAG	(HAT_LOAD | HAT_LOAD_LOCK |		\
		HAT_LOAD_ADV | HAT_LOAD_CONTIG | HAT_LOAD_NOCONSIST |	\
		HAT_LOAD_SHARE | HAT_LOAD_REMAP | SFMMU_NO_TSBLOAD |	\
		HAT_RELOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_TEXT)

/*
 * sfmmu internal flag to hat_pageunload that spares locked mappings
 */
#define	SFMMU_KERNEL_RELOC	0x8000

/*
 * mode for sfmmu_chgattr
 */
#define	SFMMU_SETATTR	0x0
#define	SFMMU_CLRATTR	0x1
#define	SFMMU_CHGATTR	0x2

/*
 * sfmmu specific flags for page_t
 */
#define	P_PNC	0x8		/* non-caching is permanent bit */
#define	P_TNC	0x10		/* non-caching is temporary bit */
#define	P_KPMS	0x20		/* kpm mapped small (vac alias prevention) */
#define	P_KPMC	0x40		/* kpm conflict page (vac alias prevention) */

#define	PP_GENERIC_ATTR(pp)	((pp)->p_nrm & (P_MOD | P_REF | P_RO))
#define	PP_ISMOD(pp)		((pp)->p_nrm & P_MOD)
#define	PP_ISREF(pp)		((pp)->p_nrm & P_REF)
#define	PP_ISRO(pp)		((pp)->p_nrm & P_RO)
#define	PP_ISNC(pp)		((pp)->p_nrm & (P_PNC|P_TNC))
#define	PP_ISPNC(pp)		((pp)->p_nrm & P_PNC)
#define	PP_ISTNC(pp)		((pp)->p_nrm & P_TNC)
#define	PP_ISKPMS(pp)		((pp)->p_nrm & P_KPMS)
#define	PP_ISKPMC(pp)		((pp)->p_nrm & P_KPMC)

#define	PP_SETMOD(pp)		((pp)->p_nrm |= P_MOD)
#define	PP_SETREF(pp)		((pp)->p_nrm |= P_REF)
#define	PP_SETREFMOD(pp)	((pp)->p_nrm |= (P_REF|P_MOD))
#define	PP_SETRO(pp)		((pp)->p_nrm |= P_RO)
#define	PP_SETREFRO(pp)		((pp)->p_nrm |= (P_REF|P_RO))
#define	PP_SETPNC(pp)		((pp)->p_nrm |= P_PNC)
#define	PP_SETTNC(pp)		((pp)->p_nrm |= P_TNC)
#define	PP_SETKPMS(pp)		((pp)->p_nrm |= P_KPMS)
#define	PP_SETKPMC(pp)		((pp)->p_nrm |= P_KPMC)

#define	PP_CLRMOD(pp)		((pp)->p_nrm &= ~P_MOD)
#define	PP_CLRREF(pp)		((pp)->p_nrm &= ~P_REF)
#define	PP_CLRREFMOD(pp)	((pp)->p_nrm &= ~(P_REF|P_MOD))
#define	PP_CLRRO(pp)		((pp)->p_nrm &= ~P_RO)
#define	PP_CLRPNC(pp)		((pp)->p_nrm &= ~P_PNC)
#define	PP_CLRTNC(pp)		((pp)->p_nrm &= ~P_TNC)
#define	PP_CLRKPMS(pp)		((pp)->p_nrm &= ~P_KPMS)
#define	PP_CLRKPMC(pp)		((pp)->p_nrm &= ~P_KPMC)

/*
 * All shared memory segments attached with the SHM_SHARE_MMU flag (ISM)
 * will be constrained to a 4M, 32M or 256M alignment.  Also, since every
 * newly created ISM segment is created out of a new address space at a
 * base va of 0, we don't need to store it.
 */
#define	ISM_ALIGN(shift)	(1 << shift)	/* base va aligned to <n>M  */
#define	ISM_ALIGNED(shift, va)	(((uintptr_t)va & (ISM_ALIGN(shift) - 1)) == 0)
#define	ISM_SHIFT(shift, x)	((uintptr_t)x >> (shift))

/*
 * Pad locks out to cache sub-block boundaries to prevent
 * false sharing, so several processes don't contend for
 * the same line if they aren't using the same lock.  Since
 * this is a typedef we also have a bit of freedom in
 * changing lock implementations later if we decide it
 * is necessary.
 */
typedef struct hat_lock {
	kmutex_t hl_mutex;
	uchar_t hl_pad[64 - sizeof (kmutex_t)];
} hatlock_t;

#define	HATLOCK_MUTEXP(hatlockp)	(&((hatlockp)->hl_mutex))
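
/*
 * Editorial sketch, not part of the original interface: the pad above is
 * sized so that a hatlock_t fills exactly one 64-byte cache sub-block,
 * which a compile-time assertion could check:
 *
 *	CTASSERT(sizeof (hatlock_t) == 64);
 */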

/*
 * All segments mapped with ISM are guaranteed to be 4M, 32M or 256M aligned.
 * Also, the size is guaranteed to be in 4M, 32M or 256M chunks.
 * ism_seg consists of the following members:
 * [XX..22] base address of ism segment. XX is 63 or 31 depending on whether
 *	caddr_t is 64 bits or 32 bits.
 * [21..0] size of segment.
 *
 * NOTE: Don't alter this structure without changing the defines above and
 * the tsb_miss and protection handlers.
 */
typedef struct ism_map {
	uintptr_t	imap_seg;  	/* base va + sz of ISM segment */
	ushort_t	imap_vb_shift;	/* mmu_pageshift for ism page size */
	ushort_t	imap_hatflags;	/* primary ism page size */
	uint_t		imap_sz_mask;	/* mmu_pagemask for ism page size */
	sfmmu_t		*imap_ismhat; 	/* hat id of dummy ISM as */
	struct ism_ment	*imap_ment;	/* pointer to mapping list entry */
} ism_map_t;

#define	ism_start(map)	((caddr_t)((map).imap_seg & \
				~ISM_SZ_MASK((map).imap_vb_shift)))
#define	ism_size(map)	((map).imap_seg & ISM_SZ_MASK((map).imap_vb_shift))
#define	ism_end(map)	((caddr_t)(ism_start(map) + (ism_size(map) * \
				ISM_PG_SIZE((map).imap_vb_shift))))
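
/*
 * Illustrative sketch of the imap_seg encoding (editorial example; the
 * concrete values are assumptions, not from this file).  With the default
 * 4M ISM page size, imap_vb_shift is 22, so the low 22 bits of imap_seg
 * hold the segment size in ISM pages and the upper bits hold the base va:
 *
 *	ism_map_t m;
 *	m.imap_vb_shift = 22;			// 4M ISM pages
 *	m.imap_seg = 0x80000000ul | 8;		// 4M-aligned base va | size
 *	// ism_start(m) == (caddr_t)0x80000000
 *	// ism_size(m) == 8			// segment is 8 ISM pages
 *	// ism_end(m) == (caddr_t)(0x80000000ul + (8 << 22))	// base + 32M
 */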
/*
 * ISM mapping entry. Used to link all hats sharing an ism_hat.
 * Serves the same function as the p_mapping list for a page.
 */
typedef struct ism_ment {
	sfmmu_t		*iment_hat;	/* back pointer to hat_share() hat */
	caddr_t		iment_base_va;	/* hat's va base for this ism seg */
	struct ism_ment	*iment_next;	/* next ism map entry */
	struct ism_ment	*iment_prev;	/* prev ism map entry */
} ism_ment_t;

/*
 * ISM segment block. One will be hung off the sfmmu structure if a
 * process uses ISM.  More will be linked using ismblk_next if more
 * than ISM_MAP_SLOTS segments are attached to this proc.
 *
 * All modifications to fields in this structure will be protected
 * by the hat mutex.  In order to avoid grabbing this lock in low level
 * routines (tsb miss/protection handlers and vatopfn) while not
 * introducing any race conditions with hat_unshare, we will set
 * the CTX_ISM_BUSY bit in the ctx struct. Any mmu traps that occur
 * for this ctx while this bit is set will be handled in
 * sfmmu_tsb_exception, where they will synchronize behind the hat mutex.
 */
typedef struct ism_blk {
	ism_map_t		iblk_maps[ISM_MAP_SLOTS];
	struct ism_blk		*iblk_next;
	uint64_t		iblk_nextpa;
} ism_blk_t;

/*
 * TSB access information.  All fields are protected by the process's
 * hat lock.
 */

struct tsb_info {
	caddr_t		tsb_va;		/* tsb base virtual address */
	uint64_t	tsb_pa;		/* tsb base physical address */
	struct tsb_info	*tsb_next;	/* next tsb used by this process */
	uint16_t	tsb_szc;	/* tsb size code */
	uint16_t	tsb_flags;	/* flags for this tsb; see below */
	uint_t		tsb_ttesz_mask;	/* page size masks; see below */

	tte_t		tsb_tte;	/* tte to lock into DTLB */
	sfmmu_t		*tsb_sfmmu;	/* sfmmu */
	kmem_cache_t	*tsb_cache;	/* cache from which mem allocated */
	vmem_t		*tsb_vmp;	/* vmem arena from which mem alloc'd */
};

/*
 * Values for "tsb_ttesz_mask" bitmask.
 */
#define	TSB8K	(1 << TTE8K)
#define	TSB64K  (1 << TTE64K)
#define	TSB512K (1 << TTE512K)
#define	TSB4M   (1 << TTE4M)
#define	TSB32M  (1 << TTE32M)
#define	TSB256M (1 << TTE256M)

/*
 * Values for "tsb_flags" field.
 */
#define	TSB_RELOC_FLAG		0x1
#define	TSB_FLUSH_NEEDED	0x2
#define	TSB_SWAPPED	0x4

/*
 * The platform dependent hat structure.
 * tte counts should be protected by cas.
 * cpuset is protected by cas.
 *
 * Note that sfmmu_xhat_provider MUST be the first element.
 */
struct hat {
	void		*sfmmu_xhat_provider;	/* NULL for CPU hat */
	cpuset_t	sfmmu_cpusran;	/* cpu bit mask for efficient xcalls */
	struct	as	*sfmmu_as;	/* as this hat provides mapping for */
	ulong_t		sfmmu_ttecnt[MMU_PAGE_SIZES]; /* per sz tte counts */
	ulong_t		sfmmu_ismttecnt[MMU_PAGE_SIZES]; /* est. ism ttes */
	union _h_un {
		ism_blk_t	*sfmmu_iblkp;  /* maps to ismhat(s) */
		ism_ment_t	*sfmmu_imentp; /* ism hat's mapping list */
	} h_un;
	uint_t		sfmmu_free:1;	/* hat to be freed - set on as_free */
	uint_t		sfmmu_ismhat:1;	/* hat is dummy ism hatid */
	uint_t		sfmmu_ctxflushed:1;	/* ctx has been flushed */
	uchar_t		sfmmu_rmstat;	/* refmod stats refcnt */
	uchar_t		sfmmu_clrstart;	/* start color bin for page coloring */
	ushort_t	sfmmu_clrbin;	/* per as phys page coloring bin */
	short		sfmmu_cnum;	/* context number */
	ushort_t	sfmmu_flags;	/* flags */
	struct tsb_info	*sfmmu_tsb;	/* list of per as tsbs */
	uint64_t	sfmmu_ismblkpa; /* pa of sfmmu_iblkp, or -1 */
	kcondvar_t	sfmmu_tsb_cv;	/* signals TSB swapin or relocation */
	uchar_t		sfmmu_cext;	/* context page size encoding */
	uint8_t		sfmmu_pgsz[MMU_PAGE_SIZES];  /* ranking for MMU */
#ifdef sun4v
	struct hv_tsb_block sfmmu_hvblock;
#endif
};

#define	sfmmu_iblk	h_un.sfmmu_iblkp
#define	sfmmu_iment	h_un.sfmmu_imentp

300  * bit mask for managing vac conflicts on large pages.
301  * bit 1 is for uncache flag.
302  * bits 2 through min(num of cache colors + 1,31) are
303  * for cache colors that have already been flushed.
304  */
305 #define	CACHE_UNCACHE		1
306 #define	CACHE_NUM_COLOR		(shm_alignment >> MMU_PAGESHIFT)
307 
308 #define	CACHE_VCOLOR_MASK(vcolor)	(2 << (vcolor & (CACHE_NUM_COLOR - 1)))
309 
310 #define	CacheColor_IsFlushed(flag, vcolor) \
311 					((flag) & CACHE_VCOLOR_MASK(vcolor))
312 
313 #define	CacheColor_SetFlushed(flag, vcolor) \
314 					((flag) |= CACHE_VCOLOR_MASK(vcolor))
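
/*
 * Illustrative sketch of the intended use (editorial example; the
 * surrounding flush logic is an assumption, not code from this file):
 * when resolving a vac conflict on a large page, flush each constituent
 * page's virtual color only once per pass over the mappings:
 *
 *	if (!CacheColor_IsFlushed(flushflag, vcolor)) {
 *		CacheColor_SetFlushed(flushflag, vcolor);
 *		// ... flush this virtual color from the vac ...
 *	}
 */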
/*
 * Flags passed to sfmmu_page_cache to flush page from vac or not.
 */
#define	CACHE_FLUSH	0
#define	CACHE_NO_FLUSH	1

/*
 * Flags passed to sfmmu_tlbcache_demap
 */
#define	FLUSH_NECESSARY_CPUS	0
#define	FLUSH_ALL_CPUS		1

/*
 * Software context structure.  The size of this structure is currently
 * hardwired into the tsb miss handlers in assembly code through the
 * CTX_SZ_SHIFT define.  Since this define is used in a shift, we should
 * keep the size of this structure a power of two.
 *
 * ctx_flags:
 * Bit 0 : Free flag.
 */
struct ctx {
	union _ctx_un {
		sfmmu_t *ctx_sfmmup;	/* back pointer to hat id */
		struct ctx *ctx_freep;	/* next ctx in freelist */
	} ctx_un;
	krwlock_t	ctx_rwlock;	/* protect context from stealer */
	uint32_t	ctx_flags;	/* flags */
	uint8_t		pad[12];
};

#define	ctx_sfmmu	ctx_un.ctx_sfmmup
#define	ctx_free	ctx_un.ctx_freep

#ifdef	DEBUG
/*
 * For debugging purposes only. May be removed later.
 */
struct ctx_trace {
	sfmmu_t		*sc_sfmmu_stolen;
	sfmmu_t		*sc_sfmmu_stealing;
	clock_t		sc_time;
	ushort_t	sc_type;
	ushort_t	sc_cnum;
};
#define	CTX_TRC_STEAL	0x1
#define	CTX_TRC_FREE	0x0
#define	TRSIZE	0x400
#define	NEXT_CTXTR(ptr)	(((ptr) >= ctx_trace_last) ? \
		ctx_trace_first : ((ptr) + 1))
#define	TRACE_CTXS(mutex, ptr, cnum, stolen_sfmmu, stealing_sfmmu, type) \
	mutex_enter(mutex);						\
	(ptr)->sc_sfmmu_stolen = (stolen_sfmmu);			\
	(ptr)->sc_sfmmu_stealing = (stealing_sfmmu);			\
	(ptr)->sc_cnum = (cnum);					\
	(ptr)->sc_type = (type);					\
	(ptr)->sc_time = lbolt;						\
	(ptr) = NEXT_CTXTR(ptr);					\
	num_ctx_stolen += (type);					\
	mutex_exit(mutex);
#else

#define	TRACE_CTXS(mutex, ptr, cnum, stolen_sfmmu, stealing_sfmmu, type)

#endif	/* DEBUG */

#endif	/* !_ASM */


/*
 * Macros for sfmmup->sfmmu_flags access.  The macros that change the flags
 * ASSERT() that we're holding the HAT lock before changing the flags;
 * however, callers that read the flags may do so without acquiring the lock
 * in a fast path, and then recheck the flag after acquiring the lock in
 * a slow path.
 */
#define	SFMMU_FLAGS_ISSET(sfmmup, flags) \
	(((sfmmup)->sfmmu_flags & (flags)) == (flags))

#define	SFMMU_FLAGS_CLEAR(sfmmup, flags) \
	(ASSERT(sfmmu_hat_lock_held((sfmmup))), \
	(sfmmup)->sfmmu_flags &= ~(flags))

#define	SFMMU_FLAGS_SET(sfmmup, flags) \
	(ASSERT(sfmmu_hat_lock_held((sfmmup))), \
	(sfmmup)->sfmmu_flags |= (flags))
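
/*
 * A minimal sketch of the fast-path/slow-path pattern described above
 * (editorial example; sfmmu_hat_enter/sfmmu_hat_exit are assumed helpers
 * that acquire and release the HAT lock):
 *
 *	if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {	// unlocked peek
 *		hatlockp = sfmmu_hat_enter(sfmmup);
 *		if (SFMMU_FLAGS_ISSET(sfmmup, HAT_ISMBUSY)) {	// recheck
 *			// ... slow path: the flag really is set ...
 *		}
 *		sfmmu_hat_exit(hatlockp);
 *	}
 */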

/*
 * sfmmu HAT flags
 */
#define	HAT_64K_FLAG	0x01
#define	HAT_512K_FLAG	0x02
#define	HAT_4M_FLAG	0x04
#define	HAT_32M_FLAG	0x08
#define	HAT_256M_FLAG	0x10
#define	HAT_4MTEXT_FLAG	0x80
#define	HAT_SWAPPED	0x100	/* swapped out */
#define	HAT_SWAPIN	0x200	/* swapping in */
#define	HAT_BUSY	0x400	/* replacing TSB(s) */
#define	HAT_ISMBUSY	0x800	/* adding/removing/traversing ISM maps */

#define	HAT_LGPG_FLAGS						\
	(HAT_64K_FLAG | HAT_512K_FLAG | HAT_4M_FLAG |		\
	    HAT_32M_FLAG | HAT_256M_FLAG)

#define	HAT_FLAGS_MASK						\
	(HAT_LGPG_FLAGS | HAT_4MTEXT_FLAG | HAT_SWAPPED |	\
	    HAT_SWAPIN | HAT_BUSY | HAT_ISMBUSY)

/*
 * Context flags
 */
#define	CTX_FREE_FLAG		0x1
#define	CTX_FLAGS_MASK		0x1

#define	CTX_SET_FLAGS(ctx, flag)					\
{									\
	uint32_t old, new;						\
									\
	do {								\
		new = old = (ctx)->ctx_flags;				\
		new &= CTX_FLAGS_MASK;					\
		new |= flag;						\
		new = cas32(&(ctx)->ctx_flags, old, new);		\
	} while (new != old);						\
}

#define	CTX_CLEAR_FLAGS(ctx, flag)					\
{									\
	uint32_t old, new;						\
									\
	do {								\
		new = old = (ctx)->ctx_flags;				\
		new &= CTX_FLAGS_MASK & ~(flag);			\
		new = cas32(&(ctx)->ctx_flags, old, new);		\
	} while (new != old);						\
}

#define	ctxtoctxnum(ctx)	((ushort_t)((ctx) - ctxs))

/*
 * Defines needed for ctx stealing.
 */
#define	GET_CTX_RETRY_CNT	100

/*
 * Starting with context 0, the first NUM_LOCKED_CTXS contexts
 * are locked so that sfmmu_getctx can't steal any of these
 * contexts.  At the time this software was being developed, the
 * only contexts that needed to be locked were context 0 (the kernel
 * context) and context 1 (reserved for stolen contexts), so this
 * constant was originally defined to be 2.
 */
#define	NUM_LOCKED_CTXS 2
#define	INVALID_CONTEXT	1

#ifndef	_ASM

/*
 * Kernel page relocation stuff.
 */
struct sfmmu_callback {
	int key;
	int (*prehandler)(caddr_t, uint_t, uint_t, void *);
	int (*posthandler)(caddr_t, uint_t, uint_t, void *, pfn_t);
	int (*errhandler)(caddr_t, uint_t, uint_t, void *);
	int capture_cpus;
};

extern int sfmmu_max_cb_id;
extern struct sfmmu_callback *sfmmu_cb_table;

extern int hat_kpr_enabled;

struct pa_hment;

/*
 * RFE: With multihat gone we gain back an int.  We could use this to
 * keep ref bits on a per cpu basis to eliminate xcalls.
 */
struct sf_hment {
	tte_t hme_tte;			/* tte for this hment */

	union {
		struct page *page;	/* what page this maps */
		struct pa_hment *data;	/* pa_hment */
	} sf_hment_un;

	struct	sf_hment *hme_next;	/* next hment */
	struct	sf_hment *hme_prev;	/* prev hment */
};

struct pa_hment {
	caddr_t		addr;		/* va */
	uint_t		len;		/* bytes */
	ushort_t	flags;		/* internal flags */
	ushort_t	refcnt;		/* reference count */
	id_t		cb_id;		/* callback id, table index */
	void		*pvt;		/* handler's private data */
	struct sf_hment	sfment;		/* corresponding dummy sf_hment */
};

#define	hme_page		sf_hment_un.page
#define	hme_data		sf_hment_un.data
#define	hme_size(sfhmep)	((int)(TTE_CSZ(&(sfhmep)->hme_tte)))
#define	PAHME_SZ		(sizeof (struct pa_hment))
#define	SFHME_SZ		(sizeof (struct sf_hment))

#define	IS_PAHME(hme)	((hme)->hme_tte.ll == 0)

/*
 * hmeblk_tag structure
 * Structure used to obtain a match on an hme_blk.  It currently consists of
 * the address of the sfmmu struct (or hatid), the base page address of the
 * hme_blk, and the rehash count.  The rehash count is actually only 2 bits
 * and has the following meaning:
 * 1 = 8k or 64k hash sequence.
 * 2 = 512k hash sequence.
 * 3 = 4M hash sequence.
 * We require this count because we don't want to get a false hit on a 512K or
 * 4M rehash with a base address corresponding to an 8k or 64k hmeblk.
 * Note:  The ordering and size of the hmeblk_tag members are implicitly known
 * by the tsb miss handlers written in assembly.  Do not change this structure
 * without checking those routines.  See the HTAG_SFMMUPSZ define.
 */

typedef union {
	struct {
		uint64_t	hblk_basepg: 51, /* hme_blk base pg # */
				hblk_rehash: 13; /* rehash number */
		sfmmu_t		*sfmmup;
	} hblk_tag_un;
	uint64_t		htag_tag[2];
} hmeblk_tag;

#define	htag_id		hblk_tag_un.sfmmup
#define	htag_bspage	hblk_tag_un.hblk_basepg
#define	htag_rehash	hblk_tag_un.hblk_rehash

#define	HTAGS_EQ(tag1, tag2)	(((tag1.htag_tag[0] ^ tag2.htag_tag[0]) | \
				(tag1.htag_tag[1] ^ tag2.htag_tag[1])) == 0)
#define	HME_REHASH(sfmmup)						\
	((sfmmup)->sfmmu_ttecnt[TTE512K] != 0 ||			\
	(sfmmup)->sfmmu_ttecnt[TTE4M] != 0 ||				\
	(sfmmup)->sfmmu_ttecnt[TTE32M] != 0 ||				\
	(sfmmup)->sfmmu_ttecnt[TTE256M] != 0)

#endif /* !_ASM */

#define	NHMENTS		8		/* # of hments in an 8k hme_blk */
					/* needs to be multiple of 2 */
#ifndef	_ASM

#ifdef	HBLK_TRACE

#define	HBLK_LOCK		1
#define	HBLK_UNLOCK		0
#define	HBLK_STACK_DEPTH	6
#define	HBLK_AUDIT_CACHE_SIZE	16
#define	HBLK_LOCK_PATTERN	0xaaaaaaaa
#define	HBLK_UNLOCK_PATTERN	0xbbbbbbbb

struct hblk_lockcnt_audit {
	int		flag;		/* lock or unlock */
	kthread_id_t	thread;
	int		depth;
	pc_t		stack[HBLK_STACK_DEPTH];
};

#endif	/* HBLK_TRACE */


/*
 * Hment block structure.
 * The hme_blk is the node data structure which the hash structure
 * maintains. An hme_blk can have 2 different sizes depending on the
 * number of hments it implicitly contains.  When dealing with 64K, 512K,
 * or 4M hments there is one hment per hme_blk.  When dealing with
 * 8k hments we allocate an hme_blk plus an additional 7 hments to
 * give us a total of 8 (NHMENTS) hments that can be referenced through an
 * hme_blk.
 *
 * The hmeblk structure contains 2 tte reference counters used to determine if
 * it is ok to free up the hmeblk.  Both counters have to be zero in order
 * to be able to free up the hmeblk.  They are protected by cas.
 * hblk_hmecnt is the number of hments present on pp mapping lists.
 * hblk_vcnt reflects the number of valid ttes in the hmeblk.
 *
 * The hmeblk now also has per tte lock cnts.  This is required because
 * the counts can be high and there are not enough bits in the tte. When
 * physio is fixed to not lock the translations we should be able to move
 * the lock cnt back to the tte.  See bug id 1198554.
 *
 * Note that xhat_hme_blk's layout follows this structure: hme_blk_misc
 * and sf_hment are at the same offsets in both structures. Whenever
 * hme_blk is changed, xhat_hme_blk may need to be updated as well.
 */

struct hme_blk_misc {
	ushort_t locked_cnt;	/* HAT_LOAD_LOCK ref cnt */
	uint_t	notused:10;
	uint_t	xhat_bit:1;	/* set for an xhat hme_blk */
	uint_t	shadow_bit:1;	/* set for a shadow hme_blk */
	uint_t	nucleus_bit:1;	/* set for a nucleus hme_blk */
	uint_t	ttesize:3;	/* contains ttesz of hmeblk */
};

struct hme_blk {
	uint64_t	hblk_nextpa;	/* physical address for hash list */

	hmeblk_tag	hblk_tag;	/* tag used to obtain an hmeblk match */

	struct hme_blk	*hblk_next;	/* on free list or on hash list */
					/* protected by hash lock */

	struct hme_blk	*hblk_shadow;	/* pts to shadow hblk */
					/* protected by hash lock */
	uint_t		hblk_span;	/* span of memory hmeblk maps */

	struct hme_blk_misc	hblk_misc;

	union {
		struct {
			ushort_t hblk_hmecount;	/* hment on mlists counter */
			ushort_t hblk_validcnt;	/* valid tte reference count */
		} hblk_counts;
		uint_t		hblk_shadow_mask;
	} hblk_un;

#ifdef	HBLK_TRACE
	kmutex_t	hblk_audit_lock;	/* lock to protect index */
	uint_t		hblk_audit_index;	/* index into audit_cache */
	struct	hblk_lockcnt_audit hblk_audit_cache[HBLK_AUDIT_CACHE_SIZE];
#endif	/* HBLK_TRACE */

	struct sf_hment hblk_hme[1];	/* hment array */
};

#define	hblk_lckcnt	hblk_misc.locked_cnt
#define	hblk_xhat_bit   hblk_misc.xhat_bit
#define	hblk_shw_bit	hblk_misc.shadow_bit
#define	hblk_nuc_bit	hblk_misc.nucleus_bit
#define	hblk_ttesz	hblk_misc.ttesize
#define	hblk_hmecnt	hblk_un.hblk_counts.hblk_hmecount
#define	hblk_vcnt	hblk_un.hblk_counts.hblk_validcnt
#define	hblk_shw_mask	hblk_un.hblk_shadow_mask

#define	MAX_HBLK_LCKCNT	0xFFFF
#define	HMEBLK_ALIGN	0x8		/* hmeblk has to be double aligned */

#ifdef	HBLK_TRACE

#define	HBLK_STACK_TRACE(hmeblkp, lock)					\
{									\
	int flag = lock;	/* to pacify lint */			\
	int audit_index;						\
									\
	mutex_enter(&hmeblkp->hblk_audit_lock);				\
	audit_index = hmeblkp->hblk_audit_index;			\
	hmeblkp->hblk_audit_index = ((hmeblkp->hblk_audit_index + 1) &	\
	    (HBLK_AUDIT_CACHE_SIZE - 1));				\
	mutex_exit(&hmeblkp->hblk_audit_lock);				\
									\
	if (flag)							\
		hmeblkp->hblk_audit_cache[audit_index].flag =		\
		    HBLK_LOCK_PATTERN;					\
	else								\
		hmeblkp->hblk_audit_cache[audit_index].flag =		\
		    HBLK_UNLOCK_PATTERN;				\
									\
	hmeblkp->hblk_audit_cache[audit_index].thread = curthread;	\
	hmeblkp->hblk_audit_cache[audit_index].depth =			\
	    getpcstack(hmeblkp->hblk_audit_cache[audit_index].stack,	\
	    HBLK_STACK_DEPTH);						\
}

#else

#define	HBLK_STACK_TRACE(hmeblkp, lock)

#endif	/* HBLK_TRACE */

#define	HMEHASH_FACTOR	16	/* used to calc # of buckets in hme hash */

/*
 * A maximum number of user hmeblks is defined in order to place an upper
 * limit on how much nucleus memory is required and to avoid overflowing the
 * tsbmiss uhashsz and khashsz data areas. The number below corresponds to
 * the number of buckets required for an average hash chain length of 4 on
 * a 16TB machine.
 */

#define	MAX_UHME_BUCKETS	(0x1 << 30)
#define	MAX_KHME_BUCKETS	(0x1 << 30)

/*
 * The minimum number of kernel hash buckets.
 */
#define	MIN_KHME_BUCKETS	0x800

/*
 * The number of hash buckets must be a power of 2. If the initial calculated
 * value is less than USER_BUCKETS_THRESHOLD we round up to the next greater
 * power of 2, otherwise we round down to avoid huge over-allocations.
 */
#define	USER_BUCKETS_THRESHOLD	(1<<22)

#define	MAX_NUCUHME_BUCKETS	0x4000
#define	MAX_NUCKHME_BUCKETS	0x2000

/*
 * There are 2 locks in the hmehash bucket.  The hmehash_mutex is
 * a regular mutex used to make sure operations on a hash link are only
 * done by one thread.  Any operation which comes into the hat with
 * a <vaddr, as> will grab the hmehash_mutex.  Normally one would expect
 * the tsb miss handlers to grab the hash lock to make sure the hash list
 * is consistent while we traverse it.  Unfortunately this can lead to
 * deadlocks or recursive mutex enters since it is possible for
 * someone holding the lock to take a tlb/tsb miss.
 * To solve this problem we have added the hmehash_listlock.  This lock
 * is only grabbed by the tsb miss handlers, vatopfn, and while
 * adding/removing a hmeblk from the hash list. The code is written to
 * guarantee we won't take a tlb miss while holding this lock.
 */
struct hmehash_bucket {
	kmutex_t	hmehash_mutex;
	uint64_t	hmeh_nextpa;	/* physical address for hash list */
	struct hme_blk *hmeblkp;
	uint_t		hmeh_listlock;
};

#endif /* !_ASM */


/*
 * The tsb miss handlers written in assembly know that sfmmup
 * is a 64 bit ptr.
 *
 * The bspage and re-hash part is 64 bits, with the sfmmup being another 64
 * bits.
 */
#define	HTAG_SFMMUPSZ		0	/* Not really used for LP64 */
#define	HTAG_REHASHSZ		13

/*
 * Assembly routines need to be able to get to ttesz
 */
#define	HBLK_SZMASK		0x7

#ifndef _ASM

/*
 * Returns the number of bytes that an hmeblk spans given its tte size
 */
#define	get_hblk_span(hmeblkp) ((hmeblkp)->hblk_span)
#define	get_hblk_ttesz(hmeblkp)	((hmeblkp)->hblk_ttesz)
#define	get_hblk_cache(hmeblkp)	(((hmeblkp)->hblk_ttesz == TTE8K) ? \
	sfmmu8_cache : sfmmu1_cache)
#define	HMEBLK_SPAN(ttesz)						\
	((ttesz == TTE8K)? (TTEBYTES(ttesz) * NHMENTS) : TTEBYTES(ttesz))
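
/*
 * Worked example (editorial): HMEBLK_SPAN(TTE8K) == 8K * NHMENTS == 64K,
 * since an 8k hmeblk covers NHMENTS (8) consecutive 8K hments, while
 * HMEBLK_SPAN(TTE4M) == 4M, since large-page hmeblks hold a single hment.
 */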

#define	set_hblk_sz(hmeblkp, ttesz)				\
	(hmeblkp)->hblk_ttesz = (ttesz);			\
	(hmeblkp)->hblk_span = HMEBLK_SPAN(ttesz)

#define	get_hblk_base(hmeblkp)					\
	((uintptr_t)(hmeblkp)->hblk_tag.htag_bspage << MMU_PAGESHIFT)

#define	get_hblk_endaddr(hmeblkp)				\
	((caddr_t)(get_hblk_base(hmeblkp) + get_hblk_span(hmeblkp)))

#define	in_hblk_range(hmeblkp, vaddr)					\
	(((uintptr_t)(vaddr) >= get_hblk_base(hmeblkp)) &&		\
	((uintptr_t)(vaddr) < (get_hblk_base(hmeblkp) +			\
	get_hblk_span(hmeblkp))))

#define	tte_to_vaddr(hmeblkp, tte)	((caddr_t)(get_hblk_base(hmeblkp) \
	+ (TTEBYTES(TTE_CSZ(&tte)) * (tte).tte_hmenum)))

#define	vaddr_to_vshift(hblktag, vaddr, shwsz)				\
	((((uintptr_t)(vaddr) >> MMU_PAGESHIFT) - (hblktag.htag_bspage)) >>\
	TTE_BSZS_SHIFT((shwsz) - 1))

#define	HME8BLK_SZ	(sizeof (struct hme_blk) + \
			(NHMENTS - 1) * sizeof (struct sf_hment))
#define	HME1BLK_SZ	(sizeof (struct hme_blk))
#define	H8TOH1		(MMU_PAGESIZE4M / MMU_PAGESIZE)
#define	H1MIN		(2 + MAX_BIGKTSB_TTES)	/* nucleus text+data, ktsb */

/*
 * Hme_blk hash structure
 * Active mappings are kept in a hash structure of hme_blks.  The hash
 * function is based on (ctx, vaddr).  The size of the hash table is a
 * power of 2 such that the average hash chain length is HMENT_HASHAVELEN.
 * The hash actually consists of 2 separate hashes.  One hash is for the user
 * address space and the other hash is for the kernel address space.
 * The number of buckets is calculated at boot time and stored in the global
 * variables "uhmehash_num" and "khmehash_num".  By making the hash table size
 * a power of 2 we can use a simple & operation to derive an index instead of
 * a divide.
 *
 * HME_HASH_FUNCTION(hatid, vaddr, shift) returns a pointer to a hme_hash
 * bucket.
 * An hme hash bucket contains a pointer to an hme_blk and the mutex that
 * protects the link list.
 * Spitfire supports 4 page sizes.  8k and 64K pages only need one hash.
 * 512K pages need 2 hashes and 4M pages need 3 hashes.
 * The 'shift' parameter controls how many bits the vaddr will be shifted in
 * the hash function. It is calculated in the HME_HASH_SHIFT(ttesz) function
 * and it varies depending on the page size as follows:
 *	8k pages:  	HBLK_RANGE_SHIFT
 *	64k pages:	MMU_PAGESHIFT64K
 *	512K pages:	MMU_PAGESHIFT512K
 *	4M pages:	MMU_PAGESHIFT4M
 * An assembly version of the hash function exists in sfmmu_ktsb_miss(). All
 * changes should be reflected in both versions.  This function and the TSB
 * miss handlers are the only places which know about the two hashes.
 *
 * HBLK_RANGE_SHIFT controls the range of virtual addresses that will fall
 * into the same bucket for a particular process.  It is currently set to
 * be equivalent to a 64K range, or one hme_blk.
 *
 * The hme_blks in the hash are protected by a per hash bucket mutex
 * known as SFMMU_HASH_LOCK.
 * You need to acquire this lock before traversing the hash bucket link
 * list, while adding/removing a hme_blk to the list, and while
 * modifying an hme_blk.  A possible optimization is to replace these
 * mutexes by a readers/writer lock, but right now it is not clear whether
 * this is a win or not.
 *
 * The HME_HASH_TABLE_SEARCH will search the hash table for the
 * hme_blk that contains the hment that corresponds to the passed
 * ctx and vaddr.  It is assumed that the SFMMU_HASH_LOCK is held.
 */

#endif /* ! _ASM */

#define	KHATID			ksfmmup
#define	UHMEHASH_SZ		uhmehash_num
#define	KHMEHASH_SZ		khmehash_num
#define	HMENT_HASHAVELEN	4
#define	HBLK_RANGE_SHIFT	MMU_PAGESHIFT64K /* shift for HBLK_BS_MASK */
#define	MAX_HASHCNT		5
#define	DEFAULT_MAX_HASHCNT	3

#ifndef _ASM

#define	HASHADDR_MASK(hashno)	TTE_PAGEMASK(hashno)

#define	HME_HASH_SHIFT(ttesz)						\
	((ttesz == TTE8K)? HBLK_RANGE_SHIFT : TTE_PAGE_SHIFT(ttesz))	\

#define	HME_HASH_ADDR(vaddr, hmeshift)					\
	((caddr_t)(((uintptr_t)(vaddr) >> (hmeshift)) << (hmeshift)))

#define	HME_HASH_BSPAGE(vaddr, hmeshift)				\
	(((uintptr_t)(vaddr) >> (hmeshift)) << ((hmeshift) - MMU_PAGESHIFT))

#define	HME_HASH_REHASH(ttesz)						\
	(((ttesz) < TTE512K)? 1 : (ttesz))

#define	HME_HASH_FUNCTION(hatid, vaddr, shift)				\
	((hatid != KHATID)?						\
	(&uhme_hash[ (((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) & \
	    UHMEHASH_SZ) ]):					\
	(&khme_hash[ (((uintptr_t)(hatid) ^ ((uintptr_t)vaddr >> (shift))) & \
	    KHMEHASH_SZ) ]))

/*
 * This macro will traverse a hmeblk hash link list looking for an hme_blk
 * that owns the specified vaddr and hatid.  If it doesn't find one, hmeblkp
 * will be set to NULL; otherwise it will point to the correct hme_blk.
 * This macro also cleans empty hblks.
 */
#define	HME_HASH_SEARCH_PREV(hmebp, hblktag, hblkp, hblkpa,		\
	pr_hblk, prevpa, listp)						\
{									\
	struct hme_blk *nx_hblk;					\
	uint64_t 	nx_pa;						\
									\
	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));				\
	hblkp = hmebp->hmeblkp;						\
	hblkpa = hmebp->hmeh_nextpa;					\
	prevpa = 0;							\
	pr_hblk = NULL;							\
	while (hblkp) {							\
		if (HTAGS_EQ(hblkp->hblk_tag, hblktag)) {		\
			/* found hme_blk */				\
			break;						\
		}							\
		nx_hblk = hblkp->hblk_next;				\
		nx_pa = hblkp->hblk_nextpa;				\
		if (!hblkp->hblk_vcnt && !hblkp->hblk_hmecnt) {		\
			sfmmu_hblk_hash_rm(hmebp, hblkp, prevpa, pr_hblk); \
			sfmmu_hblk_free(hmebp, hblkp, hblkpa, listp);   \
		} else {						\
			pr_hblk = hblkp;				\
			prevpa = hblkpa;				\
		}							\
		hblkp = nx_hblk;					\
		hblkpa = nx_pa;						\
	}								\
}

#define	HME_HASH_SEARCH(hmebp, hblktag, hblkp, listp)			\
{									\
	struct hme_blk *pr_hblk;					\
	uint64_t hblkpa, prevpa;					\
									\
	HME_HASH_SEARCH_PREV(hmebp, hblktag, hblkp, hblkpa, pr_hblk,	\
		prevpa, listp);						\
}

/*
 * This macro will traverse a hmeblk hash link list looking for an hme_blk
 * that owns the specified vaddr and hatid.  If it doesn't find one, hmeblkp
 * will be set to NULL; otherwise it will point to the correct hme_blk.
 * It doesn't remove empty hblks.
 */
#define	HME_HASH_FAST_SEARCH(hmebp, hblktag, hblkp)			\
	ASSERT(SFMMU_HASH_LOCK_ISHELD(hmebp));				\
	for (hblkp = hmebp->hmeblkp; hblkp;				\
	    hblkp = hblkp->hblk_next) {					\
		if (HTAGS_EQ(hblkp->hblk_tag, hblktag)) {		\
			/* found hme_blk */				\
			break;						\
		}							\
	}								\


#define	SFMMU_HASH_LOCK(hmebp)						\
		(mutex_enter(&hmebp->hmehash_mutex))

#define	SFMMU_HASH_UNLOCK(hmebp)					\
		(mutex_exit(&hmebp->hmehash_mutex))

#define	SFMMU_HASH_LOCK_TRYENTER(hmebp)					\
		(mutex_tryenter(&hmebp->hmehash_mutex))

#define	SFMMU_HASH_LOCK_ISHELD(hmebp)					\
		(mutex_owned(&hmebp->hmehash_mutex))
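
/*
 * A minimal lookup sketch tying the macros above together (editorial
 * example; it mirrors the pattern the C miss paths follow, with hmeshift,
 * hblktag, hmebp, hmeblkp and list as assumed local variables):
 *
 *	hmeshift = HME_HASH_SHIFT(ttesz);
 *	hblktag.htag_id = sfmmup;
 *	hblktag.htag_bspage = HME_HASH_BSPAGE(vaddr, hmeshift);
 *	hblktag.htag_rehash = HME_HASH_REHASH(ttesz);
 *	hmebp = HME_HASH_FUNCTION(sfmmup, vaddr, hmeshift);
 *
 *	SFMMU_HASH_LOCK(hmebp);
 *	HME_HASH_SEARCH(hmebp, hblktag, hmeblkp, &list);
 *	if (hmeblkp != NULL) {
 *		// ... hmeblkp maps vaddr at this page size ...
 *	}
 *	SFMMU_HASH_UNLOCK(hmebp);
 */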

#define	SFMMU_XCALL_STATS(ctxnum)					\
{									\
	if (ctxnum == KCONTEXT) {					\
		SFMMU_STAT(sf_kernel_xcalls);				\
	} else {							\
		SFMMU_STAT(sf_user_xcalls);				\
	}								\
}

#define	astosfmmu(as)		((as)->a_hat)
#define	sfmmutoctxnum(sfmmup)	((sfmmup)->sfmmu_cnum)
#define	sfmmutoctx(sfmmup)	(&ctxs[sfmmutoctxnum(sfmmup)])
#define	hblktosfmmu(hmeblkp)	((sfmmu_t *)(hmeblkp)->hblk_tag.htag_id)
#define	sfmmutoas(sfmmup)	((sfmmup)->sfmmu_as)
#define	ctxnumtoctx(ctxnum)	(&ctxs[ctxnum])
/*
 * We use the sfmmu data structure to keep the per as page coloring info.
 */
#define	as_color_bin(as)	(astosfmmu(as)->sfmmu_clrbin)
#define	as_color_start(as)	(astosfmmu(as)->sfmmu_clrstart)

typedef struct {
	char	h8[HME8BLK_SZ];
} hblk8_t;

typedef struct {
	char	h1[HME1BLK_SZ];
} hblk1_t;

typedef struct {
	ulong_t  	index;
	ulong_t  	len;
	hblk8_t		*list;
} nucleus_hblk8_info_t;

typedef struct {
	ulong_t		index;
	ulong_t		len;
	hblk1_t		*list;
} nucleus_hblk1_info_t;

/*
 * This struct is used for accumulating information about a range
 * of pages that are unloading so that a single xcall can flush
 * the entire range from remote tlbs. A function that must demap
 * a range of virtual addresses declares one of these structures
 * and initializes it using DEMAP_RANGE_INIT(). It then passes a pointer
 * to this struct to the appropriate sfmmu_hblk_* level function, which
 * does all the bookkeeping using the other macros. When the function has
 * finished the virtual address range, it needs to call the
 * DEMAP_RANGE_FLUSH() macro to take care of any remaining unflushed
 * mappings.
 *
 * The maximum range this struct can represent is the number of bits
 * in the dmr_bitvec field times the pagesize in dmr_pgsz. Currently, only
 * MMU_PAGESIZE pages are supported.
 *
 * Since there are now cases where it's no longer necessary to do
 * flushes (e.g. when the process isn't runnable because it's swapping
 * out or exiting) we allow these macros to take a NULL dmr input and do
 * nothing in that case.
 */
typedef struct {
	sfmmu_t		*dmr_sfmmup;	/* relevant hat */
	caddr_t		dmr_addr;	/* beginning address */
	caddr_t		dmr_endaddr;	/* ending address */
	ulong_t		dmr_bitvec;	/* valid pages found */
	ulong_t		dmr_bit;	/* next page to examine */
	ulong_t		dmr_maxbit;	/* highest page in range */
	ulong_t		dmr_pgsz;	/* page size in range */
} demap_range_t;

#define	DMR_MAXBIT ((ulong_t)1<<63) /* dmr_bit high bit */

#define	DEMAP_RANGE_INIT(sfmmup, dmrp) \
	if ((dmrp) != NULL) { \
	(dmrp)->dmr_sfmmup = (sfmmup); \
	(dmrp)->dmr_bitvec = 0; \
	(dmrp)->dmr_maxbit = sfmmu_dmr_maxbit; \
	(dmrp)->dmr_pgsz = MMU_PAGESIZE; \
	}

#define	DEMAP_RANGE_PGSZ(dmrp) ((dmrp)? (dmrp)->dmr_pgsz : MMU_PAGESIZE)

#define	DEMAP_RANGE_CONTINUE(dmrp, addr, endaddr) \
	if ((dmrp) != NULL) { \
	if ((dmrp)->dmr_bitvec != 0 && (dmrp)->dmr_endaddr != (addr)) \
		sfmmu_tlb_range_demap(dmrp); \
	(dmrp)->dmr_endaddr = (endaddr); \
	}

#define	DEMAP_RANGE_FLUSH(dmrp) \
	if ((dmrp) != NULL) { \
		if ((dmrp)->dmr_bitvec != 0) \
			sfmmu_tlb_range_demap(dmrp); \
	}

#define	DEMAP_RANGE_MARKPG(dmrp, addr) \
	if ((dmrp) != NULL) { \
		if ((dmrp)->dmr_bitvec == 0) { \
			(dmrp)->dmr_addr = (addr); \
			(dmrp)->dmr_bit = 1; \
		} \
		(dmrp)->dmr_bitvec |= (dmrp)->dmr_bit; \
	}

#define	DEMAP_RANGE_NEXTPG(dmrp) \
	if ((dmrp) != NULL && (dmrp)->dmr_bitvec != 0) { \
		if ((dmrp)->dmr_bit & (dmrp)->dmr_maxbit) { \
			sfmmu_tlb_range_demap(dmrp); \
		} else { \
			(dmrp)->dmr_bit <<= 1; \
		} \
	}
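
/*
 * A minimal sketch of the protocol described above (editorial example;
 * the loop body is an assumption about how an unload path would use
 * these macros, not code from this file):
 *
 *	demap_range_t dmr;
 *
 *	DEMAP_RANGE_INIT(sfmmup, &dmr);
 *	DEMAP_RANGE_CONTINUE(&dmr, addr, endaddr);
 *	for (; addr < endaddr; addr += MMU_PAGESIZE) {
 *		// ... invalidate the tte mapping addr ...
 *		DEMAP_RANGE_MARKPG(&dmr, addr);	// note page needs a flush
 *		DEMAP_RANGE_NEXTPG(&dmr);	// flushes when bitvec fills
 *	}
 *	DEMAP_RANGE_FLUSH(&dmr);	// flush whatever remains
 */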

/*
 * TSB related structures
 *
 * The TSB is made up of tte entries.  Both the tag and data are present
 * in the TSB.  The TSB locking is managed as follows:
 * A software bit in the tsb tag is used to indicate that an entry is locked.
 * If a cpu servicing a tsb miss reads a locked entry the tag compare will
 * fail, forcing the cpu to go to the hat hash for the translation.
 * The cpu that holds the lock can then modify the data side, and the tag side.
 * The last write should be to the word containing the lock bit, which will
 * clear the lock and allow the tsb entry to be read.  It is assumed that all
 * cpus reading the tsb will do so with atomic 128-bit loads.  An atomic
 * 128-bit load is required to prevent the following from happening:
 *
 * cpu 0			cpu 1			comments
 *
 * ldx tag						tag unlocked
 *				ldstub lock		set lock
 *				stx data
 *				stx tag			unlock
 * ldx tag						incorrect tte!!!
 *
 * The software also maintains a bit in the tag to indicate an invalid
 * tsb entry.  The purpose of this bit is to allow the tsb invalidate code
 * to invalidate a tsb entry with a single cas.  See code for details.
 */

union tsb_tag {
	struct {
		uint32_t	tag_res0:16;	/* reserved - context area */
		uint32_t	tag_inv:1;	/* sw - invalid tsb entry */
		uint32_t	tag_lock:1;	/* sw - locked tsb entry */
		uint32_t	tag_res1:4;	/* reserved */
		uint32_t	tag_va_hi:10;	/* va[63:54] */
		uint32_t	tag_va_lo;	/* va[53:22] */
	} tagbits;
	struct tsb_tagints {
		uint32_t	inthi;
		uint32_t	intlo;
	} tagints;
};
#define	tag_invalid		tagbits.tag_inv
#define	tag_locked		tagbits.tag_lock
#define	tag_vahi		tagbits.tag_va_hi
#define	tag_valo		tagbits.tag_va_lo
#define	tag_inthi		tagints.inthi
#define	tag_intlo		tagints.intlo

struct tsbe {
	union tsb_tag	tte_tag;
	tte_t		tte_data;
};
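
/*
 * Illustrative sketch of the single-cas invalidate mentioned above
 * (editorial example; the real code is in the assembly TSB handling
 * paths).  The tag_inv bit lives in the tag's upper 32-bit word, so one
 * cas on tag_inthi suffices; readers see either the old valid tag or
 * the invalid one, never a partial update:
 *
 *	do {
 *		old = tsbep->tte_tag.tag_inthi;
 *		new = old | TSBTAG_INVALID;
 *	} while (cas32(&tsbep->tte_tag.tag_inthi, old, new) != old);
 */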

/*
 * A per cpu struct is kept that duplicates some info
 * used by the tl>0 tsb miss handlers plus it provides
 * a scratch area.  Its purpose is to minimize cache misses
 * in the tsb miss handler; the struct is 128 bytes (2 e$ lines).
 *
 * There should be one allocated per cpu in nucleus memory
 * and it should be aligned on an ecache line boundary.
 */
struct tsbmiss {
	sfmmu_t			*ksfmmup;	/* kernel hat id */
	sfmmu_t			*usfmmup;	/* user hat id */
	struct tsbe		*tsbptr;	/* hardware computed ptr */
	struct tsbe		*tsbptr4m;	/* hardware computed ptr */
	uint64_t		ismblkpa;
	struct hmehash_bucket	*khashstart;
	struct hmehash_bucket	*uhashstart;
	uint_t			khashsz;
	uint_t			uhashsz;
	uint16_t 		dcache_line_mask; /* used to flush dcache */
	uint16_t		hat_flags;
	uint32_t		itlb_misses;
	uint32_t		dtlb_misses;
	uint32_t		utsb_misses;
	uint32_t		ktsb_misses;
	uint16_t		uprot_traps;
	uint16_t		kprot_traps;

	/*
	 * scratch[0] -> TSB_TAGACC
	 * scratch[1] -> TSBMISS_HMEBP
	 * scratch[2] -> TSBMISS_HATID
	 */
	uintptr_t		scratch[3];
	uint8_t			pad[0x10];
};

/*
 * A per cpu struct is kept for use within the tl>0 kpm tsb
 * miss handler. Some members are duplicates of common data or
 * the physical addresses of common data. A few members are also
 * written by the tl>0 kpm tsb miss handler. Its purpose is to
 * minimize cache misses in the kpm tsb miss handler and it occupies
 * one ecache line. There should be one allocated per cpu in
 * nucleus memory and it should be aligned on an ecache line
 * boundary. It is not merged w/ struct tsbmiss since there is
 * not much to share and the tsbmiss paths are different, so
 * a kpm tlbmiss/tsbmiss only touches one cacheline, except for
 * (DEBUG || SFMMU_STAT_GATHER) where the dtlb_misses counter
 * of struct tsbmiss is used on every dtlb miss.
 */
struct kpmtsbm {
	caddr_t		vbase;		/* start of kpm address range */
	caddr_t		vend;		/* end of kpm address range */
	uchar_t		flags;		/* flags needed in TL tsbmiss handler */
	uchar_t		sz_shift;	/* for single kpm window */
	uchar_t		kpmp_shift;	/* hash lock shift */
	uchar_t		kpmp2pshft;	/* kpm page to page shift */
	uint_t		kpmp_table_sz;	/* size of kpmp_table or kpmp_stable */
	uint64_t	kpmp_tablepa;	/* paddr of kpmp_table or kpmp_stable */
	uint64_t	msegphashpa;	/* paddr of memseg_phash */
	struct tsbe	*tsbptr;	/* saved ktsb pointer */
	uint_t		kpm_dtlb_misses; /* kpm tlbmiss counter */
	uint_t		kpm_tsb_misses;	/* kpm tsbmiss counter */
	uintptr_t	pad[1];
};

extern uint_t  tsb_slab_size;
extern uint_t  tsb_slab_shift;
extern uint_t  tsb_slab_ttesz;
extern uint_t  tsb_slab_pamask;

#endif /* !_ASM */

/*
 * Flags for TL kpm tsbmiss handler
 */
#define	KPMTSBM_ENABLE_FLAG	0x01	/* bit copy of kpm_enable */
#define	KPMTSBM_TLTSBM_FLAG	0x02	/* use TL tsbmiss handler */
#define	KPMTSBM_TSBPHYS_FLAG	0x04	/* use ASI_MEM for TSB update */

/*
 * The TSB
 * All TSB sizes supported by the hardware are now supported (8K - 1M).
 * For kernel TSBs we may go beyond the hardware supported sizes and support
 * larger TSBs via software.
 * All TTE sizes are supported in the TSB; the manner in which this is
 * done is cpu dependent.
 */
#define	TSB_MIN_SZCODE		TSB_8K_SZCODE	/* min. supported TSB size */
#define	TSB_MIN_OFFSET_MASK	(TSB_OFFSET_MASK(TSB_MIN_SZCODE))

#define	UTSB_MAX_SZCODE		TSB_1M_SZCODE /* max. supported TSB size */
#define	UTSB_MAX_OFFSET_MASK	(TSB_OFFSET_MASK(UTSB_MAX_SZCODE))

#define	TSB_FREEMEM_MIN		0x1000		/* 32 mb */
#define	TSB_FREEMEM_LARGE	0x10000		/* 512 mb */
#define	TSB_8K_SZCODE		0		/* 512 entries */
#define	TSB_16K_SZCODE		1		/* 1k entries */
#define	TSB_32K_SZCODE		2		/* 2k entries */
#define	TSB_64K_SZCODE		3		/* 4k entries */
#define	TSB_128K_SZCODE		4		/* 8k entries */
#define	TSB_256K_SZCODE		5		/* 16k entries */
#define	TSB_512K_SZCODE		6		/* 32k entries */
#define	TSB_1M_SZCODE		7		/* 64k entries */
#define	TSB_2M_SZCODE		8		/* 128k entries */
#define	TSB_4M_SZCODE		9		/* 256k entries */
#define	TSB_ENTRY_SHIFT		4	/* each entry = 128 bits = 16 bytes */
#define	TSB_ENTRY_SIZE		(1 << 4)
#define	TSB_START_SIZE		9
#define	TSB_ENTRIES(tsbsz)	(1 << (TSB_START_SIZE + tsbsz))
#define	TSB_BYTES(tsbsz)	(TSB_ENTRIES(tsbsz) << TSB_ENTRY_SHIFT)
#define	TSB_OFFSET_MASK(tsbsz)	(TSB_ENTRIES(tsbsz) - 1)
#define	TSB_BASEADDR_MASK	((1 << 12) - 1)

/*
 * sun4u platforms
 * ---------------
 * We now support two user TSBs with one TSB base register.
 * Hence the TSB base register is split up as follows:
 *
 * When only one TSB present:
 *   [63  62..42  41..13  12..4  3..0]
 *     ^   ^       ^       ^     ^
 *     |   |       |       |     |
 *     |   |       |       |     |_ TSB size code
 *     |   |       |       |
 *     |   |       |       |_ Reserved 0
 *     |   |       |
 *     |   |       |_ TSB VA[41..13]
 *     |   |
 *     |   |_ VA hole (Spitfire), zeros (Cheetah and beyond)
 *     |
 *     |_ 0
 *
 * When second TSB present:
 *   [63  62..42  41..33  32..29  28..22  21..13  12..4  3..0]
 *     ^   ^       ^       ^       ^       ^       ^     ^
 *     |   |       |       |       |       |       |     |
 *     |   |       |       |       |       |       |     |_ First TSB size code
 *     |   |       |       |       |       |       |
 *     |   |       |       |       |       |       |_ Reserved 0
 *     |   |       |       |       |       |
 *     |   |       |       |       |       |_ First TSB's VA[21..13]
 *     |   |       |       |       |
 *     |   |       |       |       |_ Reserved for future use
 *     |   |       |       |
 *     |   |       |       |_ Second TSB's size code
 *     |   |       |
 *     |   |       |_ Second TSB's VA[21..13]
 *     |   |
 *     |   |_ VA hole (Spitfire) / ones (Cheetah and beyond)
 *     |
 *     |_ 1
 *
 * Note that since we store 21..13 of each TSB's VA, TSBs and their slabs
 * may be up to 4M in size.  For now, only hardware supported TSB sizes
 * are supported, though the slabs are usually 4M in size.
 *
 * sun4u platforms that define UTSB_PHYS use physical addressing to access
 * the user TSBs at TL>0.  The first user TSB base is in the MMU I/D TSB Base
 * registers.  The second TSB base uses a dedicated scratchpad register which
 * requires a definition of SCRATCHPAD_UTSBREG in mach_sfmmu.h.  The layout for
 * both registers is equivalent to sun4v below, except the TSB PA range is
 * [46..13] for sun4u.
 *
 * sun4v platforms
 * ---------------
 * On sun4v platforms, we use two dedicated scratchpad registers as pseudo
 * hardware TSB base registers to hold up to two different user TSBs.
 *
 * Each register contains TSB's physical base and size code information
 * as follows:
 *
 *   [63..56  55..13  12..4  3..0]
 *      ^       ^       ^     ^
 *      |       |       |     |
 *      |       |       |     |_ TSB size code
 *      |       |       |
 *      |       |       |_ Reserved 0
 *      |       |
 *      |       |_ TSB PA[55..13]
 *      |
 *      |
 *      |
 *      |_ 0 for valid TSB
 *
 * Absence of a user TSB (primarily the second user TSB) is indicated by
 * storing a negative value in the TSB base register. This allows us to
 * check for presence of a user TSB by simply checking bit# 63.
 */
#define	TSBREG_MSB_SHIFT	32		/* set upper bits */
#define	TSBREG_MSB_CONST	0xfffff800	/* set bits 63..43 */
#define	TSBREG_FIRTSB_SHIFT	42		/* to clear bits 63:22 */
#define	TSBREG_SECTSB_MKSHIFT	20		/* 21:13 --> 41:33 */
#define	TSBREG_SECTSB_LSHIFT	22		/* to clear bits 63:42 */
#define	TSBREG_SECTSB_RSHIFT	(TSBREG_SECTSB_MKSHIFT + TSBREG_SECTSB_LSHIFT)
						/* sectsb va -> bits 21:13 */
						/* after clearing upper bits */
#define	TSBREG_SECSZ_SHIFT	29		/* to get sectsb szc to 3:0 */
#define	TSBREG_VAMASK_SHIFT	13		/* set up VA mask */

#define	BIGKTSB_SZ_MASK		0xf
#define	TSB_SOFTSZ_MASK		BIGKTSB_SZ_MASK
#define	MIN_BIGKTSB_SZCODE	9	/* 256k entries */
#define	MAX_BIGKTSB_SZCODE	11	/* 1024k entries */
#define	MAX_BIGKTSB_TTES	(TSB_BYTES(MAX_BIGKTSB_SZCODE) / MMU_PAGESIZE4M)
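
/*
 * Worked example (editorial): TSB_BYTES(MAX_BIGKTSB_SZCODE) is
 * (1 << (TSB_START_SIZE + 11)) << TSB_ENTRY_SHIFT == 16MB, so
 * MAX_BIGKTSB_TTES == 16MB / 4MB == 4 locked 4M ttes for the largest
 * big kernel TSB.
 */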

#define	TAG_VALO_SHIFT		22		/* tag's va are bits 63-22 */
/*
 * sw bits used on tsb_tag - bit masks used only in assembly;
 * use only a sethi for these fields.
 */
#define	TSBTAG_INVALID	0x00008000		/* tsb_tag.tag_invalid */
#define	TSBTAG_LOCKED	0x00004000		/* tsb_tag.tag_locked */

#ifdef	_ASM

/*
 * Marker to indicate that this instruction will be hot patched at runtime
 * to some other value.
 * This value must be zero since it fills in the imm bits of the target
 * instructions to be patched.
 */
#define	RUNTIME_PATCH	(0)

/*
 * V9 defines the nop instruction as the following, which we use
 * at runtime to nullify some instructions we don't want to
 * execute in the trap handlers on certain platforms.
 */
#define	MAKE_NOP_INSTR(reg)	\
	sethi	%hi(0x1000000), reg

/*
 * Macro to get this CPU's tsbmiss area.
 */
#define	CPU_TSBMISS_AREA(tsbmiss, tmp1)					\
	CPU_INDEX(tmp1, tsbmiss);		/* tmp1 = cpu idx */	\
	sethi	%hi(tsbmiss_area), tsbmiss;	/* tsbmiss base ptr */	\
	sllx    tmp1, TSBMISS_SHIFT, tmp1;	/* byte offset */	\
	or	tsbmiss, %lo(tsbmiss_area), tsbmiss;			\
	add	tsbmiss, tmp1, tsbmiss		/* tsbmiss area of CPU */


/*
 * Macro to set the kernel context + page size codes in the DMMU primary
 * context register. It is only necessary for sun4u because sun4v does
 * not need page size codes.
 */
#ifdef sun4v

#define	SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3)

#else

#define	SET_KCONTEXTREG(reg0, reg1, reg2, reg3, reg4, label1, label2, label3) \
	sethi	%hi(kcontextreg), reg0;					\
	ldx	[reg0 + %lo(kcontextreg)], reg0;			\
	mov	MMU_PCONTEXT, reg1;					\
	ldxa	[reg1]ASI_MMU_CTX, reg2;				\
	xor	reg0, reg2, reg2;					\
	brz	reg2, label3;						\
	srlx	reg2, CTXREG_NEXT_SHIFT, reg2;				\
	rdpr	%pstate, reg3;		/* disable interrupts */	\
	btst	PSTATE_IE, reg3;					\
/*CSTYLED*/								\
	bnz,a,pt %icc, label1;						\
	wrpr	reg3, PSTATE_IE, %pstate;				\
/*CSTYLED*/								\
label1:;								\
	brz	reg2, label2;	   /* need demap if N_pgsz0/1 change */	\
	sethi	%hi(FLUSH_ADDR), reg4;					\
	mov	DEMAP_ALL_TYPE, reg2;					\
	stxa	%g0, [reg2]ASI_DTLB_DEMAP;				\
	stxa	%g0, [reg2]ASI_ITLB_DEMAP;				\
/*CSTYLED*/								\
label2:;								\
	stxa	reg0, [reg1]ASI_MMU_CTX;				\
	flush	reg4;							\
	btst	PSTATE_IE, reg3;					\
/*CSTYLED*/								\
	bnz,a,pt %icc, label3;						\
	wrpr	%g0, reg3, %pstate;	/* restore interrupt state */	\
label3:;

#endif

#endif	/* _ASM */

#ifndef _ASM

/*
 * Page coloring
 * The p_vcolor field of the page struct (1 byte) is used to store the
 * virtual page color.  This provides for 255 colors.  The value zero is
 * used to mean the page has no color - never been mapped or somehow
 * purified.
 */

#define	PP_GET_VCOLOR(pp)	(((pp)->p_vcolor) - 1)
#define	PP_NEWPAGE(pp)		(!(pp)->p_vcolor)
#define	PP_SET_VCOLOR(pp, color)                                          \
	((pp)->p_vcolor = ((color) + 1))

/*
 * As mentioned, p_vcolor == 0 means there is no color for this page.
 * But PP_SET_VCOLOR(pp, color) expects 'color' to be the real color minus
 * one, so we define this constant.
 */
#define	NO_VCOLOR	(-1)

#define	addr_to_vcolor(addr) \
	(((uint_t)(uintptr_t)(addr) >> MMU_PAGESHIFT) & vac_colors_mask)
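
/*
 * Illustrative round trip (editorial sketch; the conflict handling is an
 * assumption, not code from this file): record the color a page is first
 * mapped at, then compare on a later mapping to detect a vac alias:
 *
 *	if (PP_NEWPAGE(pp))
 *		PP_SET_VCOLOR(pp, addr_to_vcolor(vaddr));
 *	else if (PP_GET_VCOLOR(pp) != addr_to_vcolor(vaddr)) {
 *		// ... potential vac alias; resolve the conflict ...
 *	}
 */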

/*
 * The field p_index in the psm page structure is for large pages support.
 * P_index is a bit-vector of the different mapping sizes that a given page
 * is part of. An hme structure for a large mapping is only added in the
 * group leader page (first page). All pages covered by a given large mapping
 * have the corresponding mapping bit set in their p_index field. This allows
 * us to only store an explicit hme structure in the leading page, which
 * simplifies the mapping link list management. Furthermore, it provides us
 * a fast mechanism for determining the largest mapping a page is part of. For
 * example, a page with a 64K and a 4M mapping has a p_index value of 0x0A.
 *
 * Implementation note: even though the first bit in p_index is reserved
 * for 8K mappings, it is NOT USED by the code and SHOULD NOT be set.
 * In addition, the upper four bits of the p_index field are used by the
 * code as temporaries.
 */

/*
 * Defines for psm page struct fields and large page support
 */
#define	SFMMU_INDEX_SHIFT		6
#define	SFMMU_INDEX_MASK		((1 << SFMMU_INDEX_SHIFT) - 1)

/* Return the mapping index */
#define	PP_MAPINDEX(pp)	((pp)->p_index & SFMMU_INDEX_MASK)

/*
 * These macros rely on the following property:
 * All pages constituting a large page are covered by a virtually
 * contiguous set of page_t's.
 */

/* Return the leader for this mapping size */
#define	PP_GROUPLEADER(pp, sz) \
	(&(pp)[-(int)(pp->p_pagenum & (TTEPAGES(sz)-1))])

/* Return the root page for this page based on p_szc */
#define	PP_PAGEROOT(pp)	((pp)->p_szc == 0 ? (pp) : \
	PP_GROUPLEADER((pp), (pp)->p_szc))

#define	PP_PAGENEXT_N(pp, n)	((pp) + (n))
#define	PP_PAGENEXT(pp)		PP_PAGENEXT_N((pp), 1)

#define	PP_PAGEPREV_N(pp, n)	((pp) - (n))
#define	PP_PAGEPREV(pp)		PP_PAGEPREV_N((pp), 1)

#define	PP_ISMAPPED_LARGE(pp)	(PP_MAPINDEX(pp) != 0)

/* Need a function to test the page mapping which takes p_index into account */
#define	PP_ISMAPPED(pp)	((pp)->p_mapping || PP_ISMAPPED_LARGE(pp))

/*
 * Don't call this macro with sz equal to zero. 8K mappings SHOULD NOT
 * set the p_index field.
 */
#define	PAGESZ_TO_INDEX(sz)	(1 << (sz))
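
/*
 * Worked example (editorial) matching the p_index description above:
 * a page that is part of both a 64K and a 4M mapping has
 *
 *	PAGESZ_TO_INDEX(TTE64K) | PAGESZ_TO_INDEX(TTE4M)
 *		== (1 << 1) | (1 << 3) == 0x0A
 *
 * set in its p_index field.
 */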


/*
 * Prototypes for hat assembly routines.  Some of these are
 * known to machine-dependent VM code.
 */
extern uint64_t sfmmu_make_tsbtag(caddr_t);
extern struct tsbe *
		sfmmu_get_tsbe(uint64_t, caddr_t, int, int);
extern void	sfmmu_load_tsbe(struct tsbe *, uint64_t, tte_t *, int);
extern void	sfmmu_unload_tsbe(struct tsbe *, uint64_t, int);
extern void	sfmmu_load_mmustate(sfmmu_t *);
extern void	sfmmu_ctx_steal_tl1(uint64_t, uint64_t);
extern void	sfmmu_raise_tsb_exception(uint64_t, uint64_t);
#ifndef sun4v
extern void	sfmmu_itlb_ld(caddr_t, int, tte_t *);
extern void	sfmmu_dtlb_ld(caddr_t, int, tte_t *);
#endif /* sun4v */
extern void	sfmmu_copytte(tte_t *, tte_t *);
extern int	sfmmu_modifytte(tte_t *, tte_t *, tte_t *);
extern int	sfmmu_modifytte_try(tte_t *, tte_t *, tte_t *);
extern pfn_t	sfmmu_ttetopfn(tte_t *, caddr_t);
extern void	sfmmu_hblk_hash_rm(struct hmehash_bucket *,
			struct hme_blk *, uint64_t, struct hme_blk *);
extern void	sfmmu_hblk_hash_add(struct hmehash_bucket *, struct hme_blk *,
			uint64_t);

/*
 * Functions exported to machine-dependent VM code
 */
extern void	sfmmu_patch_ktsb(void);
#ifndef UTSB_PHYS
extern void	sfmmu_patch_utsb(void);
#endif /* UTSB_PHYS */
extern pfn_t	sfmmu_vatopfn(caddr_t, sfmmu_t *, tte_t *);
extern void	sfmmu_vatopfn_suspended(caddr_t, sfmmu_t *, tte_t *);
#ifdef	DEBUG
extern void	sfmmu_check_kpfn(pfn_t);
#else
#define		sfmmu_check_kpfn(pfn)	/* disabled */
#endif	/* DEBUG */
extern void	sfmmu_memtte(tte_t *, pfn_t, uint_t, int);
extern void	sfmmu_tteload(struct hat *, tte_t *, caddr_t, page_t *, uint_t);
extern void	sfmmu_tsbmiss_exception(struct regs *, uintptr_t, uint_t);
extern void	sfmmu_init_tsbs(void);
extern caddr_t	sfmmu_ktsb_alloc(caddr_t);
extern int	sfmmu_getctx_pri(void);
extern int	sfmmu_getctx_sec(void);
extern void	sfmmu_setctx_sec(int);
extern void	sfmmu_inv_tsb(caddr_t, uint_t);
extern void	sfmmu_init_ktsbinfo(void);
extern int	sfmmu_setup_4lp(void);
extern void	sfmmu_patch_mmu_asi(int);
extern void	sfmmu_init_nucleus_hblks(caddr_t, size_t, int, int);
extern void	sfmmu_cache_flushall(void);
extern pgcnt_t	sfmmu_tte_cnt(sfmmu_t *, uint_t);
extern void	*sfmmu_tsb_segkmem_alloc(vmem_t *, size_t, int);
extern void	sfmmu_tsb_segkmem_free(vmem_t *, void *, size_t);
extern void	sfmmu_steal_context(sfmmu_t *, uint8_t *);

extern void	hat_kern_setup(void);
extern int	hat_page_relocate(page_t **, page_t **, spgcnt_t *);
extern uint_t	hat_preferred_pgsz(struct hat *, caddr_t, size_t, int);
extern int	sfmmu_get_ppvcolor(struct page *);
extern int	sfmmu_get_addrvcolor(caddr_t);
extern int	sfmmu_hat_lock_held(sfmmu_t *);

/*
 * Functions exported to xhat_sfmmu.c
 */
extern kmutex_t *sfmmu_mlist_enter(page_t *);
extern void	sfmmu_mlist_exit(kmutex_t *);
extern int	sfmmu_mlist_held(struct page *);
extern struct hme_blk *sfmmu_hmetohblk(struct sf_hment *);

/*
 * MMU-specific functions optionally imported from the CPU module
 */
#pragma weak mmu_large_pages_disabled
#pragma weak mmu_set_ctx_page_sizes
#pragma weak mmu_preferred_pgsz
#pragma weak mmu_check_page_sizes

extern int mmu_large_pages_disabled(uint_t);
extern void mmu_set_ctx_page_sizes(sfmmu_t *);
extern uint_t mmu_preferred_pgsz(sfmmu_t *, caddr_t, size_t);
extern void mmu_check_page_sizes(sfmmu_t *, uint64_t *);
extern sfmmu_t		*ksfmmup;
extern struct ctx	*ctxs;
extern uint_t		nctxs;
extern caddr_t		ktsb_base;
extern uint64_t		ktsb_pbase;
extern int		ktsb_sz;
extern int		ktsb_szcode;
extern caddr_t		ktsb4m_base;
extern uint64_t		ktsb4m_pbase;
extern int		ktsb4m_sz;
extern int		ktsb4m_szcode;
extern uint64_t		kpm_tsbbase;
extern int		kpm_tsbsz;
extern int		ktsb_phys;
extern int		enable_bigktsb;
#ifndef sun4v
extern int		utsb_dtlb_ttenum;
extern int		utsb4m_dtlb_ttenum;
#endif /* sun4v */
extern int		uhmehash_num;
extern int		khmehash_num;
extern struct hmehash_bucket *uhme_hash;
extern struct hmehash_bucket *khme_hash;
extern kmutex_t		*mml_table;
extern uint_t		mml_table_sz;
extern uint_t		mml_shift;
extern uint_t		hblk_alloc_dynamic;
extern struct tsbmiss	tsbmiss_area[NCPU];
extern struct kpmtsbm	kpmtsbm_area[NCPU];
extern int		tsb_max_growsize;
#ifndef sun4v
extern int		dtlb_resv_ttenum;
extern caddr_t		utsb_vabase;
extern caddr_t		utsb4m_vabase;
#endif /* sun4v */
extern vmem_t		*kmem_tsb_default_arena[];
extern int		tsb_lgrp_affinity;

/* kpm externals */
extern pfn_t		sfmmu_kpm_vatopfn(caddr_t);
extern void		sfmmu_kpm_patch_tlbm(void);
extern void		sfmmu_kpm_patch_tsbm(void);
extern void		sfmmu_kpm_load_tsb(caddr_t, tte_t *, int);
extern void		sfmmu_kpm_unload_tsb(caddr_t, int);
extern void		sfmmu_kpm_tsbmtl(short *, uint_t *, int);
extern int		sfmmu_kpm_stsbmtl(char *, uint_t *, int);
extern caddr_t		kpm_vbase;
extern size_t		kpm_size;
extern struct memseg	*memseg_hash[];
extern uint64_t		memseg_phash[];
extern kpm_hlk_t	*kpmp_table;
extern kpm_shlk_t	*kpmp_stable;
extern uint_t		kpmp_table_sz;
extern uint_t		kpmp_stable_sz;
extern uchar_t		kpmp_shift;

#define	PP_ISMAPPED_KPM(pp)	((pp)->p_kpmref > 0)

#define	IS_KPM_ALIAS_RANGE(vaddr)					\
	(((vaddr) - kpm_vbase) >> (uintptr_t)kpm_size_shift > 0)
#endif /* !_ASM */

/* sfmmu_kpm_tsbmtl flags */
#define	KPMTSBM_STOP		0
#define	KPMTSBM_START		1

/* kpm_smallpages kp_mapped values */
#define	KPM_MAPPEDS		-1	/* small mapping valid, no conflict */
#define	KPM_MAPPEDSC		1	/* small mapping valid, conflict */

/* Physical memseg address NULL marker */
#define	MSEG_NULLPTR_PA		-1

/*
 * Memseg hash defines for the kpm trap-level tsbmiss handler.
 * Must be kept in sync with page.h.
 */
#define	SFMMU_MEM_HASH_SHIFT		0x9
#define	SFMMU_N_MEM_SLOTS		0x200
#define	SFMMU_MEM_HASH_ENTRY_SHIFT	3
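
/*
 * Consistency note: SFMMU_N_MEM_SLOTS equals
 * (1 << SFMMU_MEM_HASH_SHIFT), i.e. 0x200 == (1 << 0x9); the
 * compile-time checks below verify that both values match their
 * page.h counterparts.
 */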

#ifndef	_ASM
#if (SFMMU_MEM_HASH_SHIFT != MEM_HASH_SHIFT)
#error SFMMU_MEM_HASH_SHIFT != MEM_HASH_SHIFT
#endif
#if (SFMMU_N_MEM_SLOTS != N_MEM_SLOTS)
#error SFMMU_N_MEM_SLOTS != N_MEM_SLOTS
#endif

/* Physical memseg address NULL marker */
#define	SFMMU_MEMSEG_NULLPTR_PA		-1

/*
 * KCONTEXT must be zero; the assembly parts depend on this assumption.
 */
#if (KCONTEXT != 0)
#error KCONTEXT != 0
#endif
#endif	/* !_ASM */


#endif /* _KERNEL */

#ifndef _ASM
/*
 * ctx, hmeblk, mlistlock and other stats for sfmmu
 */
struct sfmmu_global_stat {
	int		sf_tsb_exceptions;	/* # of tsb exceptions */
	int		sf_tsb_raise_exception;	/* # tsb exc. w/o TLB flush */

	int		sf_pagefaults;		/* # of pagefaults */

	int		sf_uhash_searches;	/* # of user hash searches */
	int		sf_uhash_links;		/* # of user hash links */
	int		sf_khash_searches;	/* # of kernel hash searches */
	int		sf_khash_links;		/* # of kernel hash links */

	int		sf_swapout;		/* # times hat swapped out */

	int		sf_ctxfree;		/* ctx alloc from free list */
	int		sf_ctxdirty;		/* ctx alloc from dirty list */
	int		sf_ctxsteal;		/* ctx allocated by steal */

	int		sf_tsb_alloc;		/* # TSB allocations */
	int		sf_tsb_allocfail;	/* # times TSB alloc fail */
	int		sf_tsb_sectsb_create;	/* # times second TSB added */

	int		sf_tteload8k;		/* calls to sfmmu_tteload */
	int		sf_tteload64k;		/* calls to sfmmu_tteload */
	int		sf_tteload512k;		/* calls to sfmmu_tteload */
	int		sf_tteload4m;		/* calls to sfmmu_tteload */
	int		sf_tteload32m;		/* calls to sfmmu_tteload */
	int		sf_tteload256m;		/* calls to sfmmu_tteload */

	int		sf_tsb_load8k;		/* # times loaded 8K tsbent */
	int		sf_tsb_load4m;		/* # times loaded 4M tsbent */

	int		sf_hblk_hit;		/* found hblk during tteload */
	int		sf_hblk8_ncreate;	/* static hblk8's created */
	int		sf_hblk8_nalloc;	/* static hblk8's allocated */
	int		sf_hblk1_ncreate;	/* static hblk1's created */
	int		sf_hblk1_nalloc;	/* static hblk1's allocated */
	int		sf_hblk_slab_cnt;	/* sfmmu8_cache slab creates */
	int		sf_hblk_reserve_cnt;	/* hblk_reserve usage */
	int		sf_hblk_recurse_cnt;	/* hblk_reserve owner reqs */
	int		sf_hblk_reserve_hit;	/* hblk_reserve hash hits */
	int		sf_get_free_success;	/* reserve list allocs */
	int		sf_get_free_throttle;	/* fails due to throttling */
	int		sf_get_free_fail;	/* fails due to empty list */
	int		sf_put_free_success;	/* reserve list frees */
	int		sf_put_free_fail;	/* fails due to full list */

	int		sf_pgcolor_conflict;	/* VAC conflict resolution */
	int		sf_uncache_conflict;	/* VAC conflict resolution */
	int		sf_unload_conflict;	/* VAC unload resolution */
	int		sf_ism_uncache;		/* VAC conflict resolution */
	int		sf_ism_recache;		/* VAC conflict resolution */
	int		sf_recache;		/* VAC conflict resolution */

	int		sf_steal_count;		/* # of hblks stolen */

	int		sf_pagesync;		/* # of pagesyncs */
	int		sf_clrwrt;		/* # of clear write perms */
	int		sf_pagesync_invalid;	/* pagesync with inv tte */

	int		sf_kernel_xcalls;	/* # of kernel cross calls */
	int		sf_user_xcalls;		/* # of user cross calls */

	int		sf_tsb_grow;		/* # of user tsb grows */
	int		sf_tsb_shrink;		/* # of user tsb shrinks */
	int		sf_tsb_resize_failures;	/* # of failed tsb resizes */
	int		sf_tsb_reloc;		/* # of user tsb relocations */

	int		sf_user_vtop;		/* # of user vatopfn calls */

	int		sf_ctx_swap;		/* # times switched MMU ctxs */
	int		sf_tlbflush_all;	/* # times flush all TLBs */
	int		sf_tlbflush_ctx;	/* # times flush TLB ctx */
	int		sf_tlbflush_deferred;	/* # times !flush ctx imm. */

	int		sf_tlb_reprog_pgsz;	/* # times switch TLB pgsz */
};

struct sfmmu_tsbsize_stat {
	int		sf_tsbsz_8k;
	int		sf_tsbsz_16k;
	int		sf_tsbsz_32k;
	int		sf_tsbsz_64k;
	int		sf_tsbsz_128k;
	int		sf_tsbsz_256k;
	int		sf_tsbsz_512k;
	int		sf_tsbsz_1m;
	int		sf_tsbsz_2m;
	int		sf_tsbsz_4m;
};

struct sfmmu_percpu_stat {
	int	sf_itlb_misses;		/* # of itlb misses */
	int	sf_dtlb_misses;		/* # of dtlb misses */
	int	sf_utsb_misses;		/* # of user tsb misses */
	int	sf_ktsb_misses;		/* # of kernel tsb misses */
	int	sf_tsb_hits;		/* # of tsb hits */
	int	sf_umod_faults;		/* # of mod (prot viol) flts */
	int	sf_kmod_faults;		/* # of mod (prot viol) flts */
};

#define	SFMMU_STAT(stat)		sfmmu_global_stat.stat++;
#define	SFMMU_STAT_ADD(stat, amount)	sfmmu_global_stat.stat += amount;
#define	SFMMU_STAT_SET(stat, count)	sfmmu_global_stat.stat = count;
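
/*
 * Usage sketch (illustrative; assumes the extern declaration of
 * sfmmu_global_stat found elsewhere in the sfmmu sources):
 *
 *	SFMMU_STAT(sf_pagefaults)		increment a counter
 *	SFMMU_STAT_ADD(sf_khash_links, n)	add n to a counter
 *
 * Note the macro bodies already end in a semicolon, so a caller's
 * trailing ';' merely adds an empty statement.
 */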

#endif /* !_ASM */

#ifdef	__cplusplus
}
#endif

#endif	/* _VM_HAT_SFMMU_H */