/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/mman.h>
#include <sys/sunddi.h>
#include <sys/tnf_probe.h>
#include <vm/hat_sfmmu.h>
#include <vm/as.h>
#include <vm/xhat.h>
#include <vm/xhat_sfmmu.h>
#include <sys/zulu_hat.h>
#include <sys/zulumod.h>

/*
 * This file contains the implementation of zulu_hat: an XHAT provider
 * to support the MMU for the XVR-4000 graphics accelerator (code name zulu).
 *
 * The zulu hat is linked into the kernel misc module zuluvm.
 * zuluvm provides services required by the zulu device driver module
 * that are not part of the standard DDI. See PSARC 2002/231.
 *
 * The zulu driver is delivered by the graphics consolidation.
 * zuluvm is in the ON workspace.
 *
 * zulu_hat provides two types of interfaces:
 *   1.	The set of functions and data structures used by zuluvm to obtain
 * 	tte entries for the zulu MMU and to manage the association between
 *	user processes' address spaces and zulu graphics contexts.
 *
 *   2.	The entry points required for an XHAT provider: zulu_hat_ops
 */

/*
 * zulu_ctx_tab contains an array of pointers to the zulu_hats.
 *
 * During a zulu graphics context switch, the zulu MMU's current context
 * register is set to the index of the process's zulu hat's location in
 * the array zulu_ctx_tab.
 *
 * This allows the TL=1 TLB miss handler to quickly find the zulu hat and
 * look up a tte in the zulu hat's TSB.
 *
 * To synchronize with the trap handler we use bit zero of
 * the pointer as a lock bit. See the function zulu_ctx_tsb_lock_enter().
 *
 * If the trap handler finds the ctx locked it doesn't wait; it
 * posts a soft interrupt which is handled at TL=0.
 */
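
/*
 * Illustrative sketch (comment only, not compiled) of the lookup the
 * trap handler performs against this encoding:
 *
 *	entry = zulu_ctx_tab[ctx];
 *	if (entry & ZULU_CTX_LOCK)
 *		post a soft interrupt and retry at TL=0;
 *	else
 *		search the TSB of the zulu_hat that entry points to;
 *
 * kmem-allocated pointers are at least 8-byte aligned, so bit zero is
 * always free to serve as the lock bit.
 */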

#define	ZULU_HAT_MAX_CTX	32
struct zulu_hat *zulu_ctx_tab[ZULU_HAT_MAX_CTX];

/*
 * To avoid searching through the whole zulu_ctx_tab for a free slot,
 * we maintain the value of zulu_ctx_search_start.
 *
 * This value is a guess as to where a free slot in the context table might be.
 * All slots < zulu_ctx_search_start are definitely occupied.
 */
static int zulu_ctx_search_start = 0;


/*
 * this mutex protects the zulu_ctx_tab and zulu_ctx_search_start
 */
static kmutex_t	zulu_ctx_lock;


uint64_t	zulu_tsb_hit = 0;	/* assembly code increments this */
static uint64_t	zulu_tsb_miss = 0;
static uint64_t	zulu_as_fault = 0;

/*
 * The zulu device has two zulu data mmus.
 * We use the base pagesize for one of them and 4M for the other.
 */
extern int zuluvm_base_pgsize;



/*
 * call zuluvm to remove translations for a page
 */
static void
zulu_hat_demap_page(struct zulu_hat *zhat, caddr_t vaddr, int size)
{
	if (zhat->zulu_ctx < 0) {
		/* context has been stolen, so page is already demapped */
		return;
	}
	zuluvm_demap_page(zhat->zdev, NULL, zhat->zulu_ctx, vaddr, size);
}

static void
zulu_hat_demap_ctx(void *zdev, int zulu_ctx)
{
	if (zulu_ctx < 0) {
		/* context has been stolen */
		return;
	}
	zuluvm_demap_ctx(zdev, zulu_ctx);
}


/*
 * steal the least recently used context slot.
 */
static int
zulu_hat_steal_ctx(void)
{
	int		ctx;
	hrtime_t	delta = INT64_MAX;
	struct zulu_hat *zhat_oldest = NULL;

	ASSERT(mutex_owned(&zulu_ctx_lock));

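	/*
	 * Scan every slot for the hat with the oldest last_used
	 * timestamp; delta holds the smallest timestamp seen so far.
	 */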
	for (ctx = 0; ctx < ZULU_HAT_MAX_CTX; ctx++) {
		struct zulu_hat *zhat = ZULU_CTX_GET_HAT(ctx);

		/*
		 * we shouldn't be here unless all slots are occupied
		 */
		ASSERT(zhat != NULL);

		TNF_PROBE_3(steal_ctx_loop, "zulu_hat", /* CSTYLED */,
			tnf_int, ctx, ctx,
			tnf_long, last_used, zhat->last_used,
			tnf_long, oldest, delta);

		if (zhat->last_used < delta) {
			zhat_oldest = zhat;
			delta = zhat->last_used;
		}
	}

	ASSERT(zhat_oldest != NULL);

	mutex_enter(&zhat_oldest->lock);

	/* Nobody should have the tsb lock bit set here */
	ASSERT(((uint64_t)zulu_ctx_tab[zhat_oldest->zulu_ctx] & ZULU_CTX_LOCK)
		== 0);

	ctx = zhat_oldest->zulu_ctx;
	zhat_oldest->zulu_ctx = -1;

	ZULU_CTX_SET_HAT(ctx, NULL);

	zulu_hat_demap_ctx(zhat_oldest->zdev, ctx);

	mutex_exit(&zhat_oldest->lock);

	TNF_PROBE_1(zulu_hat_steal_ctx, "zulu_hat", /* CSTYLED */,
		tnf_int, ctx, ctx);

	return (ctx);
}

/*
 * find a slot in the context table for a zulu_hat
 */
static void
zulu_hat_ctx_alloc(struct zulu_hat *zhat)
{
	int		ctx;

	mutex_enter(&zulu_ctx_lock);

	for (ctx = zulu_ctx_search_start; ctx < ZULU_HAT_MAX_CTX; ctx++) {
		if (ZULU_CTX_IS_FREE(ctx)) {
			zulu_ctx_search_start = ctx + 1;
			break;
		}
	}

	if (ctx == ZULU_HAT_MAX_CTX) {
		/* table is full; need to steal an entry */
		zulu_ctx_search_start = ZULU_HAT_MAX_CTX;
		ctx = zulu_hat_steal_ctx();
	}

	mutex_enter(&zhat->lock);

	ZULU_CTX_SET_HAT(ctx, zhat);
	zhat->zulu_ctx = ctx;

	mutex_exit(&zhat->lock);

	mutex_exit(&zulu_ctx_lock);

	TNF_PROBE_2(zulu_hat_ctx_alloc, "zulu_hat", /* CSTYLED */,
		tnf_opaque, zhat, zhat, tnf_int, ctx, ctx);
}

/*
 * zulu_hat_validate_ctx: Called before the graphics context associated
 * with a given zulu hat becomes the current zulu graphics context.
 * Make sure that the hat has a slot in zulu_ctx_tab.
 */
void
zulu_hat_validate_ctx(struct zulu_hat *zhat)
{
	if (zhat->zulu_ctx < 0) {
		zulu_hat_ctx_alloc(zhat);
	}
	zhat->last_used = gethrtime();
}


static void
zulu_hat_ctx_free(struct zulu_hat *zhat)
{
	TNF_PROBE_1(zulu_hat_ctx_free, "zulu_hat", /* CSTYLED */,
		tnf_int, ctx, zhat->zulu_ctx);

	mutex_enter(&zulu_ctx_lock);

	mutex_enter(&zhat->lock);
	if (zhat->zulu_ctx >= 0) {
		ZULU_CTX_SET_HAT(zhat->zulu_ctx, NULL);

		if (zulu_ctx_search_start > zhat->zulu_ctx) {
			zulu_ctx_search_start = zhat->zulu_ctx;
		}
	}
	mutex_exit(&zhat->lock);
	mutex_exit(&zulu_ctx_lock);
}

/*
 * Lock the zulu tsb for a given zulu_hat.
 *
 * We're just protecting against the TLB trap handler here. Other operations
 * on the zulu_hat require entering the zhat's lock.
 */
static void
zulu_ctx_tsb_lock_enter(struct zulu_hat *zhat)
{
	uint64_t	lck;
	uint64_t	*plck;

	ASSERT(mutex_owned(&zhat->lock));

	if (zhat->zulu_ctx < 0) {
		return;
	}
	plck = (uint64_t *)&zulu_ctx_tab[zhat->zulu_ctx];

	for (;;) {
		lck = *plck;
		if (!(lck & ZULU_CTX_LOCK)) {
			uint64_t old_lck, new_lck;

			new_lck = lck | ZULU_CTX_LOCK;

			old_lck = cas64(plck, lck, new_lck);

			if (old_lck == lck) {
				/*
				 * success
				 */
				break;
			}
		}
	}
}

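/*
 * Drop the tsb lock. A plain store is sufficient here: only the
 * current lock holder ever clears the lock bit, so no cas is needed.
 */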
static void
zulu_ctx_tsb_lock_exit(struct zulu_hat *zhat)
{
	uint64_t	lck;
	int		zulu_ctx = zhat->zulu_ctx;

	if (zulu_ctx < 0) {
		return;
	}
	lck = (uint64_t)zulu_ctx_tab[zulu_ctx];
	ASSERT(lck & ZULU_CTX_LOCK);
	lck &= ~ZULU_CTX_LOCK;
	zulu_ctx_tab[zulu_ctx] = (struct zulu_hat *)lck;
}

/*
 * Each zulu hat has a "shadow tree" which is a table of 4MB address regions
 * for which the zhat has mappings.
 *
 * This table is maintained in an avl tree.
 * Nodes in the tree are called shadow blocks (or sblks).
 *
 * This data structure makes unload operations over an (address, range)
 * span much more efficient.
 *
 * We get called a lot for address ranges that have never been supplied
 * to zulu.
 */

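/*
 * Invariants, as used by the code below: each sblk covers one
 * ZULU_SHADOW_BLK_MASK-aligned region; ivaddr is the region's base
 * address, ref_count is the number of zblks mapped within the region,
 * and [min_addr, max_addr) are conservative low and high water marks
 * for the addresses those zblks cover.
 */
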
/*
 * compare the base address of two nodes in the shadow tree
 */
static int
zulu_shadow_tree_compare(const void *a, const void *b)
{
	struct zulu_shadow_blk *zba = (struct zulu_shadow_blk *)a;
	struct zulu_shadow_blk *zbb = (struct zulu_shadow_blk *)b;
	uint64_t		addr_a = zba->ivaddr;
	uint64_t		addr_b = zbb->ivaddr;

	TNF_PROBE_2(zulu_shadow_tree_compare, "zulu_shadow_tree", /* CSTYLED */,
		tnf_opaque, addr_a, addr_a, tnf_opaque, addr_b, addr_b);

	if (addr_a < addr_b) {
		return (-1);
	} else if (addr_a > addr_b) {
		return (1);
	} else {
		return (0);
	}
}

/*
 * lookup the entry in the shadow tree for a given virtual address
 */
static struct zulu_shadow_blk *
zulu_shadow_tree_lookup(struct zulu_hat *zhat, uint64_t ivaddr,
	avl_index_t *where)
{
	struct zulu_shadow_blk proto;
	struct zulu_shadow_blk *sblk;

	proto.ivaddr = ivaddr & ZULU_SHADOW_BLK_MASK;

	/*
	 * Pages typically fault in in address order, so we cache the last
	 * shadow block that was referenced; this usually lets us avoid
	 * calling avl_find.
	 */
	if ((zhat->sblk_last != NULL) &&
		(proto.ivaddr == zhat->sblk_last->ivaddr)) {
		sblk = zhat->sblk_last;
	} else {
		sblk = (struct zulu_shadow_blk *)avl_find(&zhat->shadow_tree,
								&proto, where);
		zhat->sblk_last = sblk;
	}

	TNF_PROBE_2(zulu_shadow_tree_lookup, "zulu_shadow_tree", /* CSTYLED */,
		tnf_opaque, ivaddr, proto.ivaddr,
		tnf_opaque, where, where ? *where : ~0);

	return (sblk);
}

/*
 * insert a sblk into the shadow tree for a given zblk.
 * If a sblk already exists, just increment its refcount.
 */
static void
zulu_shadow_tree_insert(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
	avl_index_t		where;
	struct zulu_shadow_blk	*sblk = NULL;
	uint64_t		ivaddr;
	uint64_t		end;

	ivaddr = zblk->zulu_hat_blk_vaddr & ZULU_SHADOW_BLK_MASK;

	end = zblk->zulu_hat_blk_vaddr + ZULU_HAT_PGSZ(zblk->zulu_hat_blk_size);

	sblk = zulu_shadow_tree_lookup(zhat, ivaddr, &where);
	if (sblk != NULL) {
		sblk->ref_count++;

		if (zblk->zulu_hat_blk_vaddr < sblk->min_addr) {
			sblk->min_addr = zblk->zulu_hat_blk_vaddr;
		}
		/*
		 * a blk can set both the minimum and maximum when it
		 * is the first zblk added to a previously emptied sblk
		 */
		if (end > sblk->max_addr) {
			sblk->max_addr = end;
		}
	} else {
		sblk = kmem_zalloc(sizeof (*sblk), KM_SLEEP);
		sblk->ref_count = 1;
		sblk->ivaddr = ivaddr;
		sblk->min_addr = zblk->zulu_hat_blk_vaddr;
		sblk->max_addr = end;
		zhat->sblk_last = sblk;

		avl_insert(&zhat->shadow_tree, sblk, where);
	}
	zblk->zulu_shadow_blk = sblk;
	TNF_PROBE_2(zulu_shadow_tree_insert, "zulu_shadow_tree", /* CSTYLED */,
		tnf_opaque, vaddr, ivaddr,
		tnf_opaque, ref_count, sblk->ref_count);
}

/*
 * decrement the ref_count for the sblk that corresponds to a given zblk.
 * When the ref_count goes to zero, reset the water marks to an empty
 * range; the sblk itself stays in the tree until the tree is destroyed.
 */

static void
zulu_shadow_tree_delete(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
	struct zulu_shadow_blk	*sblk;

	ASSERT(zblk->zulu_shadow_blk != NULL);

	sblk = zblk->zulu_shadow_blk;

	TNF_PROBE_2(zulu_shadow_tree_delete, "zulu_shadow_tree", /* CSTYLED */,
		tnf_opaque, vaddr, sblk->ivaddr,
		tnf_opaque, ref_count, sblk->ref_count-1);

	if (--sblk->ref_count == 0) {
		if (zhat->sblk_last == sblk) {
			zhat->sblk_last = NULL;
		}
		sblk->min_addr = sblk->ivaddr + ZULU_SHADOW_BLK_RANGE;
		sblk->max_addr = sblk->ivaddr;
	} else {
		/*
		 * Update the high and low water marks for this sblk.
		 * These are estimates, because we don't know if the previous
		 * or next region are actually occupied, but we can tell
		 * whether the previous values have become invalid.
		 *
		 * In the most often applied case a segment is being
		 * unloaded, and the min_addr will be kept up to date as
		 * the zblks are deleted in order.
		 */
		uint64_t end = zblk->zulu_hat_blk_vaddr +
					ZULU_HAT_PGSZ(zblk->zulu_hat_blk_size);

		if (zblk->zulu_hat_blk_vaddr == sblk->min_addr) {
			sblk->min_addr = end;
		}
		if (end == sblk->max_addr) {
			sblk->max_addr = zblk->zulu_hat_blk_vaddr;
		}
	}

	zblk->zulu_shadow_blk = NULL;
}

static void
zulu_shadow_tree_destroy(struct zulu_hat *zhat)
{
	struct zulu_shadow_blk *sblk;
	void	*cookie = NULL;

	while ((sblk = (struct zulu_shadow_blk *)avl_destroy_nodes(
					&zhat->shadow_tree, &cookie)) != NULL) {
		TNF_PROBE_2(shadow_tree_destroy, "zulu_hat", /* CSTYLED */,
			tnf_opaque, vaddr, sblk->ivaddr,
			tnf_opaque, ref_count, sblk->ref_count);
		kmem_free(sblk, sizeof (*sblk));
	}
	avl_destroy(&zhat->shadow_tree);
}

/*
 * zulu_hat_insert_map:
 *
 * Add a zulu_hat_blk to a zhat's mappings list.
 *
 * Several data structures are used:
 *	tsb: for simple fast lookups by the trap handler
 *	hash table: for efficient lookups by address and range
 *	a shadow tree of 4MB regions with mappings, for unloading big regions
 */
static void
zulu_hat_insert_map(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
	int tsb_hash;

	tsb_hash = ZULU_TSB_HASH(zblk->zulu_hat_blk_vaddr,
			    zblk->zulu_hat_blk_size, zhat->zulu_tsb_size);

	TNF_PROBE_3(zulu_hat_insert_map, "zulu_hat", /* CSTYLED */,
		tnf_opaque, zblkp, zblk,
		tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
		tnf_opaque, hash, tsb_hash);

	ASSERT(tsb_hash < zhat->zulu_tsb_size);

	zulu_shadow_tree_insert(zhat, zblk);

	/*
	 * The hash table is an array of buckets. Each bucket is the
	 * head of a linked list of mappings whose addresses hash to the
	 * bucket. New entries go at the head of the list.
	 */
	zblk->zulu_hash_prev = NULL;
	zblk->zulu_hash_next = ZULU_MAP_HASH_HEAD(zhat,
			zblk->zulu_hat_blk_vaddr, zblk->zulu_hat_blk_size);
	if (zblk->zulu_hash_next) {
		zblk->zulu_hash_next->zulu_hash_prev = zblk;
	}
	ZULU_MAP_HASH_HEAD(zhat, zblk->zulu_hat_blk_vaddr,
				zblk->zulu_hat_blk_size) = zblk;

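	/*
	 * Finally, install the tte in the TSB. The TSB is direct mapped:
	 * whatever tte previously hashed to this slot is simply
	 * overwritten, and will be faulted back in later if it is needed.
	 */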
	zulu_ctx_tsb_lock_enter(zhat);
	zhat->zulu_tsb[tsb_hash] = zblk->zulu_hat_blk_tte;
	zulu_ctx_tsb_lock_exit(zhat);
}

/*
 * remove a block from a zhat
 */
static void
zulu_hat_remove_map(struct zulu_hat *zhat, struct zulu_hat_blk *zblk)
{
	int tsb_hash = ZULU_TSB_HASH(zblk->zulu_hat_blk_vaddr,
			    zblk->zulu_hat_blk_size, zhat->zulu_tsb_size);

	TNF_PROBE_2(zulu_hat_remove_map, "zulu_hat", /* CSTYLED */,
		tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
		tnf_opaque, hash, tsb_hash);

	ASSERT(tsb_hash < zhat->zulu_tsb_size);
	ASSERT(mutex_owned(&zhat->lock));

	zulu_shadow_tree_delete(zhat, zblk);

	/*
	 * first remove zblk from the hash table; if zblk is the head of
	 * the bucket, the rest of the chain becomes the new head
	 */
	if (zblk->zulu_hash_prev) {
		zblk->zulu_hash_prev->zulu_hash_next = zblk->zulu_hash_next;
	} else {
		ZULU_MAP_HASH_HEAD(zhat, zblk->zulu_hat_blk_vaddr,
			zblk->zulu_hat_blk_size) = zblk->zulu_hash_next;
	}
	if (zblk->zulu_hash_next) {
		zblk->zulu_hash_next->zulu_hash_prev = zblk->zulu_hash_prev;
	}
	zblk->zulu_hash_next = NULL;
	zblk->zulu_hash_prev = NULL;

	/*
	 * then remove the tsb entry
	 */
	zulu_ctx_tsb_lock_enter(zhat);
	if (zhat->zulu_tsb[tsb_hash].un.zulu_tte_addr ==
	    zblk->zulu_hat_blk_vaddr) {
		zhat->zulu_tsb[tsb_hash].zulu_tte_valid = 0;
	}
	zulu_ctx_tsb_lock_exit(zhat);
}

/*
 * look for a mapping to a given vaddr and page size
 */
static struct zulu_hat_blk *
zulu_lookup_map_bysize(struct zulu_hat *zhat, caddr_t vaddr, int page_sz)
{
	struct		zulu_hat_blk *zblkp;
	uint64_t	ivaddr = (uint64_t)vaddr;
	int		blks_checked = 0;

	ASSERT(mutex_owned(&zhat->lock));

	for (zblkp = ZULU_MAP_HASH_HEAD(zhat, ivaddr, page_sz); zblkp != NULL;
						zblkp = zblkp->zulu_hash_next) {
		uint64_t	size;
		uint64_t	iaddr;

		blks_checked++;

		size = ZULU_HAT_PGSZ(zblkp->zulu_hat_blk_size);
		iaddr = ZULU_VADDR((uint64_t)zblkp->zulu_hat_blk_vaddr);

		if (iaddr <= ivaddr && (iaddr + size) > ivaddr) {
			int tsb_hash;

			tsb_hash = ZULU_TSB_HASH(zblkp->zulu_hat_blk_vaddr,
				    zblkp->zulu_hat_blk_size,
				    zhat->zulu_tsb_size);
			ASSERT(tsb_hash < zhat->zulu_tsb_size);

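			/*
			 * Found it: cache the tte in its TSB slot so the
			 * next TL=1 miss on this address hits.
			 */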
			zulu_ctx_tsb_lock_enter(zhat);
			zhat->zulu_tsb[tsb_hash] = zblkp->zulu_hat_blk_tte;
			zulu_ctx_tsb_lock_exit(zhat);
			break;
		}
	}

	TNF_PROBE_3(zulu_hat_lookup_map_bysz, "zulu_hat", /* CSTYLED */,
		tnf_opaque, zblkp, zblkp,
		tnf_int, blks_checked, blks_checked,
		tnf_int, page_sz, page_sz);

	return (zblkp);
}

/*
 * Lookup a zblk for a given virtual address.
 */
static struct zulu_hat_blk *
zulu_lookup_map(struct zulu_hat *zhat, caddr_t vaddr)
{
	struct		zulu_hat_blk *zblkp = NULL;

	/*
	 * if the hat is using 4M pages, look first for a 4M page
	 */
	if (zhat->map4m) {
		zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE4M);
		if (zblkp != NULL) {
			return (zblkp);
		}
	}
	/*
	 * Otherwise look for an 8K page.
	 * Note: if the base pagesize gets increased to 64K, remove this test.
	 */
	if (zhat->map8k) {
		zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE8K);
		if (zblkp != NULL) {
			return (zblkp);
		}
	}
	/*
	 * Only if the page isn't found in the sizes that match the zulu MMUs
	 * do we look for the inefficient 64K or 512K page sizes.
	 */
	if (zhat->map64k) {
		zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE64K);
		if (zblkp != NULL) {
			return (zblkp);
		}
	}
	if (zhat->map512k) {
		zblkp = zulu_lookup_map_bysize(zhat, vaddr, ZULU_TTE512K);
	}

	return (zblkp);
}

/*
 * zulu_hat_load: Load the translation for a given vaddr
 */
int
zulu_hat_load(struct zulu_hat *zhat, caddr_t vaddr,
		enum seg_rw rw, int *ppg_size)
{
	faultcode_t		as_err;
	struct zulu_hat_blk	*zblkp;
	int			rval;
	uint64_t		flags_pfn;
	struct zulu_tte		tte;

	TNF_PROBE_2(zulu_hat_load, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx,
		tnf_opaque, vaddr, vaddr);

	mutex_enter(&zhat->lock);
	ASSERT(zhat->zulu_ctx >= 0);
	/*
	 * look in our tsb first
	 */
	zulu_ctx_tsb_lock_enter(zhat);
	flags_pfn = zulu_hat_tsb_lookup_tl0(zhat, vaddr);
	zulu_ctx_tsb_lock_exit(zhat);

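	/*
	 * A non-zero flags_pfn is a TSB hit: it is the flags/pfn word of
	 * the tte (the word after the tag), which we copy into the local
	 * tte below, skipping over the tag word.
	 */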
	if (flags_pfn) {
		uint64_t *p = (uint64_t *)&tte;

		p++;			/* ignore the tag */
		*p = flags_pfn;		/* load the flags */

		zuluvm_load_tte(zhat, vaddr, flags_pfn, tte.zulu_tte_perm,
			tte.zulu_tte_size);
		if (ppg_size != NULL) {
			*ppg_size = tte.zulu_tte_size;
		}

		zulu_tsb_hit++;
		mutex_exit(&zhat->lock);
		return (0);
	}

	zulu_tsb_miss++;

	zblkp = zulu_lookup_map(zhat, vaddr);
	if (zblkp) {
		tte = zblkp->zulu_hat_blk_tte;
		tte.zulu_tte_pfn = ZULU_HAT_ADJ_PFN((&tte), vaddr);
		zuluvm_load_tte(zhat, vaddr, tte.zulu_tte_pfn,
			tte.zulu_tte_perm, tte.zulu_tte_size);
		if (ppg_size != NULL) {
			*ppg_size = tte.zulu_tte_size;
		}
		mutex_exit(&zhat->lock);
		return (0);
	}

	/*
	 * Set a flag indicating that we're processing a fault.
	 * See comments in zulu_hat_unload_region.
	 */
	zhat->in_fault = 1;
	mutex_exit(&zhat->lock);

	zulu_as_fault++;
	TNF_PROBE_0(calling_as_fault, "zulu_hat", /* CSTYLED */);

	as_err = as_fault((struct hat *)zhat, zhat->zulu_xhat.xhat_as,
			(caddr_t)(ZULU_VADDR((uint64_t)vaddr) & PAGEMASK),
			PAGESIZE, F_INVAL, rw);

	mutex_enter(&zhat->lock);
	zhat->in_fault = 0;
	if (ppg_size != NULL) {
		/*
		 * caller wants to know the page size (used by preload)
		 */
		zblkp = zulu_lookup_map(zhat, vaddr);
		if (zblkp != NULL) {
			*ppg_size = zblkp->zulu_hat_blk_size;
		} else {
			*ppg_size = -1;
		}
	}
	mutex_exit(&zhat->lock);

	TNF_PROBE_1(as_fault_returned, "zulu_hat", /* CSTYLED */,
		tnf_int, as_err, as_err);

	if (as_err != 0) {
		printf("as_fault returned %d\n", as_err);
		rval = as_err;
	} else if (zhat->freed) {
		rval = -1;
	} else {
		rval = 0;
	}

	return (rval);
}

static struct xhat *
zulu_hat_alloc(void *arg)
{
	struct zulu_hat *zhat = kmem_zalloc(sizeof (struct zulu_hat), KM_SLEEP);

	(void) arg;

	zulu_hat_ctx_alloc(zhat);

	mutex_init(&zhat->lock, NULL, MUTEX_DEFAULT, NULL);

	zhat->zulu_tsb = kmem_zalloc(ZULU_TSB_SZ, KM_SLEEP);
	zhat->zulu_tsb_size = ZULU_TSB_NUM;
	zhat->hash_tbl = kmem_zalloc(ZULU_HASH_TBL_SZ, KM_SLEEP);
	avl_create(&zhat->shadow_tree, zulu_shadow_tree_compare,
		sizeof (struct zulu_shadow_blk), ZULU_SHADOW_BLK_LINK_OFFSET);
	/*
	 * The zulu hat has a few opaque data structs embedded in it.
	 * This tag makes finding our data easier with a debugger.
	 */
	zhat->magic = 0x42;

	zhat->freed = 0;
	TNF_PROBE_1(zulu_hat_alloc, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx);
	return ((struct xhat *)zhat);
}

static void
zulu_hat_free(struct xhat *xhat)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;

	TNF_PROBE_1(zulu_hat_free, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx);

	zulu_shadow_tree_destroy(zhat);
	kmem_free(zhat->hash_tbl, ZULU_HASH_TBL_SZ);
	kmem_free(zhat->zulu_tsb, ZULU_TSB_SZ);
	mutex_destroy(&zhat->lock);
	kmem_free(xhat, sizeof (struct zulu_hat));
}

static void
zulu_hat_free_start(struct xhat *xhat)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;

	TNF_PROBE_1(zulu_hat_free_start, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx);
	(void) xhat;
}

/*
 * zulu_hat_memload: This is the callback through which the vm system
 * gives us our translations.
 */
static void
zulu_do_hat_memload(struct xhat *xhat, caddr_t vaddr, struct page *page,
    uint_t attr, uint_t flags, int use_pszc)
{
	void *blk;
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
	struct zulu_hat_blk *zblk;
	pfn_t pfn;

	TNF_PROBE_4(zulu_hat_memload, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx,
		tnf_opaque, vaddr, vaddr, tnf_opaque, attr, attr,
		tnf_opaque, flags, flags);

	/*
	 * keep track of the highest address that this zhat has had
	 * a mapping for.
	 * We use this in unload to avoid searching for regions that
	 * we've never seen.
	 *
	 * This is particularly useful for avoiding repeated searches
	 * for the process's mappings to the zulu hardware. These mappings
	 * are explicitly unloaded at each graphics context switch.
	 *
	 * This takes advantage of the fact that the device addresses
	 * are always above the heap where most DMA data is stored.
	 */
	if (vaddr > zhat->vaddr_max) {
		zhat->vaddr_max = vaddr;
	}

	pfn = xhat_insert_xhatblk(page, xhat, &blk);
	zblk = (struct zulu_hat_blk *)blk;
	zblk->zulu_hat_blk_vaddr = (uintptr_t)vaddr;
	zblk->zulu_hat_blk_pfn = (uint_t)pfn;
	/*
	 * The perm bit is actually in the tte which gets copied to the TSB
	 */
	zblk->zulu_hat_blk_perm = (attr & PROT_WRITE) ? 1 : 0;
	zblk->zulu_hat_blk_size = use_pszc ? page->p_szc : 0;
	zblk->zulu_hat_blk_valid = 1;

	switch (zblk->zulu_hat_blk_size) {
	case	ZULU_TTE8K:
		zhat->map8k = 1;
		break;
	case	ZULU_TTE64K:
		zhat->map64k = 1;
		break;
	case	ZULU_TTE512K:
		zhat->map512k = 1;
		break;
	case	ZULU_TTE4M:
		zhat->map4m = 1;
		break;
	default:
		panic("zulu_hat illegal page size\n");
	}

	mutex_enter(&zhat->lock);

	zulu_hat_insert_map(zhat, zblk);
	if (!zhat->freed) {
		zuluvm_load_tte(zhat, vaddr, zblk->zulu_hat_blk_pfn,
			zblk->zulu_hat_blk_perm, zblk->zulu_hat_blk_size);
	}
	zhat->fault_ivaddr_last =
		ZULU_VADDR((uint64_t)zblk->zulu_hat_blk_vaddr);

	mutex_exit(&zhat->lock);
}

static void
zulu_hat_memload(struct xhat *xhat, caddr_t vaddr, struct page *page,
    uint_t attr, uint_t flags)
{
	zulu_do_hat_memload(xhat, vaddr, page, attr, flags, 0);
}

static void
zulu_hat_devload(struct xhat *xhat, caddr_t vaddr, size_t size, pfn_t pfn,
	uint_t attr, int flags)
{
	struct page *pp = page_numtopp_nolock(pfn);
	(void) size;
	zulu_do_hat_memload(xhat, vaddr, pp, attr, (uint_t)flags, 1);
}

static void
zulu_hat_memload_array(struct xhat *xhat, caddr_t addr, size_t len,
    struct page **gen_pps, uint_t attr, uint_t flags)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;

	TNF_PROBE_3(zulu_hat_memload_array, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx,
		tnf_opaque, addr, addr,
		tnf_opaque, len, len);

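	/*
	 * Walk the page array one large page at a time: each iteration
	 * loads a mapping for the current page and then advances addr,
	 * len and gen_pps by that page's full size.
	 */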
	for (; len > 0; len -= ZULU_HAT_PGSZ((*gen_pps)->p_szc),
	    gen_pps += ZULU_HAT_NUM_PGS((*gen_pps)->p_szc)) {
		zulu_do_hat_memload(xhat, addr, *gen_pps, attr, flags, 1);

		addr += ZULU_HAT_PGSZ((*gen_pps)->p_szc);
	}
}

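/*
 * zblks removed from a zhat are chained onto a private free list
 * through their (by now unused) zulu_hash_next links, and are finally
 * destroyed by free_zblks() after zhat->lock has been dropped.
 */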
static void
free_zblks(struct zulu_hat_blk *free_list)
{
	struct zulu_hat_blk *zblkp;
	struct zulu_hat_blk *next;

	for (zblkp = free_list; zblkp != NULL; zblkp = next) {
		next = zblkp->zulu_hash_next;
		(void) xhat_delete_xhatblk((struct xhat_hme_blk *)zblkp, 0);
	}
}

static void
add_to_free_list(struct zulu_hat_blk **pfree_list, struct zulu_hat_blk *zblk)
{
	zblk->zulu_hash_next = *pfree_list;
	*pfree_list = zblk;
}

static void
zulu_hat_unload_region(struct zulu_hat *zhat, uint64_t ivaddr, size_t size,
		struct zulu_shadow_blk *sblk, struct zulu_hat_blk **pfree_list)
{
	uint64_t	end = ivaddr + size;
	int		found = 0;

	TNF_PROBE_2(zulu_hat_unload_region, "zulu_hat", /* CSTYLED */,
		tnf_opaque, vaddr, ivaddr, tnf_opaque, size, size);

	/*
	 * clip the address range against the low and high water marks
	 * for mappings in this sblk
	 */
	if (ivaddr < sblk->min_addr) {
		ivaddr = sblk->min_addr;
		TNF_PROBE_1(zulu_hat_unload_skip, "zulu_hat", /* CSTYLED */,
			tnf_opaque, ivaddr, ivaddr);
	}
	if (end > sblk->max_addr) {
		end = sblk->max_addr;
		TNF_PROBE_1(zulu_hat_unload_reg_skip, "zulu_hat", /* CSTYLED */,
			tnf_opaque, end, end);
	}
	/*
	 * REMIND: It's not safe to touch the sblk after we enter this loop
	 * because it may get deleted.
	 */

	while (ivaddr < end) {
		uint64_t iaddr;
		size_t  pg_sz;
		struct zulu_hat_blk *zblkp;

		zblkp = zulu_lookup_map(zhat, (caddr_t)ivaddr);
		if (zblkp == NULL) {
			ivaddr += PAGESIZE;
			continue;
		}

		iaddr = ZULU_VADDR((uint64_t)zblkp->zulu_hat_blk_vaddr);
		pg_sz = ZULU_HAT_PGSZ(zblkp->zulu_hat_blk_size);

		found++;

		zulu_hat_remove_map(zhat, zblkp);
		/*
		 * skip the demap if as_free has already been entered;
		 * zuluvm has demapped the whole context already
		 */
		if (!zhat->freed) {
			if ((zhat->in_fault) &&
			    (iaddr == zhat->fault_ivaddr_last)) {
				/*
				 * We're being called from within as_fault to
				 * unload the last translation we loaded.
				 *
				 * This is probably due to watchpoint handling.
				 * Delay the demap for a millisecond
				 * to allow zulu to make some progress.
				 */
				drv_usecwait(1000);
				zhat->fault_ivaddr_last = 0;
			}
			zulu_hat_demap_page(zhat, (caddr_t)iaddr,
					zblkp->zulu_hat_blk_size);
		}

		add_to_free_list(pfree_list, zblkp);

		if ((iaddr + pg_sz) >= end) {
			break;
		}

		ivaddr += pg_sz;
	}
	TNF_PROBE_1(zulu_hat_unload_region_done, "zulu_hat", /* CSTYLED */,
		tnf_opaque, found, found);
}

static void
zulu_hat_unload(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
	uint64_t	ivaddr;
	uint64_t	end;
	int		found = 0;
	struct zulu_hat_blk *free_list = NULL;

	(void) flags;

	TNF_PROBE_4(zulu_hat_unload, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx,
		tnf_opaque, vaddr, vaddr,
		tnf_opaque, vaddr_max, zhat->vaddr_max,
		tnf_opaque, size, size);

	mutex_enter(&zhat->lock);

	/*
	 * The following test prevents us from searching for the user's
	 * mappings to the zulu device registers. Those mappings get unloaded
	 * every time a graphics context switch away from a given context
	 * occurs.
	 *
	 * Since the heap is located at smaller virtual addresses than the
	 * registers, this simple test avoids quite a bit of useless work.
	 */
	if (vaddr > zhat->vaddr_max) {
		/*
		 * all existing mappings have lower addresses than vaddr;
		 * no need to search further.
		 */
		mutex_exit(&zhat->lock);
		return;
	}

	ivaddr = (uint64_t)vaddr;
	end = ivaddr + size;

	do {
		struct zulu_shadow_blk *sblk;

		sblk = zulu_shadow_tree_lookup(zhat, ivaddr, NULL);
		if (sblk != NULL) {
			uint64_t	sblk_end;
			size_t		region_size;

			found++;

			sblk_end = (ivaddr + ZULU_SHADOW_BLK_RANGE) &
					ZULU_SHADOW_BLK_MASK;

			if (sblk_end < end) {
				region_size = sblk_end - ivaddr;
			} else {
				region_size = end - ivaddr;
			}
			zulu_hat_unload_region(zhat, ivaddr, region_size, sblk,
				&free_list);

		}
		ivaddr += ZULU_SHADOW_BLK_RANGE;
	} while (ivaddr < end);

	mutex_exit(&zhat->lock);

	free_zblks(free_list);

	TNF_PROBE_1(zulu_hat_unload_done, "zulu_hat", /* CSTYLED */,
		tnf_int, found, found);
}

static void
zulu_hat_unload_callback(struct xhat *xhat, caddr_t vaddr, size_t size,
	uint_t flags, hat_callback_t *pcb)
{
	(void) pcb;
	zulu_hat_unload(xhat, vaddr, size, flags);
}


/*
 * unload one page
 */
static int
zulu_hat_pageunload(struct xhat *xhat, struct page *pp, uint_t flags,
    void *xblk)
{
	struct zulu_hat_blk *zblk = (struct zulu_hat_blk *)xblk;
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
	int	do_delete;

	(void) pp;
	(void) flags;

	TNF_PROBE_3(zulu_hat_pageunload, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx,
		tnf_opaque, vaddr, zblk->zulu_hat_blk_vaddr,
		tnf_int, pg_size, zblk->zulu_hat_blk_size);

	mutex_enter(&zhat->lock);
	if (zblk->zulu_shadow_blk != NULL) {

		do_delete = 1;

		zulu_hat_remove_map(zhat, zblk);

		/*
		 * now that the entry is removed from the TSB, remove the
		 * translation from the zulu hardware.
		 *
		 * Skip the demap if this as is in the process of being freed.
		 * The zuluvm as callback has demapped the whole context.
		 */
		if (!zhat->freed) {
			zulu_hat_demap_page(zhat,
			    (caddr_t)(zblk->zulu_hat_blk_page <<
			    ZULU_HAT_BP_SHIFT),
			    zblk->zulu_hat_blk_size);
		}
	} else {
		/*
		 * This block has already been removed from the zulu_hat;
		 * it's on a free list waiting for our thread to release
		 * a mutex so it can be freed
		 */
		do_delete = 0;

		TNF_PROBE_0(zulu_hat_pageunload_skip, "zulu_hat",
			    /* CSTYLED */);
	}
	mutex_exit(&zhat->lock);

	if (do_delete) {
		(void) xhat_delete_xhatblk(xblk, 1);
	}

	return (0);
}

static void
zulu_hat_swapout(struct xhat *xhat)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
	struct zulu_hat_blk *zblk;
	struct zulu_hat_blk *free_list = NULL;
	int	i;
	int	nblks = 0;

	TNF_PROBE_1(zulu_hat_swapout, "zulu_hat", /* CSTYLED */,
		tnf_int, zulu_ctx, zhat->zulu_ctx);

	mutex_enter(&zhat->lock);

	/*
	 * Real swapout calls are rare, so we don't do anything in
	 * particular to optimize them.
	 *
	 * Just loop over all buckets in the hash table and free each
	 * zblk.
	 */
	for (i = 0; i < ZULU_HASH_TBL_NUM; i++) {
		struct zulu_hat_blk *next;
		for (zblk = zhat->hash_tbl[i]; zblk != NULL; zblk = next) {
			next = zblk->zulu_hash_next;
			zulu_hat_remove_map(zhat, zblk);
			add_to_free_list(&free_list, zblk);
			nblks++;
		}
	}

	/*
	 * remove all mappings for this context from the zulu hardware.
	 */
	zulu_hat_demap_ctx(zhat->zdev, zhat->zulu_ctx);

	mutex_exit(&zhat->lock);

	free_zblks(free_list);

	TNF_PROBE_1(zulu_hat_swapout_done, "zulu_hat", /* CSTYLED */,
		tnf_int, nblks, nblks);
}


static void
zulu_hat_unshare(struct xhat *xhat, caddr_t vaddr, size_t size)
{
	TNF_PROBE_0(zulu_hat_unshare, "zulu_hat", /* CSTYLED */);

	zulu_hat_unload(xhat, vaddr, size, 0);
}

/*
 * Functions to manage changes in protections for mappings.
 *
 * These are rarely called in normal operation, so for now we just unload
 * the region.
 * If the mapping is still needed, it will fault back in later with the new
 * attributes.
 */
typedef enum {
	ZULU_HAT_CHGATTR,
	ZULU_HAT_SETATTR,
	ZULU_HAT_CLRATTR
} zulu_hat_prot_op;

static void
zulu_hat_update_attr(struct xhat *xhat, caddr_t vaddr, size_t size,
	uint_t flags, zulu_hat_prot_op op)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;

	TNF_PROBE_5(zulu_hat_changeprot, "zulu_hat", /* CSTYLED */,
		tnf_int, ctx, zhat->zulu_ctx,
		tnf_opaque, vaddr, vaddr, tnf_opaque, size, size,
		tnf_uint, flags, flags, tnf_int, op, op);

	zulu_hat_unload(xhat, vaddr, size, 0);
}

static void
zulu_hat_chgprot(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
#ifdef DEBUG
	printf("zulu_hat_chgprot: ctx: %d addr: %lx, size: %lx flags: %x\n",
		zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
#endif
	zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CHGATTR);
}


static void
zulu_hat_setattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
#ifdef DEBUG
	printf("zulu_hat_setattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
		zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
#endif
	zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_SETATTR);
}

static void
zulu_hat_clrattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
#ifdef DEBUG
	printf("zulu_hat_clrattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
		zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
#endif
	zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CLRATTR);
}

static void
zulu_hat_chgattr(struct xhat *xhat, caddr_t vaddr, size_t size, uint_t flags)
{
	struct zulu_hat *zhat = (struct zulu_hat *)xhat;
	TNF_PROBE_3(zulu_hat_chgattr, "zulu_hat", /* CSTYLED */,
		tnf_int, ctx, zhat->zulu_ctx,
		tnf_opaque, vaddr, vaddr,
		tnf_opaque, flags, flags);
#ifdef DEBUG
	printf("zulu_hat_chgattr: ctx: %d addr: %lx, size: %lx flags: %x\n",
		zhat->zulu_ctx, (uint64_t)vaddr, size, flags);
#endif
	zulu_hat_update_attr(xhat, vaddr, size, flags, ZULU_HAT_CHGATTR);
}


struct xhat_ops zulu_hat_ops = {
	zulu_hat_alloc,		/* xhat_alloc */
	zulu_hat_free,		/* xhat_free */
	zulu_hat_free_start,	/* xhat_free_start */
	NULL,			/* xhat_free_end */
	NULL,			/* xhat_dup */
	NULL,			/* xhat_swapin */
	zulu_hat_swapout,	/* xhat_swapout */
	zulu_hat_memload,	/* xhat_memload */
	zulu_hat_memload_array,	/* xhat_memload_array */
	zulu_hat_devload,	/* xhat_devload */
	zulu_hat_unload,	/* xhat_unload */
	zulu_hat_unload_callback, /* xhat_unload_callback */
	zulu_hat_setattr,	/* xhat_setattr */
	zulu_hat_clrattr,	/* xhat_clrattr */
	zulu_hat_chgattr,	/* xhat_chgattr */
	zulu_hat_unshare,	/* xhat_unshare */
	zulu_hat_chgprot,	/* xhat_chgprot */
	zulu_hat_pageunload,	/* xhat_pageunload */
};

xblk_cache_t zulu_xblk_cache = {
    NULL,
    NULL,
    NULL,
    xhat_xblkcache_reclaim
};

xhat_provider_t zulu_hat_provider = {
	XHAT_PROVIDER_VERSION,
	0,
	NULL,
	NULL,
	"zulu_hat_provider",
	&zulu_xblk_cache,
	&zulu_hat_ops,
	sizeof (struct zulu_hat_blk) + sizeof (struct xhat_hme_blk)
};

/*
 * The following functions are the entry points that zuluvm uses.
 */

/*
 * initialize this module. Called from zuluvm's _init function
 */
int
zulu_hat_init(void)
{
	int	c;
	int	rval;
	mutex_init(&zulu_ctx_lock, NULL, MUTEX_DEFAULT, NULL);

	for (c = 0; c < ZULU_HAT_MAX_CTX; c++) {
		ZULU_CTX_LOCK_INIT(c);
	}
	zulu_ctx_search_start = 0;
	rval = xhat_provider_register(&zulu_hat_provider);
	if (rval != 0) {
		mutex_destroy(&zulu_ctx_lock);
	}
	return (rval);
}

/*
 * uninitialize this module. Called from zuluvm's _fini function
 */
int
zulu_hat_destroy(void)
{
	if (xhat_provider_unregister(&zulu_hat_provider) != 0) {
		return (-1);
	}
	mutex_destroy(&zulu_ctx_lock);
	return (0);
}

int
zulu_hat_attach(void *arg)
{
	(void) arg;
	return (0);
}

int
zulu_hat_detach(void *arg)
{
	(void) arg;
	return (0);
}

/*
 * create a zulu hat for this address space.
 */
struct zulu_hat *
zulu_hat_proc_attach(struct as *as, void *zdev)
{
	struct zulu_hat *zhat;
	int		xhat_rval;

	xhat_rval = xhat_attach_xhat(&zulu_hat_provider, as,
			(struct xhat **)&zhat, NULL);
	if ((xhat_rval == 0) && (zhat != NULL)) {
		mutex_enter(&zhat->lock);
		ZULU_HAT2AS(zhat) = as;
		zhat->zdev = zdev;
		mutex_exit(&zhat->lock);
	}

	TNF_PROBE_3(zulu_hat_proc_attach, "zulu_hat", /* CSTYLED */,
		tnf_int, xhat_rval, xhat_rval, tnf_opaque, as, as,
		tnf_opaque, zhat, zhat);

	return (zhat);
}

void
zulu_hat_proc_detach(struct zulu_hat *zhat)
{
	struct as *as = ZULU_HAT2AS(zhat);

	zulu_hat_ctx_free(zhat);

	(void) xhat_detach_xhat(&zulu_hat_provider, ZULU_HAT2AS(zhat));

	TNF_PROBE_1(zulu_hat_proc_detach, "zulu_hat", /* CSTYLED */,
			tnf_opaque, as, as);
}

/*
 * zulu_hat_terminate
 *
 * Disables any further TLB miss processing for this hat.
 * Called by zuluvm's as_free callback. The primary purpose of this
 * function is to cause any pending zulu DMA to abort quickly.
 */
void
zulu_hat_terminate(struct zulu_hat *zhat)
{
	int	ctx = zhat->zulu_ctx;

	TNF_PROBE_1(zulu_hat_terminate, "zulu_hat", /* CSTYLED */,
		tnf_int, ctx, ctx);

	mutex_enter(&zhat->lock);

	zhat->freed = 1;

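	/*
	 * Once freed is set, the unload paths (see zulu_hat_unload_region
	 * and zulu_hat_pageunload) skip their per-page demap calls, since
	 * the whole context is demapped here.
	 */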
	zulu_ctx_tsb_lock_enter(zhat);
	/*
	 * zap the tsb
	 */
	bzero(zhat->zulu_tsb, ZULU_TSB_SZ);
	zulu_ctx_tsb_lock_exit(zhat);

	zulu_hat_demap_ctx(zhat->zdev, zhat->zulu_ctx);

	mutex_exit(&zhat->lock);

	TNF_PROBE_0(zulu_hat_terminate_done, "zulu_hat", /* CSTYLED */);
}
1471