xref: /titanic_53/usr/src/uts/sun4v/os/ppage.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*
23*7c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
25*7c478bd9Sstevel@tonic-gate  */
26*7c478bd9Sstevel@tonic-gate 
27*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
28*7c478bd9Sstevel@tonic-gate 
29*7c478bd9Sstevel@tonic-gate #include <sys/types.h>
30*7c478bd9Sstevel@tonic-gate #include <sys/systm.h>
31*7c478bd9Sstevel@tonic-gate #include <sys/archsystm.h>
32*7c478bd9Sstevel@tonic-gate #include <sys/machsystm.h>
33*7c478bd9Sstevel@tonic-gate #include <sys/t_lock.h>
34*7c478bd9Sstevel@tonic-gate #include <sys/vmem.h>
35*7c478bd9Sstevel@tonic-gate #include <sys/mman.h>
36*7c478bd9Sstevel@tonic-gate #include <sys/vm.h>
37*7c478bd9Sstevel@tonic-gate #include <sys/cpu.h>
38*7c478bd9Sstevel@tonic-gate #include <sys/cmn_err.h>
39*7c478bd9Sstevel@tonic-gate #include <sys/cpuvar.h>
40*7c478bd9Sstevel@tonic-gate #include <sys/atomic.h>
41*7c478bd9Sstevel@tonic-gate #include <vm/as.h>
42*7c478bd9Sstevel@tonic-gate #include <vm/hat.h>
43*7c478bd9Sstevel@tonic-gate #include <vm/as.h>
44*7c478bd9Sstevel@tonic-gate #include <vm/page.h>
45*7c478bd9Sstevel@tonic-gate #include <vm/seg.h>
46*7c478bd9Sstevel@tonic-gate #include <vm/seg_kmem.h>
47*7c478bd9Sstevel@tonic-gate #include <vm/hat_sfmmu.h>
48*7c478bd9Sstevel@tonic-gate #include <sys/debug.h>
49*7c478bd9Sstevel@tonic-gate #include <sys/cpu_module.h>
50*7c478bd9Sstevel@tonic-gate 
51*7c478bd9Sstevel@tonic-gate /*
52*7c478bd9Sstevel@tonic-gate  * A quick way to generate a cache consistent address to map in a page.
53*7c478bd9Sstevel@tonic-gate  * users: ppcopy, pagezero, /proc, dev/mem
54*7c478bd9Sstevel@tonic-gate  *
55*7c478bd9Sstevel@tonic-gate  * The ppmapin/ppmapout routines provide a quick way of generating a cache
56*7c478bd9Sstevel@tonic-gate  * consistent address by reserving a given amount of kernel address space.
57*7c478bd9Sstevel@tonic-gate  * The base is PPMAPBASE and its size is PPMAPSIZE.  This memory is divided
58*7c478bd9Sstevel@tonic-gate  * into x number of sets, where x is the number of colors for the virtual
59*7c478bd9Sstevel@tonic-gate  * cache. The number of colors is how many times a page can be mapped
 60*7c478bd9Sstevel@tonic-gate  * simultaneously in the cache.  For direct map caches this translates to
61*7c478bd9Sstevel@tonic-gate  * the number of pages in the cache.
62*7c478bd9Sstevel@tonic-gate  * Each set will be assigned a group of virtual pages from the reserved memory
63*7c478bd9Sstevel@tonic-gate  * depending on its virtual color.
64*7c478bd9Sstevel@tonic-gate  * When trying to assign a virtual address we will find out the color for the
65*7c478bd9Sstevel@tonic-gate  * physical page in question (if applicable).  Then we will try to find an
 66*7c478bd9Sstevel@tonic-gate  * available virtual page from the set of the appropriate color.
67*7c478bd9Sstevel@tonic-gate  */
68*7c478bd9Sstevel@tonic-gate 
/*
 * Map a (color, set) pair to an index into ppmap_vaddrs[].
 * Arguments are fully parenthesized so that expression arguments
 * (e.g. "c + 1") expand correctly.
 */
#define	clsettoarray(color, set) (((color) * nsets) + (set))
70*7c478bd9Sstevel@tonic-gate 
71*7c478bd9Sstevel@tonic-gate int pp_slots = 4;		/* small default, tuned by cpu module */
72*7c478bd9Sstevel@tonic-gate 
73*7c478bd9Sstevel@tonic-gate /* tuned by cpu module, default is "safe" */
74*7c478bd9Sstevel@tonic-gate int pp_consistent_coloring = PPAGE_STORES_POLLUTE | PPAGE_LOADS_POLLUTE;
75*7c478bd9Sstevel@tonic-gate 
76*7c478bd9Sstevel@tonic-gate static caddr_t	ppmap_vaddrs[PPMAPSIZE / MMU_PAGESIZE];
77*7c478bd9Sstevel@tonic-gate static int	nsets;			/* number of sets */
78*7c478bd9Sstevel@tonic-gate static int	ppmap_pages;		/* generate align mask */
79*7c478bd9Sstevel@tonic-gate static int	ppmap_shift;		/* set selector */
80*7c478bd9Sstevel@tonic-gate 
81*7c478bd9Sstevel@tonic-gate #ifdef PPDEBUG
82*7c478bd9Sstevel@tonic-gate #define		MAXCOLORS	16	/* for debug only */
83*7c478bd9Sstevel@tonic-gate static int	ppalloc_noslot = 0;	/* # of allocations from kernelmap */
84*7c478bd9Sstevel@tonic-gate static int	align_hits[MAXCOLORS];
85*7c478bd9Sstevel@tonic-gate static int	pp_allocs;		/* # of ppmapin requests */
86*7c478bd9Sstevel@tonic-gate #endif /* PPDEBUG */
87*7c478bd9Sstevel@tonic-gate 
88*7c478bd9Sstevel@tonic-gate /*
89*7c478bd9Sstevel@tonic-gate  * There are only 64 TLB entries on spitfire, 16 on cheetah
90*7c478bd9Sstevel@tonic-gate  * (fully-associative TLB) so we allow the cpu module to tune the
91*7c478bd9Sstevel@tonic-gate  * number to use here via pp_slots.
92*7c478bd9Sstevel@tonic-gate  */
93*7c478bd9Sstevel@tonic-gate static struct ppmap_va {
94*7c478bd9Sstevel@tonic-gate 	caddr_t	ppmap_slots[MAXPP_SLOTS];
95*7c478bd9Sstevel@tonic-gate } ppmap_va[NCPU];
96*7c478bd9Sstevel@tonic-gate 
/*
 * Carve the reserved [PPMAPBASE, PPMAPBASE + PPMAPSIZE) range into the
 * ppmap_vaddrs[] free list, organized by (color, set).  Called once at
 * boot before ppmapin()/ppmapout() are used.
 */
void
ppmapinit(void)
{
	int color, nset, setsize;
	caddr_t va;

	ASSERT(pp_slots <= MAXPP_SLOTS);

	va = (caddr_t)PPMAPBASE;
	if (cache & CACHE_VAC) {
		int a;

		/*
		 * Virtually-indexed cache: each shm_alignment-sized window
		 * holds one page of every color, so there are
		 * mmu_btop(shm_alignment) colors and PPMAPSIZE/shm_alignment
		 * sets.
		 */
		ppmap_pages = mmu_btop(shm_alignment);
		nsets = PPMAPSIZE / shm_alignment;
		setsize = shm_alignment;
		/*
		 * ppmap_shift ends up as MMU_PAGESHIFT + log2(ppmap_pages);
		 * ppmapout() uses it to recover the set number from a va.
		 */
		ppmap_shift = MMU_PAGESHIFT;
		a = ppmap_pages;
		while (a >>= 1)
			ppmap_shift++;
	} else {
		/*
		 * If we do not have a virtual indexed cache we simply
		 * have only one set containing all pages.
		 */
		ppmap_pages = 1;
		nsets = mmu_btop(PPMAPSIZE);
		setsize = MMU_PAGESIZE;
		ppmap_shift = MMU_PAGESHIFT;
	}
	/*
	 * Populate the free list: color selects the page offset within a
	 * set window (va advances one page per color), set selects the
	 * window (nset * setsize).
	 */
	for (color = 0; color < ppmap_pages; color++) {
		for (nset = 0; nset < nsets; nset++) {
			ppmap_vaddrs[clsettoarray(color, nset)] =
			    (caddr_t)((uintptr_t)va + (nset * setsize));
		}
		va += MMU_PAGESIZE;
	}
}
134*7c478bd9Sstevel@tonic-gate 
135*7c478bd9Sstevel@tonic-gate /*
136*7c478bd9Sstevel@tonic-gate  * Allocate a cache consistent virtual address to map a page, pp,
137*7c478bd9Sstevel@tonic-gate  * with protection, vprot; and map it in the MMU, using the most
138*7c478bd9Sstevel@tonic-gate  * efficient means possible.  The argument avoid is a virtual address
139*7c478bd9Sstevel@tonic-gate  * hint which when masked yields an offset into a virtual cache
140*7c478bd9Sstevel@tonic-gate  * that should be avoided when allocating an address to map in a
141*7c478bd9Sstevel@tonic-gate  * page.  An avoid arg of -1 means you don't care, for instance pagezero.
142*7c478bd9Sstevel@tonic-gate  *
143*7c478bd9Sstevel@tonic-gate  * machine dependent, depends on virtual address space layout,
144*7c478bd9Sstevel@tonic-gate  * understands that all kernel addresses have bit 31 set.
145*7c478bd9Sstevel@tonic-gate  *
146*7c478bd9Sstevel@tonic-gate  * NOTE: For sun4 platforms the meaning of the hint argument is opposite from
147*7c478bd9Sstevel@tonic-gate  * that found in other architectures.  In other architectures the hint
148*7c478bd9Sstevel@tonic-gate  * (called avoid) was used to ask ppmapin to NOT use the specified cache color.
149*7c478bd9Sstevel@tonic-gate  * This was used to avoid virtual cache thrashing in the bcopy.  Unfortunately
150*7c478bd9Sstevel@tonic-gate  * in the case of a COW,  this later on caused a cache aliasing conflict.  In
151*7c478bd9Sstevel@tonic-gate  * sun4, the bcopy routine uses the block ld/st instructions so we don't have
152*7c478bd9Sstevel@tonic-gate  * to worry about virtual cache thrashing.  Actually, by using the hint to choose
153*7c478bd9Sstevel@tonic-gate  * the right color we can almost guarantee a cache conflict will not occur.
154*7c478bd9Sstevel@tonic-gate  */
155*7c478bd9Sstevel@tonic-gate 
/*
 * Map page pp at a cache-consistent kernel virtual address with
 * protection vprot and return that address.  hint suggests a preferred
 * cache color (see the block comment above); (caddr_t)-1 means "don't
 * care".  Falls back to heap_arena if all per-color slots are taken;
 * the mapping is released by ppmapout().
 */
caddr_t
ppmapin(page_t *pp, uint_t vprot, caddr_t hint)
{
	int color, nset, index, start;
	caddr_t va;

#ifdef PPDEBUG
	pp_allocs++;
#endif /* PPDEBUG */
	if (cache & CACHE_VAC) {
		/*
		 * Prefer the color the page is already mapped at; otherwise
		 * take it from the hint, and failing that derive a stable
		 * color from the physical address.
		 */
		color = sfmmu_get_ppvcolor(pp);
		if (color == -1) {
			if ((intptr_t)hint != -1L) {
				color = addr_to_vcolor(hint);
			} else {
				color = addr_to_vcolor(mmu_ptob(pp->p_pagenum));
			}
		}

	} else {
		/*
		 * For physical caches, we can pick any address we want.
		 */
		color = 0;
	}

	/*
	 * Scan every set of the chosen color for a free slot; claim it
	 * with an atomic compare-and-swap (NULL marks a slot in use) so
	 * no lock is needed.  If the color is exhausted, rotate through
	 * the other colors before giving up.
	 */
	start = color;
	do {
		for (nset = 0; nset < nsets; nset++) {
			index = clsettoarray(color, nset);
			va = ppmap_vaddrs[index];
			if (va != NULL) {
#ifdef PPDEBUG
				align_hits[color]++;
#endif /* PPDEBUG */
				if (casptr(&ppmap_vaddrs[index],
				    va, NULL) == va) {
					hat_memload(kas.a_hat, va, pp,
						vprot | HAT_NOSYNC,
						HAT_LOAD_LOCK);
					return (va);
				}
			}
		}
		/*
		 * first pick didn't succeed, try another
		 */
		if (++color == ppmap_pages)
			color = 0;
	} while (color != start);

#ifdef PPDEBUG
	ppalloc_noslot++;
#endif /* PPDEBUG */

	/*
	 * No free slots; get a random one from the kernel heap area.
	 * ppmapout() detects this case by address range.
	 */
	va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	hat_memload(kas.a_hat, va, pp, vprot | HAT_NOSYNC, HAT_LOAD_LOCK);

	return (va);

}
221*7c478bd9Sstevel@tonic-gate 
/*
 * Tear down a mapping created by ppmapin() and return the virtual
 * address either to heap_arena (if the slot scan failed at map time)
 * or to its ppmap_vaddrs[] slot.
 */
void
ppmapout(caddr_t va)
{
	int color, nset, index;

	if (va >= kernelheap && va < ekernelheap) {
		/*
		 * Space came from kernelmap, flush the page and
		 * return the space.
		 */
		hat_unload(kas.a_hat, va, PAGESIZE,
		    (HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK));
		vmem_free(heap_arena, va, PAGESIZE);
	} else {
		/*
		 * Space came from ppmap_vaddrs[], give it back.
		 * Recover (color, set) from the address itself: the color
		 * is the va's cache color, the set is encoded in the bits
		 * above ppmap_shift (see ppmapinit()).
		 */
		color = addr_to_vcolor(va);
		ASSERT((cache & CACHE_VAC)? (color < ppmap_pages) : 1);

		nset = ((uintptr_t)va >> ppmap_shift) & (nsets - 1);
		index = clsettoarray(color, nset);
		hat_unload(kas.a_hat, va, PAGESIZE,
		    (HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK));

		/* Slot must still be marked in-use (NULL) before release. */
		ASSERT(ppmap_vaddrs[index] == NULL);
		ppmap_vaddrs[index] = va;
	}
}
251*7c478bd9Sstevel@tonic-gate 
252*7c478bd9Sstevel@tonic-gate #ifdef DEBUG
253*7c478bd9Sstevel@tonic-gate #define	PP_STAT_ADD(stat)	(stat)++
254*7c478bd9Sstevel@tonic-gate uint_t pload, ploadfail;
255*7c478bd9Sstevel@tonic-gate uint_t ppzero, ppzero_short;
256*7c478bd9Sstevel@tonic-gate #else
257*7c478bd9Sstevel@tonic-gate #define	PP_STAT_ADD(stat)
258*7c478bd9Sstevel@tonic-gate #endif /* DEBUG */
259*7c478bd9Sstevel@tonic-gate 
/*
 * Flush the kernel-context TLB entry for va and mark its per-CPU
 * ppmap slot (*pslot) free by storing NULL.
 */
static void
pp_unload_tlb(caddr_t *pslot, caddr_t va)
{
	ASSERT(*pslot == va);

	vtag_flushpage(va, KCONTEXT);
	*pslot = NULL;				/* release the slot */
}
268*7c478bd9Sstevel@tonic-gate 
269*7c478bd9Sstevel@tonic-gate /*
270*7c478bd9Sstevel@tonic-gate  * Routine to copy kernel pages during relocation.  It will copy one
271*7c478bd9Sstevel@tonic-gate  * PAGESIZE page to another PAGESIZE page.  This function may be called
272*7c478bd9Sstevel@tonic-gate  * above LOCK_LEVEL so it should not grab any locks.
273*7c478bd9Sstevel@tonic-gate  */
274*7c478bd9Sstevel@tonic-gate void
275*7c478bd9Sstevel@tonic-gate ppcopy_kernel__relocatable(page_t *fm_pp, page_t *to_pp)
276*7c478bd9Sstevel@tonic-gate {
277*7c478bd9Sstevel@tonic-gate 	uint64_t fm_pa, to_pa;
278*7c478bd9Sstevel@tonic-gate 	size_t nbytes;
279*7c478bd9Sstevel@tonic-gate 
280*7c478bd9Sstevel@tonic-gate 	fm_pa = (uint64_t)(fm_pp->p_pagenum) << MMU_PAGESHIFT;
281*7c478bd9Sstevel@tonic-gate 	to_pa = (uint64_t)(to_pp->p_pagenum) << MMU_PAGESHIFT;
282*7c478bd9Sstevel@tonic-gate 
283*7c478bd9Sstevel@tonic-gate 	nbytes = MMU_PAGESIZE;
284*7c478bd9Sstevel@tonic-gate 
285*7c478bd9Sstevel@tonic-gate 	for (; nbytes > 0; fm_pa += 32, to_pa += 32, nbytes -= 32)
286*7c478bd9Sstevel@tonic-gate 		hw_pa_bcopy32(fm_pa, to_pa);
287*7c478bd9Sstevel@tonic-gate }
288*7c478bd9Sstevel@tonic-gate 
289*7c478bd9Sstevel@tonic-gate /*
290*7c478bd9Sstevel@tonic-gate  * Copy the data from the physical page represented by "frompp" to
291*7c478bd9Sstevel@tonic-gate  * that represented by "topp".
292*7c478bd9Sstevel@tonic-gate  *
293*7c478bd9Sstevel@tonic-gate  * Try to use per cpu mapping first, if that fails then call pp_mapin
294*7c478bd9Sstevel@tonic-gate  * to load it.
295*7c478bd9Sstevel@tonic-gate  */
296*7c478bd9Sstevel@tonic-gate void
297*7c478bd9Sstevel@tonic-gate ppcopy(page_t *fm_pp, page_t *to_pp)
298*7c478bd9Sstevel@tonic-gate {
299*7c478bd9Sstevel@tonic-gate 	caddr_t fm_va, to_va;
300*7c478bd9Sstevel@tonic-gate 
301*7c478bd9Sstevel@tonic-gate 	fm_va = ppmapin(fm_pp, PROT_READ, (caddr_t)-1);
302*7c478bd9Sstevel@tonic-gate 	to_va = ppmapin(to_pp, PROT_READ | PROT_WRITE, fm_va);
303*7c478bd9Sstevel@tonic-gate 	bcopy(fm_va, to_va, PAGESIZE);
304*7c478bd9Sstevel@tonic-gate 	ppmapout(fm_va);
305*7c478bd9Sstevel@tonic-gate 	ppmapout(to_va);
306*7c478bd9Sstevel@tonic-gate }
307*7c478bd9Sstevel@tonic-gate 
308*7c478bd9Sstevel@tonic-gate /*
309*7c478bd9Sstevel@tonic-gate  * Zero the physical page from off to off + len given by `pp'
310*7c478bd9Sstevel@tonic-gate  * without changing the reference and modified bits of page.
311*7c478bd9Sstevel@tonic-gate  *
312*7c478bd9Sstevel@tonic-gate  * Again, we'll try per cpu mapping first.
313*7c478bd9Sstevel@tonic-gate  */
/*
 * Zero bytes [off, off + len) of page pp without changing the page's
 * reference and modified bits (the mapping is created HAT_NOSYNC).
 * The page must be locked by the caller.
 */
void
pagezero(page_t *pp, uint_t off, uint_t len)
{
	caddr_t va;
	extern int hwblkclr(void *, size_t);
	extern int use_hw_bzero;

	ASSERT((int)len > 0 && (int)off >= 0 && off + len <= PAGESIZE);
	ASSERT(PAGE_LOCKED(pp));

	PP_STAT_ADD(ppzero);

	/* Count zeroings that can't take the full-page hardware path. */
	if (len != MMU_PAGESIZE || !use_hw_bzero) {
		PP_STAT_ADD(ppzero_short);
	}

	/* Keep the thread on this CPU while the temporary mapping exists. */
	kpreempt_disable();

	va = ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);

	if (!use_hw_bzero) {
		bzero(va + off, len);
		sync_icache(va + off, len);
	} else if (hwblkclr(va + off, len)) {
		/*
		 * We may not have used block commit asi.
		 * So flush the I-$ manually
		 */
		sync_icache(va + off, len);
	} else {
		/*
		 * We have used blk commit, and flushed the I-$. However we
		 * still may have an instruction in the pipeline. Only a flush
		 * instruction will invalidate that.
		 */
		doflush(va);
	}

	ppmapout(va);
	kpreempt_enable();
}
355