/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/types.h>
#include <sys/systm.h>
#include <sys/archsystm.h>
#include <sys/machsystm.h>
#include <sys/t_lock.h>
#include <sys/vmem.h>
#include <sys/mman.h>
#include <sys/vm.h>
#include <sys/cpu.h>
#include <sys/cmn_err.h>
#include <sys/cpuvar.h>
#include <sys/atomic.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <vm/page.h>
#include <vm/seg.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kpm.h>
#include <vm/hat_sfmmu.h>
#include <sys/debug.h>
#include <sys/cpu_module.h>

/*
 * A quick way to generate a cache consistent address to map in a page.
 * users: ppcopy, pagezero, /proc, dev/mem
 *
 * The ppmapin/ppmapout routines provide a quick way of generating a cache
 * consistent address by reserving a given amount of kernel address space.
 * The base is PPMAPBASE and its size is PPMAPSIZE.  This memory is divided
 * into x number of sets, where x is the number of colors for the virtual
 * cache.  The number of colors is how many times a page can be mapped
 * simultaneously in the cache.  For direct mapped caches this translates to
 * the number of pages in the cache.
 * Each set will be assigned a group of virtual pages from the reserved memory
 * depending on its virtual color.
 * When trying to assign a virtual address we will find out the color for the
 * physical page in question (if applicable).  Then we will try to find an
 * available virtual page from the set of the appropriate color.
 */

int pp_slots = 4;		/* small default, tuned by cpu module */

/* tuned by cpu module, default is "safe" */
int pp_consistent_coloring = PPAGE_STORES_POLLUTE | PPAGE_LOADS_POLLUTE;

static caddr_t	ppmap_vaddrs[PPMAPSIZE / MMU_PAGESIZE];
static int	nsets;			/* number of sets */
static int	ppmap_shift;		/* set selector */

#ifdef PPDEBUG
#define	MAXCOLORS	16		/* for debug only */
static int	ppalloc_noslot = 0;	/* # of allocations from kernelmap */
static int	align_hits;
static int	pp_allocs;		/* # of ppmapin requests */
#endif /* PPDEBUG */

/*
 * There are only 64 TLB entries on spitfire, 16 on cheetah
 * (fully-associative TLB) so we allow the cpu module to tune the
 * number to use here via pp_slots.
 */
static struct ppmap_va {
	caddr_t	ppmap_slots[MAXPP_SLOTS];
} ppmap_va[NCPU];

/* prevent compilation with VAC defined */
#ifdef VAC
#error "sun4v ppmapin and ppmapout do not support VAC"
#endif

void
ppmapinit(void)
{
	int nset;
	caddr_t va;

	ASSERT(pp_slots <= MAXPP_SLOTS);

	va = (caddr_t)PPMAPBASE;

	/*
	 * sun4v does not have a virtually indexed cache and simply
	 * has only one set containing all pages.
	 */
	nsets = mmu_btop(PPMAPSIZE);
	ppmap_shift = MMU_PAGESHIFT;

	for (nset = 0; nset < nsets; nset++) {
		ppmap_vaddrs[nset] =
		    (caddr_t)((uintptr_t)va + (nset * MMU_PAGESIZE));
	}
}

/*
 * Allocate a cache consistent virtual address to map a page, pp,
 * with protection, vprot; and map it in the MMU, using the most
 * efficient means possible.  The argument avoid is a virtual address
 * hint which when masked yields an offset into a virtual cache
 * that should be avoided when allocating an address to map in a
 * page.  An avoid arg of -1 means you don't care, for instance pagezero.
 *
 * Machine dependent: depends on the virtual address space layout and
 * understands that all kernel addresses have bit 31 set.
 *
 * NOTE: For sun4 platforms the meaning of the hint argument is opposite from
 * that found in other architectures.  In other architectures the hint
 * (called avoid) was used to ask ppmapin NOT to use the specified cache color.
 * This was used to avoid virtual cache thrashing in the bcopy.  Unfortunately,
 * in the case of a COW this later caused a cache aliasing conflict.  On
 * sun4, the bcopy routine uses the block ld/st instructions, so we don't have
 * to worry about virtual cache thrashing.  In fact, by using the hint to
 * choose the right color we can almost guarantee a cache conflict will not
 * occur.
 */

/*ARGSUSED2*/
caddr_t
ppmapin(page_t *pp, uint_t vprot, caddr_t hint)
{
	int nset;
	caddr_t va;

#ifdef PPDEBUG
	pp_allocs++;
#endif /* PPDEBUG */

	/*
	 * On sun4v the caches are physically indexed, so we can pick any
	 * address we want.
	 */
	for (nset = 0; nset < nsets; nset++) {
		va = ppmap_vaddrs[nset];
		if (va != NULL) {
#ifdef PPDEBUG
			align_hits++;
#endif /* PPDEBUG */
			if (atomic_cas_ptr(&ppmap_vaddrs[nset], va, NULL) ==
			    va) {
				hat_memload(kas.a_hat, va, pp,
				    vprot | HAT_NOSYNC,
				    HAT_LOAD_LOCK);
				return (va);
			}
		}
	}

#ifdef PPDEBUG
	ppalloc_noslot++;
#endif /* PPDEBUG */

	/*
	 * No free slots; get a random one from the kernel heap area.
	 */
	va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	hat_memload(kas.a_hat, va, pp, vprot | HAT_NOSYNC, HAT_LOAD_LOCK);

	return (va);
}

void
ppmapout(caddr_t va)
{
	int nset;

	if (va >= kernelheap && va < ekernelheap) {
		/*
		 * Space came from the kernel heap; unload the mapping and
		 * return the space.
		 */
		hat_unload(kas.a_hat, va, PAGESIZE,
		    (HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK));
		vmem_free(heap_arena, va, PAGESIZE);
	} else {
		/*
		 * Space came from ppmap_vaddrs[]; give it back.
		 */
		nset = ((uintptr_t)va >> ppmap_shift) & (nsets - 1);
		hat_unload(kas.a_hat, va, PAGESIZE,
		    (HAT_UNLOAD_NOSYNC | HAT_UNLOAD_UNLOCK));

		ASSERT(ppmap_vaddrs[nset] == NULL);
		ppmap_vaddrs[nset] = va;
	}
}

#ifdef DEBUG
#define	PP_STAT_ADD(stat)	(stat)++
uint_t pload, ploadfail;
uint_t ppzero, ppzero_short;
#else
#define	PP_STAT_ADD(stat)
#endif /* DEBUG */

static void
pp_unload_tlb(caddr_t *pslot, caddr_t va)
{
	ASSERT(*pslot == va);

	vtag_flushpage(va, (uint64_t)ksfmmup);
	*pslot = NULL;			/* release the slot */
}

/*
 * Routine to copy kernel pages during relocation.  It will copy one
 * PAGESIZE page to another PAGESIZE page.  This function may be called
 * above LOCK_LEVEL so it should not grab any locks.
 */
void
ppcopy_kernel__relocatable(page_t *fm_pp, page_t *to_pp)
{
	uint64_t fm_pa, to_pa;
	size_t nbytes;

	fm_pa = (uint64_t)(fm_pp->p_pagenum) << MMU_PAGESHIFT;
	to_pa = (uint64_t)(to_pp->p_pagenum) << MMU_PAGESHIFT;

	nbytes = MMU_PAGESIZE;

	for (; nbytes > 0; fm_pa += 32, to_pa += 32, nbytes -= 32)
		hw_pa_bcopy32(fm_pa, to_pa);
}

/*
 * Copy the data from the physical page represented by "fm_pp" to
 * the one represented by "to_pp".
 *
 * Try a KPM mapping first; if that fails, fall back to ppmapin to
 * load the pages.
 * Returns one on success or zero on some sort of fault while doing the copy.
 */
int
ppcopy(page_t *fm_pp, page_t *to_pp)
{
	caddr_t fm_va = NULL;
	caddr_t to_va;
	boolean_t fast;
	label_t ljb;
	int ret = 1;

	ASSERT(PAGE_LOCKED(fm_pp));
	ASSERT(PAGE_LOCKED(to_pp));

	/*
	 * Try to map using KPM if enabled.  If that fails, fall
	 * back to ppmapin/ppmapout.
	 */
	if ((kpm_enable == 0) ||
	    (fm_va = hat_kpm_mapin(fm_pp, NULL)) == NULL ||
	    (to_va = hat_kpm_mapin(to_pp, NULL)) == NULL) {
		if (fm_va != NULL)
			hat_kpm_mapout(fm_pp, NULL, fm_va);
		fm_va = ppmapin(fm_pp, PROT_READ, (caddr_t)-1);
		to_va = ppmapin(to_pp, PROT_READ | PROT_WRITE, fm_va);
		fast = B_FALSE;
	} else
		fast = B_TRUE;

	if (on_fault(&ljb)) {
		ret = 0;
		goto faulted;
	}
	bcopy(fm_va, to_va, PAGESIZE);
	no_fault();
faulted:

	/* Unmap */
	if (fast) {
		hat_kpm_mapout(fm_pp, NULL, fm_va);
		hat_kpm_mapout(to_pp, NULL, to_va);
	} else {
		ppmapout(fm_va);
		ppmapout(to_va);
	}
	return (ret);
}

/*
 * Zero the physical page from off to off + len given by `pp'
 * without changing the reference and modified bits of the page.
 *
 * Again, we'll try a KPM mapping first.
 */
void
pagezero(page_t *pp, uint_t off, uint_t len)
{
	caddr_t va;
	extern int hwblkclr(void *, size_t);
	extern int use_hw_bzero;
	boolean_t fast;

	ASSERT((int)len > 0 && (int)off >= 0 && off + len <= PAGESIZE);
	ASSERT(PAGE_LOCKED(pp));

	PP_STAT_ADD(ppzero);

	if (len != MMU_PAGESIZE || !use_hw_bzero) {
		PP_STAT_ADD(ppzero_short);
	}

	kpreempt_disable();

	/*
	 * Try to use KPM if enabled.  If that fails, fall back to
	 * ppmapin/ppmapout.
	 */
	if (kpm_enable != 0) {
		fast = B_TRUE;
		va = hat_kpm_mapin(pp, NULL);
	} else
		va = NULL;

	if (va == NULL) {
		fast = B_FALSE;
		va = ppmapin(pp, PROT_READ | PROT_WRITE, (caddr_t)-1);
	}

	if (!use_hw_bzero) {
		bzero(va + off, len);
		sync_icache(va + off, len);
	} else if (hwblkclr(va + off, len)) {
		/*
		 * We may not have used the block commit ASI.
		 * So flush the I-$ manually.
		 */
		sync_icache(va + off, len);
	} else {
		/*
		 * We have used block commit and flushed the I-$.
		 * However, we may still have an instruction in the
		 * pipeline; only a flush will invalidate that.
		 */
		doflush(va);
	}

	if (fast) {
		hat_kpm_mapout(pp, NULL, va);
	} else {
		ppmapout(va);
	}
	kpreempt_enable();
}
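
/*
 * A minimal usage sketch, illustrative only and not part of the original
 * file: a hypothetical debug helper showing the ppmapin/ppmapout pairing
 * that ppcopy and pagezero rely on -- map the locked page at a kernel
 * virtual address, access it through that address, then unmap.  The
 * function name pp_sum_page and its placement under PPDEBUG are
 * assumptions made for illustration.
 */
#ifdef PPDEBUG
static uint_t
pp_sum_page(page_t *pp)
{
	caddr_t va;
	uint_t sum = 0;
	size_t i;

	ASSERT(PAGE_LOCKED(pp));

	/* sun4v has no virtual color to match, so pass -1 as the hint. */
	va = ppmapin(pp, PROT_READ, (caddr_t)-1);
	for (i = 0; i < PAGESIZE; i++)
		sum += (uchar_t)va[i];
	ppmapout(va);

	return (sum);
}
#endif /* PPDEBUG */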