1fedab560Sae112802 /*
2fedab560Sae112802 * CDDL HEADER START
3fedab560Sae112802 *
4fedab560Sae112802 * The contents of this file are subject to the terms of the
5fedab560Sae112802 * Common Development and Distribution License (the "License").
6fedab560Sae112802 * You may not use this file except in compliance with the License.
7fedab560Sae112802 *
8fedab560Sae112802 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9fedab560Sae112802 * or http://www.opensolaris.org/os/licensing.
10fedab560Sae112802 * See the License for the specific language governing permissions
11fedab560Sae112802 * and limitations under the License.
12fedab560Sae112802 *
13fedab560Sae112802 * When distributing Covered Code, include this CDDL HEADER in each
14fedab560Sae112802 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15fedab560Sae112802 * If applicable, add the following below this CDDL HEADER, with the
16fedab560Sae112802 * fields enclosed by brackets "[]" replaced with your own identifying
17fedab560Sae112802 * information: Portions Copyright [yyyy] [name of copyright owner]
18fedab560Sae112802 *
19fedab560Sae112802 * CDDL HEADER END
20fedab560Sae112802 */
21fedab560Sae112802 /*
22*d20abfaaSPavel Tatashin * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23fedab560Sae112802 * Use is subject to license terms.
24fedab560Sae112802 */
25fedab560Sae112802
26fedab560Sae112802 /*
27fedab560Sae112802 * Kernel Physical Mapping (segkpm) hat interface routines for sun4u.
28fedab560Sae112802 */
29fedab560Sae112802
30fedab560Sae112802 #include <sys/types.h>
31fedab560Sae112802 #include <vm/hat.h>
32fedab560Sae112802 #include <vm/hat_sfmmu.h>
33fedab560Sae112802 #include <vm/page.h>
34fedab560Sae112802 #include <sys/sysmacros.h>
35fedab560Sae112802 #include <sys/cmn_err.h>
36fedab560Sae112802 #include <sys/machsystm.h>
37fedab560Sae112802 #include <vm/seg_kpm.h>
38fedab560Sae112802 #include <sys/cpu_module.h>
39fedab560Sae112802 #include <vm/mach_kpm.h>
40fedab560Sae112802
41fedab560Sae112802 /* kpm prototypes */
42fedab560Sae112802 static caddr_t sfmmu_kpm_mapin(page_t *);
43fedab560Sae112802 static void sfmmu_kpm_mapout(page_t *, caddr_t);
44fedab560Sae112802 static int sfmmu_kpme_lookup(struct kpme *, page_t *);
45fedab560Sae112802 static void sfmmu_kpme_add(struct kpme *, page_t *);
46fedab560Sae112802 static void sfmmu_kpme_sub(struct kpme *, page_t *);
47fedab560Sae112802 static caddr_t sfmmu_kpm_getvaddr(page_t *, int *);
48fedab560Sae112802 static int sfmmu_kpm_fault(caddr_t, struct memseg *, page_t *);
49fedab560Sae112802 static int sfmmu_kpm_fault_small(caddr_t, struct memseg *, page_t *);
50fedab560Sae112802 static void sfmmu_kpm_vac_conflict(page_t *, caddr_t);
51fedab560Sae112802 void sfmmu_kpm_pageunload(page_t *);
52fedab560Sae112802 void sfmmu_kpm_vac_unload(page_t *, caddr_t);
53fedab560Sae112802 static void sfmmu_kpm_demap_large(caddr_t);
54fedab560Sae112802 static void sfmmu_kpm_demap_small(caddr_t);
55fedab560Sae112802 static void sfmmu_kpm_demap_tlbs(caddr_t);
56fedab560Sae112802 void sfmmu_kpm_hme_unload(page_t *);
57fedab560Sae112802 kpm_hlk_t *sfmmu_kpm_kpmp_enter(page_t *, pgcnt_t);
58fedab560Sae112802 void sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp);
59fedab560Sae112802 void sfmmu_kpm_page_cache(page_t *, int, int);
60fedab560Sae112802
61*d20abfaaSPavel Tatashin extern uint_t vac_colors;
62*d20abfaaSPavel Tatashin
63fedab560Sae112802 /*
64fedab560Sae112802 * Kernel Physical Mapping (kpm) facility
65fedab560Sae112802 */
66fedab560Sae112802
/*
 * Platform hook called during kpm setup; sun4u needs no additional
 * initialization here, so this is intentionally a no-op.
 */
void
mach_kpm_init()
{}
70fedab560Sae112802
71fedab560Sae112802 /* -- hat_kpm interface section -- */
72fedab560Sae112802
73fedab560Sae112802 /*
74fedab560Sae112802 * Mapin a locked page and return the vaddr.
75fedab560Sae112802 * When a kpme is provided by the caller it is added to
76fedab560Sae112802 * the page p_kpmelist. The page to be mapped in must
77fedab560Sae112802 * be at least read locked (p_selock).
78fedab560Sae112802 */
caddr_t
hat_kpm_mapin(struct page *pp, struct kpme *kpme)
{
	kmutex_t *pml;
	caddr_t vaddr;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set");
		return ((caddr_t)NULL);
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked");
		return ((caddr_t)NULL);
	}

	/* The page's mapping list lock serializes all kpm state changes. */
	pml = sfmmu_mlist_enter(pp);
	ASSERT(pp->p_kpmref >= 0);

	/*
	 * Only the first mapin establishes the mapping (sfmmu_kpm_mapin);
	 * later callers just recompute the already valid kpm address.
	 */
	vaddr = (pp->p_kpmref == 0) ?
	    sfmmu_kpm_mapin(pp) : hat_kpm_page2va(pp, 1);

	if (kpme != NULL) {
		/*
		 * Tolerate multiple mapins for the same kpme to avoid
		 * the need for an extra serialization.
		 */
		if ((sfmmu_kpme_lookup(kpme, pp)) == 0)
			sfmmu_kpme_add(kpme, pp);	/* bumps p_kpmref */

		ASSERT(pp->p_kpmref > 0);

	} else {
		/* No kpme supplied: account the mapin directly. */
		pp->p_kpmref++;
	}

	sfmmu_mlist_exit(pml);
	return (vaddr);
}
118fedab560Sae112802
119fedab560Sae112802 /*
120fedab560Sae112802 * Mapout a locked page.
121fedab560Sae112802 * When a kpme is provided by the caller it is removed from
122fedab560Sae112802 * the page p_kpmelist. The page to be mapped out must be at
123fedab560Sae112802 * least read locked (p_selock).
124fedab560Sae112802 * Note: The seg_kpm layer provides a mapout interface for the
125fedab560Sae112802 * case that a kpme is used and the underlying page is unlocked.
126fedab560Sae112802 * This can be used instead of calling this function directly.
127fedab560Sae112802 */
void
hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
{
	kmutex_t *pml;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set");
		return;
	}

	if (IS_KPM_ADDR(vaddr) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address");
		return;
	}

	if (pp == NULL || PAGE_LOCKED(pp) == 0) {
		cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked");
		return;
	}

	if (kpme != NULL) {
		ASSERT(pp == kpme->kpe_page);
		/* use the kpme's page (ASSERT may be compiled out) */
		pp = kpme->kpe_page;
		pml = sfmmu_mlist_enter(pp);

		if (sfmmu_kpme_lookup(kpme, pp) == 0)
			panic("hat_kpm_mapout: kpme not found pp=%p",
			    (void *)pp);

		ASSERT(pp->p_kpmref > 0);
		sfmmu_kpme_sub(kpme, pp);	/* drops p_kpmref */

	} else {
		pml = sfmmu_mlist_enter(pp);
		pp->p_kpmref--;
	}

	/* Tear down the kpm mapping when the last reference went away. */
	ASSERT(pp->p_kpmref >= 0);
	if (pp->p_kpmref == 0)
		sfmmu_kpm_mapout(pp, vaddr);

	sfmmu_mlist_exit(pml);
}
171fedab560Sae112802
172fedab560Sae112802 /*
173*d20abfaaSPavel Tatashin * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
174*d20abfaaSPavel Tatashin * memory addresses that are not described by a page_t. It can
175*d20abfaaSPavel Tatashin * only be supported if vac_colors=1, because there is no page_t
176*d20abfaaSPavel Tatashin * and corresponding kpm_page_t to track VAC conflicts. Currently,
177*d20abfaaSPavel Tatashin * this may not be used on pfn's backed by page_t's, because the
178*d20abfaaSPavel Tatashin * kpm state may not be consistent in hat_kpm_fault if the page is
179*d20abfaaSPavel Tatashin * mapped using both this routine and hat_kpm_mapin. KPM should be
180*d20abfaaSPavel Tatashin * cleaned up on sun4u/vac_colors=1 to be minimal as on sun4v.
181*d20abfaaSPavel Tatashin * The caller must only pass pfn's for valid physical addresses; violation
182*d20abfaaSPavel Tatashin * of this rule will cause panic.
183*d20abfaaSPavel Tatashin */
caddr_t
hat_kpm_mapin_pfn(pfn_t pfn)
{
	caddr_t paddr, vaddr;
	tte_t tte;
	uint_t szc = kpm_smallpages ? TTE8K : TTE4M;
	uint_t shift = kpm_smallpages ? MMU_PAGESHIFT : MMU_PAGESHIFT4M;

	/*
	 * Refuse the request when kpm is off, VAC aliasing is possible
	 * (vac_colors > 1), or the pfn is backed by a page_t -- such
	 * pages must go through hat_kpm_mapin instead.
	 */
	if (kpm_enable == 0 || vac_colors > 1 ||
	    page_numtomemseg_nolock(pfn) != NULL)
		return ((caddr_t)NULL);

	paddr = (caddr_t)ptob(pfn);
	vaddr = (uintptr_t)kpm_vbase + paddr;

	/* Install a cacheable kpm translation directly into the kpm TSB. */
	KPM_TTE_VCACHED(tte.ll, pfn, szc);
	sfmmu_kpm_load_tsb(vaddr, &tte, shift);

	return (vaddr);
}
204*d20abfaaSPavel Tatashin
/*ARGSUSED*/
void
hat_kpm_mapout_pfn(pfn_t pfn)
{
	/*
	 * Empty: translations created by hat_kpm_mapin_pfn are not
	 * refcounted and need no explicit teardown here.
	 */
}
211*d20abfaaSPavel Tatashin
212*d20abfaaSPavel Tatashin /*
213fedab560Sae112802 * Return the kpm virtual address for the page at pp.
214fedab560Sae112802 * If checkswap is non zero and the page is backed by a
215fedab560Sae112802 * swap vnode the physical address is used rather than
216fedab560Sae112802 * p_offset to determine the kpm region.
217fedab560Sae112802 * Note: The function has to be used w/ extreme care. The
218fedab560Sae112802 * stability of the page identity is in the responsibility
219fedab560Sae112802 * of the caller.
220fedab560Sae112802 */
/*ARGSUSED*/
caddr_t
hat_kpm_page2va(struct page *pp, int checkswap)
{
	int vcolor, vcolor_pa;
	uintptr_t paddr, vaddr;

	ASSERT(kpm_enable);

	paddr = ptob(pp->p_pagenum);
	vcolor_pa = addr_to_vcolor(paddr);

	/*
	 * Pick the virtual color: swapfs-backed pages use the physical
	 * color when uncached, or the page's established color otherwise;
	 * all other pages derive it from the vnode offset.
	 */
	if (checkswap && pp->p_vnode && IS_SWAPFSVP(pp->p_vnode))
		vcolor = (PP_ISNC(pp)) ? vcolor_pa : PP_GET_VCOLOR(pp);
	else
		vcolor = addr_to_vcolor(pp->p_offset);

	vaddr = (uintptr_t)kpm_vbase + paddr;

	/*
	 * If the wanted color differs from the physical color, shift the
	 * address into the matching kpm alias range.
	 */
	if (vcolor_pa != vcolor) {
		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
		vaddr += (vcolor_pa > vcolor) ?
		    ((uintptr_t)vcolor_pa << kpm_size_shift) :
		    ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);
	}

	return ((caddr_t)vaddr);
}
249fedab560Sae112802
250fedab560Sae112802 /*
251fedab560Sae112802 * Return the page for the kpm virtual address vaddr.
252fedab560Sae112802 * Caller is responsible for the kpm mapping and lock
253fedab560Sae112802 * state of the page.
254fedab560Sae112802 */
255fedab560Sae112802 page_t *
hat_kpm_vaddr2page(caddr_t vaddr)256fedab560Sae112802 hat_kpm_vaddr2page(caddr_t vaddr)
257fedab560Sae112802 {
258fedab560Sae112802 uintptr_t paddr;
259fedab560Sae112802 pfn_t pfn;
260fedab560Sae112802
261fedab560Sae112802 ASSERT(IS_KPM_ADDR(vaddr));
262fedab560Sae112802
263fedab560Sae112802 SFMMU_KPM_VTOP(vaddr, paddr);
264fedab560Sae112802 pfn = (pfn_t)btop(paddr);
265fedab560Sae112802
266fedab560Sae112802 return (page_numtopp_nolock(pfn));
267fedab560Sae112802 }
268fedab560Sae112802
/*
 * page to kpm_page: locate the kpm_page_t tracking the kpm large page
 * that contains pp, via pp's memseg (large page kpm mode).
 */
#define	PP2KPMPG(pp, kp) {						\
	struct memseg	*mseg;						\
	pgcnt_t		inx;						\
	pfn_t		pfn;						\
									\
	pfn = pp->p_pagenum;						\
	mseg = page_numtomemseg_nolock(pfn);				\
	ASSERT(mseg);							\
	inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);		\
	ASSERT(inx < mseg->kpm_nkpmpgs);				\
	kp = &mseg->kpm_pages[inx];					\
}
282fedab560Sae112802
/*
 * page to kpm_spage: locate the kpm_spage_t for pp, via pp's memseg
 * (small page kpm mode, one entry per PAGESIZE page).
 */
#define	PP2KPMSPG(pp, ksp) {						\
	struct memseg	*mseg;						\
	pgcnt_t		inx;						\
	pfn_t		pfn;						\
									\
	pfn = pp->p_pagenum;						\
	mseg = page_numtomemseg_nolock(pfn);				\
	ASSERT(mseg);							\
	inx = pfn - mseg->kpm_pbase;					\
	ksp = &mseg->kpm_spages[inx];					\
}
295fedab560Sae112802
296fedab560Sae112802 /*
297fedab560Sae112802 * hat_kpm_fault is called from segkpm_fault when a kpm tsbmiss occurred
298fedab560Sae112802 * which could not be resolved by the trap level tsbmiss handler for the
299fedab560Sae112802 * following reasons:
300fedab560Sae112802 * . The vaddr is in VAC alias range (always PAGESIZE mapping size).
301fedab560Sae112802 * . The kpm (s)page range of vaddr is in a VAC alias prevention state.
302fedab560Sae112802 * . tsbmiss handling at trap level is not desired (DEBUG kernel only,
303fedab560Sae112802 * kpm_tsbmtl == 0).
304fedab560Sae112802 */
int
hat_kpm_fault(struct hat *hat, caddr_t vaddr)
{
	int error;
	uintptr_t paddr;
	pfn_t pfn;
	struct memseg *mseg;
	page_t *pp;

	if (kpm_enable == 0) {
		cmn_err(CE_WARN, "hat_kpm_fault: kpm_enable not set");
		return (ENOTSUP);
	}

	/* kpm faults are always against the kernel hat. */
	ASSERT(hat == ksfmmup);
	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	if ((mseg = page_numtomemseg_nolock(pfn)) != NULL) {
		pp = &mseg->pages[(pgcnt_t)(pfn - mseg->pages_base)];
		ASSERT((pfn_t)pp->p_pagenum == pfn);
	}

	/*
	 * hat_kpm_mapin_pfn may add a kpm translation for memory that falls
	 * outside of memsegs. Check for this case and provide the translation
	 * here.
	 */
	if (vac_colors == 1 && mseg == NULL) {
		tte_t tte;
		uint_t szc = kpm_smallpages ? TTE8K : TTE4M;
		uint_t shift = kpm_smallpages ? MMU_PAGESHIFT : MMU_PAGESHIFT4M;

		ASSERT(address_in_memlist(phys_install, paddr, 1));
		KPM_TTE_VCACHED(tte.ll, pfn, szc);
		sfmmu_kpm_load_tsb(vaddr, &tte, shift);
		error = 0;
	} else if (mseg == NULL || !PAGE_LOCKED(pp))
		/* pp is only dereferenced when mseg != NULL (short-circuit) */
		error = EFAULT;
	else if (kpm_smallpages == 0)
		error = sfmmu_kpm_fault(vaddr, mseg, pp);
	else
		error = sfmmu_kpm_fault_small(vaddr, mseg, pp);

	return (error);
}
352fedab560Sae112802
353fedab560Sae112802 /*
354fedab560Sae112802 * memseg_hash[] was cleared, need to clear memseg_phash[] too.
355fedab560Sae112802 */
356fedab560Sae112802 void
hat_kpm_mseghash_clear(int nentries)357fedab560Sae112802 hat_kpm_mseghash_clear(int nentries)
358fedab560Sae112802 {
359fedab560Sae112802 pgcnt_t i;
360fedab560Sae112802
361fedab560Sae112802 if (kpm_enable == 0)
362fedab560Sae112802 return;
363fedab560Sae112802
364fedab560Sae112802 for (i = 0; i < nentries; i++)
365fedab560Sae112802 memseg_phash[i] = MSEG_NULLPTR_PA;
366fedab560Sae112802 }
367fedab560Sae112802
368fedab560Sae112802 /*
369fedab560Sae112802 * Update memseg_phash[inx] when memseg_hash[inx] was changed.
370fedab560Sae112802 */
371fedab560Sae112802 void
hat_kpm_mseghash_update(pgcnt_t inx,struct memseg * msp)372fedab560Sae112802 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
373fedab560Sae112802 {
374fedab560Sae112802 if (kpm_enable == 0)
375fedab560Sae112802 return;
376fedab560Sae112802
377fedab560Sae112802 memseg_phash[inx] = (msp) ? va_to_pa(msp) : MSEG_NULLPTR_PA;
378fedab560Sae112802 }
379fedab560Sae112802
380fedab560Sae112802 /*
381fedab560Sae112802 * Update kpm memseg members from basic memseg info.
382fedab560Sae112802 */
void
hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
	offset_t kpm_pages_off)
{
	if (kpm_enable == 0)
		return;

	/* kpm metadata lives kpm_pages_off bytes past the page_t array */
	msp->kpm_pages = (kpm_page_t *)((caddr_t)msp->pages + kpm_pages_off);
	msp->kpm_nkpmpgs = nkpmpgs;
	msp->kpm_pbase = kpmptop(ptokpmp(msp->pages_base));
	/* cache physical addresses for use by the trap level handlers */
	msp->pagespa = va_to_pa(msp->pages);
	msp->epagespa = va_to_pa(msp->epages);
	msp->kpm_pagespa = va_to_pa(msp->kpm_pages);
}
397fedab560Sae112802
398fedab560Sae112802 /*
399fedab560Sae112802 * Setup nextpa when a memseg is inserted.
400fedab560Sae112802 * Assumes that the memsegslock is already held.
401fedab560Sae112802 */
402fedab560Sae112802 void
hat_kpm_addmem_mseg_insert(struct memseg * msp)403fedab560Sae112802 hat_kpm_addmem_mseg_insert(struct memseg *msp)
404fedab560Sae112802 {
405fedab560Sae112802 if (kpm_enable == 0)
406fedab560Sae112802 return;
407fedab560Sae112802
408ae115bc7Smrj ASSERT(memsegs_lock_held());
409fedab560Sae112802 msp->nextpa = (memsegs) ? va_to_pa(memsegs) : MSEG_NULLPTR_PA;
410fedab560Sae112802 }
411fedab560Sae112802
412fedab560Sae112802 /*
413fedab560Sae112802 * Setup memsegspa when a memseg is (head) inserted.
414fedab560Sae112802 * Called before memsegs is updated to complete a
415fedab560Sae112802 * memseg insert operation.
416fedab560Sae112802 * Assumes that the memsegslock is already held.
417fedab560Sae112802 */
void
hat_kpm_addmem_memsegs_update(struct memseg *msp)
{
	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	ASSERT(memsegs);
	/* publish the new list head's physical address */
	memsegspa = va_to_pa(msp);
}
428fedab560Sae112802
429fedab560Sae112802 /*
430fedab560Sae112802 * Return end of metadata for an already setup memseg.
431fedab560Sae112802 *
432fedab560Sae112802 * Note: kpm_pages and kpm_spages are aliases and the underlying
433fedab560Sae112802 * member of struct memseg is a union, therefore they always have
434fedab560Sae112802 * the same address within a memseg. They must be differentiated
435fedab560Sae112802 * when pointer arithmetic is used with them.
436fedab560Sae112802 */
437fedab560Sae112802 caddr_t
hat_kpm_mseg_reuse(struct memseg * msp)438fedab560Sae112802 hat_kpm_mseg_reuse(struct memseg *msp)
439fedab560Sae112802 {
440fedab560Sae112802 caddr_t end;
441fedab560Sae112802
442fedab560Sae112802 if (kpm_smallpages == 0)
443fedab560Sae112802 end = (caddr_t)(msp->kpm_pages + msp->kpm_nkpmpgs);
444fedab560Sae112802 else
445fedab560Sae112802 end = (caddr_t)(msp->kpm_spages + msp->kpm_nkpmpgs);
446fedab560Sae112802
447fedab560Sae112802 return (end);
448fedab560Sae112802 }
449fedab560Sae112802
450fedab560Sae112802 /*
451fedab560Sae112802 * Update memsegspa (when first memseg in list
452fedab560Sae112802 * is deleted) or nextpa when a memseg deleted.
453fedab560Sae112802 * Assumes that the memsegslock is already held.
454fedab560Sae112802 */
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
	struct memseg *lmsp;

	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());

	if (mspp == &memsegs) {
		/* deleting the list head: repoint memsegspa */
		memsegspa = (msp->next) ?
			va_to_pa(msp->next) : MSEG_NULLPTR_PA;
	} else {
		/*
		 * mspp points at the predecessor's 'next' member;
		 * recover the predecessor memseg via offsetof and
		 * update its physical next pointer.
		 */
		lmsp = (struct memseg *)
			((uint64_t)mspp - offsetof(struct memseg, next));
		lmsp->nextpa = (msp->next) ?
			va_to_pa(msp->next) : MSEG_NULLPTR_PA;
	}
}
475fedab560Sae112802
476fedab560Sae112802 /*
477fedab560Sae112802 * Update kpm members for all memseg's involved in a split operation
478fedab560Sae112802 * and do the atomic update of the physical memseg chain.
479fedab560Sae112802 *
480fedab560Sae112802 * Note: kpm_pages and kpm_spages are aliases and the underlying member
481fedab560Sae112802 * of struct memseg is a union, therefore they always have the same
482fedab560Sae112802 * address within a memseg. With that the direct assignments and
483fedab560Sae112802 * va_to_pa conversions below don't have to be distinguished wrt. to
484fedab560Sae112802 * kpm_smallpages. They must be differentiated when pointer arithmetic
485fedab560Sae112802 * is used with them.
486fedab560Sae112802 *
487fedab560Sae112802 * Assumes that the memsegslock is already held.
488fedab560Sae112802 */
void
hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
	struct memseg *lo, struct memseg *mid, struct memseg *hi)
{
	pgcnt_t start, end, kbase, kstart, num;
	struct memseg *lmsp;

	if (kpm_enable == 0)
		return;

	ASSERT(memsegs_lock_held());
	ASSERT(msp && mid && msp->kpm_pages);

	/* kpm large page index of the original memseg's base */
	kbase = ptokpmp(msp->kpm_pbase);

	if (lo) {
		num = lo->pages_end - lo->pages_base;
		start = kpmptop(ptokpmp(lo->pages_base));
		/* align end to kpm page size granularity */
		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
		lo->kpm_pbase = start;
		lo->kpm_nkpmpgs = ptokpmp(end - start);
		/* lo starts at the original base, so it reuses kpm_pages */
		lo->kpm_pages = msp->kpm_pages;
		lo->kpm_pagespa = va_to_pa(lo->kpm_pages);
		lo->pagespa = va_to_pa(lo->pages);
		lo->epagespa = va_to_pa(lo->epages);
		lo->nextpa = va_to_pa(lo->next);
	}

	/* mid */
	num = mid->pages_end - mid->pages_base;
	kstart = ptokpmp(mid->pages_base);
	start = kpmptop(kstart);
	/* align end to kpm page size granularity */
	end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
	mid->kpm_pbase = start;
	mid->kpm_nkpmpgs = ptokpmp(end - start);
	/* offset into the original kpm metadata; type-specific arithmetic */
	if (kpm_smallpages == 0) {
		mid->kpm_pages = msp->kpm_pages + (kstart - kbase);
	} else {
		mid->kpm_spages = msp->kpm_spages + (kstart - kbase);
	}
	mid->kpm_pagespa = va_to_pa(mid->kpm_pages);
	mid->pagespa = va_to_pa(mid->pages);
	mid->epagespa = va_to_pa(mid->epages);
	mid->nextpa = (mid->next) ? va_to_pa(mid->next) : MSEG_NULLPTR_PA;

	if (hi) {
		num = hi->pages_end - hi->pages_base;
		kstart = ptokpmp(hi->pages_base);
		start = kpmptop(kstart);
		/* align end to kpm page size granularity */
		end = kpmptop(ptokpmp(start + num - 1)) + kpmpnpgs;
		hi->kpm_pbase = start;
		hi->kpm_nkpmpgs = ptokpmp(end - start);
		if (kpm_smallpages == 0) {
			hi->kpm_pages = msp->kpm_pages + (kstart - kbase);
		} else {
			hi->kpm_spages = msp->kpm_spages + (kstart - kbase);
		}
		hi->kpm_pagespa = va_to_pa(hi->kpm_pages);
		hi->pagespa = va_to_pa(hi->pages);
		hi->epagespa = va_to_pa(hi->epages);
		hi->nextpa = (hi->next) ? va_to_pa(hi->next) : MSEG_NULLPTR_PA;
	}

	/*
	 * Atomic update of the physical memseg chain
	 */
	if (mspp == &memsegs) {
		memsegspa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
	} else {
		/* mspp points at the predecessor's 'next' member */
		lmsp = (struct memseg *)
			((uint64_t)mspp - offsetof(struct memseg, next));
		lmsp->nextpa = (lo) ? va_to_pa(lo) : va_to_pa(mid);
	}
}
566fedab560Sae112802
567fedab560Sae112802 /*
568fedab560Sae112802 * Walk the memsegs chain, applying func to each memseg span and vcolor.
569fedab560Sae112802 */
void
hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
{
	pfn_t pbase, pend;
	int vcolor;
	void *base;
	size_t size;
	struct memseg *msp;

	for (msp = memsegs; msp; msp = msp->next) {
		pbase = msp->pages_base;
		pend = msp->pages_end;
		/*
		 * Each memseg span appears once per virtual color; the
		 * kpm range is replicated at kpm_size strides, one copy
		 * per color.
		 */
		for (vcolor = 0; vcolor < vac_colors; vcolor++) {
			base = ptob(pbase) + kpm_vbase + kpm_size * vcolor;
			size = ptob(pend - pbase);
			func(arg, base, size);
		}
	}
}
589fedab560Sae112802
590fedab560Sae112802
591fedab560Sae112802 /* -- sfmmu_kpm internal section -- */
592fedab560Sae112802
593fedab560Sae112802 /*
594fedab560Sae112802 * Return the page frame number if a valid segkpm mapping exists
595fedab560Sae112802 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
596fedab560Sae112802 * Should only be used by other sfmmu routines.
597fedab560Sae112802 */
598fedab560Sae112802 pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)599fedab560Sae112802 sfmmu_kpm_vatopfn(caddr_t vaddr)
600fedab560Sae112802 {
601fedab560Sae112802 uintptr_t paddr;
602fedab560Sae112802 pfn_t pfn;
603fedab560Sae112802 page_t *pp;
604fedab560Sae112802
605fedab560Sae112802 ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));
606fedab560Sae112802
607fedab560Sae112802 SFMMU_KPM_VTOP(vaddr, paddr);
608fedab560Sae112802 pfn = (pfn_t)btop(paddr);
609fedab560Sae112802 pp = page_numtopp_nolock(pfn);
610fedab560Sae112802 if (pp && pp->p_kpmref)
611fedab560Sae112802 return (pfn);
612fedab560Sae112802 else
613fedab560Sae112802 return ((pfn_t)PFN_INVALID);
614fedab560Sae112802 }
615fedab560Sae112802
616fedab560Sae112802 /*
617fedab560Sae112802 * Lookup a kpme in the p_kpmelist.
618fedab560Sae112802 */
619fedab560Sae112802 static int
sfmmu_kpme_lookup(struct kpme * kpme,page_t * pp)620fedab560Sae112802 sfmmu_kpme_lookup(struct kpme *kpme, page_t *pp)
621fedab560Sae112802 {
622fedab560Sae112802 struct kpme *p;
623fedab560Sae112802
624fedab560Sae112802 for (p = pp->p_kpmelist; p; p = p->kpe_next) {
625fedab560Sae112802 if (p == kpme)
626fedab560Sae112802 return (1);
627fedab560Sae112802 }
628fedab560Sae112802 return (0);
629fedab560Sae112802 }
630fedab560Sae112802
631fedab560Sae112802 /*
632fedab560Sae112802 * Insert a kpme into the p_kpmelist and increment
633fedab560Sae112802 * the per page kpm reference count.
634fedab560Sae112802 */
635fedab560Sae112802 static void
sfmmu_kpme_add(struct kpme * kpme,page_t * pp)636fedab560Sae112802 sfmmu_kpme_add(struct kpme *kpme, page_t *pp)
637fedab560Sae112802 {
638fedab560Sae112802 ASSERT(pp->p_kpmref >= 0);
639fedab560Sae112802
640fedab560Sae112802 /* head insert */
641fedab560Sae112802 kpme->kpe_prev = NULL;
642fedab560Sae112802 kpme->kpe_next = pp->p_kpmelist;
643fedab560Sae112802
644fedab560Sae112802 if (pp->p_kpmelist)
645fedab560Sae112802 pp->p_kpmelist->kpe_prev = kpme;
646fedab560Sae112802
647fedab560Sae112802 pp->p_kpmelist = kpme;
648fedab560Sae112802 kpme->kpe_page = pp;
649fedab560Sae112802 pp->p_kpmref++;
650fedab560Sae112802 }
651fedab560Sae112802
652fedab560Sae112802 /*
653fedab560Sae112802 * Remove a kpme from the p_kpmelist and decrement
654fedab560Sae112802 * the per page kpm reference count.
655fedab560Sae112802 */
static void
sfmmu_kpme_sub(struct kpme *kpme, page_t *pp)
{
	ASSERT(pp->p_kpmref > 0);

	/* unlink from predecessor, or advance the list head */
	if (kpme->kpe_prev) {
		ASSERT(pp->p_kpmelist != kpme);
		ASSERT(kpme->kpe_prev->kpe_page == pp);
		kpme->kpe_prev->kpe_next = kpme->kpe_next;
	} else {
		ASSERT(pp->p_kpmelist == kpme);
		pp->p_kpmelist = kpme->kpe_next;
	}

	/* fix up the successor's back pointer */
	if (kpme->kpe_next) {
		ASSERT(kpme->kpe_next->kpe_page == pp);
		kpme->kpe_next->kpe_prev = kpme->kpe_prev;
	}

	/* fully disconnect the kpme and drop the page's kpm reference */
	kpme->kpe_next = kpme->kpe_prev = NULL;
	kpme->kpe_page = NULL;
	pp->p_kpmref--;
}
679fedab560Sae112802
/*
 * Mapin a single page, it is called every time a page changes its state
 * from kpm-unmapped to kpm-mapped. It may not be called, when only a new
 * kpm instance does a mapin and wants to share the mapping.
 * Assumes that the mlist mutex is already grabbed.
 *
 * Returns the kpm virtual address the page is mapped at.
 */
static caddr_t
sfmmu_kpm_mapin(page_t *pp)
{
	kpm_page_t	*kp;		/* kpm_page the page belongs to */
	kpm_hlk_t	*kpmp;		/* hash lock protecting *kp */
	caddr_t		vaddr;
	int		kpm_vac_range;	/* != 0: vaddr is in an alias range */
	pfn_t		pfn;
	tte_t		tte;
	kmutex_t	*pmtx;
	int		uncached;
	kpm_spage_t	*ksp;		/* used in kpm_smallpages mode only */
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(pp->p_kpmref == 0);

	vaddr = sfmmu_kpm_getvaddr(pp, &kpm_vac_range);

	ASSERT(IS_KPM_ADDR(vaddr));
	uncached = PP_ISNC(pp);
	pfn = pp->p_pagenum;

	if (kpm_smallpages)
		goto smallpages_mapin;

	PP2KPMPG(pp, kp);

	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	ASSERT(PP_ISKPMC(pp) == 0);
	ASSERT(PP_ISKPMS(pp) == 0);

	if (uncached) {
		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
		if (kpm_vac_range == 0) {
			if (kp->kp_refcnts == 0) {
				/*
				 * Must remove large page mapping if it exists.
				 * Pages in uncached state can only be mapped
				 * small (PAGESIZE) within the regular kpm
				 * range.
				 */
				if (kp->kp_refcntc == -1) {
					/* remove go indication */
					sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
					    &kpmp->khl_lock, KPMTSBM_STOP);
				}
				if (kp->kp_refcnt > 0 && kp->kp_refcntc == 0)
					sfmmu_kpm_demap_large(vaddr);
			}
			/* this page becomes a conflict page in the kpm_page */
			ASSERT(kp->kp_refcntc >= 0);
			kp->kp_refcntc++;
		}
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
	}

	if ((kp->kp_refcntc > 0 || kp->kp_refcnts > 0) && kpm_vac_range == 0) {
		/*
		 * Have to do a small (PAGESIZE) mapin within this kpm_page
		 * range since it is marked to be in VAC conflict mode or
		 * when there are still other small mappings around.
		 */

		/* tte assembly */
		if (uncached == 0)
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
		else
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMS(pp);
		sfmmu_page_exit(pmtx);

		kp->kp_refcnts++;
		ASSERT(kp->kp_refcnts > 0);
		goto exit;
	}

	if (kpm_vac_range == 0) {
		/*
		 * Fast path / regular case, no VAC conflict handling
		 * in progress within this kpm_page range.
		 */
		if (kp->kp_refcnt == 0) {

			/* tte assembly */
			KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);

			/* tsb dropin */
			sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);

			/* Set go flag for TL tsbmiss handler */
			if (kp->kp_refcntc == 0)
				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
				    &kpmp->khl_lock, KPMTSBM_START);

			ASSERT(kp->kp_refcntc == -1);
		}
		kp->kp_refcnt++;
		ASSERT(kp->kp_refcnt);

	} else {
		/*
		 * The page is not setup according to the common VAC
		 * prevention rules for the regular and kpm mapping layer
		 * E.g. the page layer was not able to deliver a right
		 * vcolor'ed page for a given vaddr corresponding to
		 * the wanted p_offset. It has to be mapped in small in
		 * within the corresponding kpm vac range in order to
		 * prevent VAC alias conflicts.
		 */

		/* tte assembly */
		if (uncached == 0) {
			KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
		} else {
			KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
		}

		/* tsb dropin */
		sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

		/* alias-range mappings are accounted in kp_refcnta */
		kp->kp_refcnta++;
		if (kp->kp_refcntc == -1) {
			ASSERT(kp->kp_refcnt > 0);

			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
			    KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc >= 0);
	}
exit:
	mutex_exit(&kpmp->khl_mutex);
	return (vaddr);

smallpages_mapin:
	/* kpm_smallpages mode: every page is mapped PAGESIZE, no kpm_page */
	if (uncached == 0) {
		/* tte assembly */
		KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
	} else {
		/*
		 * Just in case this same page was mapped cacheable prior to
		 * this and the old tte remains in tlb.
		 */
		sfmmu_kpm_demap_small(vaddr);

		/* ASSERT(pp->p_share); XXX use hat_page_getshare */
		pmtx = sfmmu_page_enter(pp);
		PP_SETKPMC(pp);
		sfmmu_page_exit(pmtx);
		/* tte assembly */
		KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
	}

	/* tsb dropin */
	sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);

	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	/* mark the page mapped (cacheable or not) and enable the TL handler */
	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
	    (uncached) ? (KPM_MAPPED_GO | KPM_MAPPEDSC) :
	    (KPM_MAPPED_GO | KPM_MAPPEDS));

	if (oldval != 0)
		panic("sfmmu_kpm_mapin: stale smallpages mapping");

	return (vaddr);
}
864fedab560Sae112802
/*
 * Mapout a single page, it is called every time a page changes its state
 * from kpm-mapped to kpm-unmapped. It may not be called, when only a kpm
 * instance calls mapout and there are still other instances mapping the
 * page. Assumes that the mlist mutex is already grabbed.
 *
 * Note: In normal mode (no VAC conflict prevention pending) TLB's are
 * not flushed. This is the core segkpm behavior to avoid xcalls. It is
 * no problem because a translation from a segkpm virtual address to a
 * physical address is always the same. The only downside is a slightly
 * increased window of vulnerability for misbehaving _kernel_ modules.
 */
static void
sfmmu_kpm_mapout(page_t *pp, caddr_t vaddr)
{
	kpm_page_t	*kp;		/* kpm_page the page belongs to */
	kpm_hlk_t	*kpmp;		/* hash lock protecting *kp */
	int		alias_range;	/* != 0: vaddr is in an alias range */
	kmutex_t	*pmtx;
	kpm_spage_t	*ksp;		/* used in kpm_smallpages mode only */
	kpm_shlk_t	*kpmsp;
	int		oldval;

	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(pp->p_kpmref == 0);

	alias_range = IS_KPM_ALIAS_RANGE(vaddr);

	if (kpm_smallpages)
		goto smallpages_mapout;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	mutex_enter(&kpmp->khl_mutex);

	if (alias_range) {
		/* alias-range pages are never part of a large mapping */
		ASSERT(PP_ISKPMS(pp) == 0);
		if (kp->kp_refcnta <= 0) {
			panic("sfmmu_kpm_mapout: bad refcnta kp=%p",
			    (void *)kp);
		}

		if (PP_ISTNC(pp)) {
			if (PP_ISKPMC(pp) == 0) {
				/*
				 * Uncached kpm mappings must always have
				 * forced "small page" mode.
				 */
				panic("sfmmu_kpm_mapout: uncached page not "
				    "kpm marked");
			}
			sfmmu_kpm_demap_small(vaddr);

			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);

			/*
			 * Check if we can resume cached mode. This might
			 * be the case if the kpm mapping was the only
			 * mapping in conflict with other non rule
			 * compliant mappings. The page is no more marked
			 * as kpm mapped, so the conv_tnc path will not
			 * change kpm state.
			 */
			conv_tnc(pp, TTE8K);

		} else if (PP_ISKPMC(pp) == 0) {
			/* remove TSB entry only */
			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);

		} else {
			/* already demapped */
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
		}
		kp->kp_refcnta--;
		goto exit;
	}

	if (kp->kp_refcntc <= 0 && kp->kp_refcnts == 0) {
		/*
		 * Fast path / regular case.
		 */
		ASSERT(kp->kp_refcntc >= -1);
		ASSERT(!(pp->p_nrm & (P_KPMC | P_KPMS | P_TNC | P_PNC)));

		if (kp->kp_refcnt <= 0)
			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);

		if (--kp->kp_refcnt == 0) {
			/* remove go indication */
			if (kp->kp_refcntc == -1) {
				sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
				    &kpmp->khl_lock, KPMTSBM_STOP);
			}
			ASSERT(kp->kp_refcntc == 0);

			/* remove TSB entry */
			sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
#ifdef	DEBUG
			if (kpm_tlb_flush)
				sfmmu_kpm_demap_tlbs(vaddr);
#endif
		}

	} else {
		/*
		 * The VAC alias path.
		 * We come here if the kpm vaddr is not in any alias_range
		 * and we are unmapping a page within the regular kpm_page
		 * range. The kpm_page either holds conflict pages and/or
		 * is in "small page" mode. If the page is not marked
		 * P_KPMS it couldn't have a valid PAGESIZE sized TSB
		 * entry. Dcache flushing is done lazy and follows the
		 * rules of the regular virtual page coloring scheme.
		 *
		 * Per page states and required actions:
		 *	P_KPMC: remove a kpm mapping that is conflicting.
		 *	P_KPMS: remove a small kpm mapping within a kpm_page.
		 *	P_TNC:  check if we can re-cache the page.
		 *	P_PNC:  we cannot re-cache, sorry.
		 * Per kpm_page:
		 *	kp_refcntc > 0: page is part of a kpm_page with conflicts.
		 *	kp_refcnts > 0: rm a small mapped page within a kpm_page.
		 */

		if (PP_ISKPMS(pp)) {
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_mapout: bad refcnts kp=%p",
				    (void *)kp);
			}
			sfmmu_kpm_demap_small(vaddr);

			/*
			 * Check if we can resume cached mode. This might
			 * be the case if the kpm mapping was the only
			 * mapping in conflict with other non rule
			 * compliant mappings. The page is no more marked
			 * as kpm mapped, so the conv_tnc path will not
			 * change kpm state.
			 */
			if (PP_ISTNC(pp)) {
				if (!PP_ISKPMC(pp)) {
					/*
					 * Uncached kpm mappings must always
					 * have forced "small page" mode.
					 */
					panic("sfmmu_kpm_mapout: uncached "
					    "page not kpm marked");
				}
				conv_tnc(pp, TTE8K);
			}
			/* small mapping gone; large refcnt takes it back */
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMS(pp);
			sfmmu_page_exit(pmtx);
		}

		if (PP_ISKPMC(pp)) {
			if (kp->kp_refcntc < 1) {
				panic("sfmmu_kpm_mapout: bad refcntc kp=%p",
				    (void *)kp);
			}
			pmtx = sfmmu_page_enter(pp);
			PP_CLRKPMC(pp);
			sfmmu_page_exit(pmtx);
			kp->kp_refcntc--;
		}

		if (kp->kp_refcnt-- < 1)
			panic("sfmmu_kpm_mapout: bad refcnt kp=%p", (void *)kp);
	}
exit:
	mutex_exit(&kpmp->khl_mutex);
	return;

smallpages_mapout:
	/* kpm_smallpages mode: per-page state only, no kpm_page bookkeeping */
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	if (PP_ISKPMC(pp) == 0) {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDS) {
			/*
			 * When we're called after sfmmu_kpm_hme_unload,
			 * KPM_MAPPEDSC is valid too.
			 */
			if (oldval != KPM_MAPPEDSC)
				panic("sfmmu_kpm_mapout: incorrect mapping");
		}

		/* remove TSB entry */
		sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT);
#ifdef	DEBUG
		if (kpm_tlb_flush)
			sfmmu_kpm_demap_tlbs(vaddr);
#endif

	} else if (PP_ISTNC(pp)) {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDSC || PP_ISKPMC(pp) == 0)
			panic("sfmmu_kpm_mapout: inconsistent TNC mapping");

		sfmmu_kpm_demap_small(vaddr);

		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);

		/*
		 * Check if we can resume cached mode. This might be
		 * the case if the kpm mapping was the only mapping
		 * in conflict with other non rule compliant mappings.
		 * The page is no more marked as kpm mapped, so the
		 * conv_tnc path will not change the kpm state.
		 */
		conv_tnc(pp, TTE8K);

	} else {
		oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
		    &kpmsp->kshl_lock, 0);

		if (oldval != KPM_MAPPEDSC)
			panic("sfmmu_kpm_mapout: inconsistent mapping");

		pmtx = sfmmu_page_enter(pp);
		PP_CLRKPMC(pp);
		sfmmu_page_exit(pmtx);
	}
}
1102fedab560Sae112802
/* Note: evaluates x twice -- only use with side-effect-free arguments. */
#define	abs(x)  ((x) < 0 ? -(x) : (x))
1104fedab560Sae112802
/*
 * Determine appropriate kpm mapping address and handle any kpm/hme
 * conflicts. Page mapping list and its vcolor parts must be protected.
 *
 * On return *kpm_vac_rangep is 0 for the regular kpm range, or the
 * vcolor distance when the address falls into a kpm alias range.
 */
static caddr_t
sfmmu_kpm_getvaddr(page_t *pp, int *kpm_vac_rangep)
{
	int		vcolor, vcolor_pa;
	caddr_t		vaddr;
	uintptr_t	paddr;


	ASSERT(sfmmu_mlist_held(pp));

	paddr = ptob(pp->p_pagenum);
	vcolor_pa = addr_to_vcolor(paddr);

	if (pp->p_vnode && IS_SWAPFSVP(pp->p_vnode)) {
		/*
		 * swapfs pages: for a new or currently uncached page the
		 * physical color is free to use, otherwise keep the color
		 * the page is already mapped with.
		 */
		vcolor = (PP_NEWPAGE(pp) || PP_ISNC(pp)) ?
		    vcolor_pa : PP_GET_VCOLOR(pp);
	} else {
		/* regular files: virtual color is dictated by p_offset */
		vcolor = addr_to_vcolor(pp->p_offset);
	}

	vaddr = kpm_vbase + paddr;
	*kpm_vac_rangep = 0;

	if (vcolor_pa != vcolor) {
		/*
		 * Wanted color differs from the physical color: shift the
		 * address into the kpm alias range for this color distance.
		 */
		*kpm_vac_rangep = abs(vcolor - vcolor_pa);
		vaddr += ((uintptr_t)(vcolor - vcolor_pa) << MMU_PAGESHIFT);
		vaddr += (vcolor_pa > vcolor) ?
		    ((uintptr_t)vcolor_pa << kpm_size_shift) :
		    ((uintptr_t)(vcolor - vcolor_pa) << kpm_size_shift);

		ASSERT(!PP_ISMAPPED_LARGE(pp));
	}

	if (PP_ISNC(pp))
		return (vaddr);

	if (PP_NEWPAGE(pp)) {
		PP_SET_VCOLOR(pp, vcolor);
		return (vaddr);
	}

	/* page already has the wanted color: no conflict handling needed */
	if (PP_GET_VCOLOR(pp) == vcolor)
		return (vaddr);

	ASSERT(!PP_ISMAPPED_KPM(pp));
	sfmmu_kpm_vac_conflict(pp, vaddr);

	return (vaddr);
}
1158fedab560Sae112802
/*
 * VAC conflict state bit values.
 * The following defines are used to make the handling of the
 * various input states more concise. For that the kpm states
 * per kpm_page and per page are combined in a summary state.
 * Each single state has a corresponding bit value in the
 * summary state. These defines only apply for kpm large page
 * mappings. Within comments the abbreviations "kc, c, ks, s"
 * are used as short form of the actual state, e.g. "kc" for
 * "kp_refcntc > 0", etc.
 */
#define	KPM_KC	0x00000008	/* kpm_page: kp_refcntc > 0 */
#define	KPM_C	0x00000004	/* page: P_KPMC set */
#define	KPM_KS	0x00000002	/* kpm_page: kp_refcnts > 0 */
#define	KPM_S	0x00000001	/* page: P_KPMS set */

/*
 * Summary states used in sfmmu_kpm_fault (KPM_TSBM_*).
 * See also more detailed comments within in the sfmmu_kpm_fault switch.
 * Abbreviations used:
 * CONFL: VAC conflict(s) within a kpm_page.
 * MAPS: Mapped small: Page mapped in using a regular page size kpm mapping.
 * RASM: Re-assembling of a large page mapping possible.
 * RPLS: Replace: TSB miss due to TSB replacement only.
 * BRKO: Breakup Other: A large kpm mapping has to be broken because another
 * page within the kpm_page is already involved in a VAC conflict.
 * BRKT: Breakup This: A large kpm mapping has to be broken, this page
 * is involved in a VAC conflict.
 */
#define	KPM_TSBM_CONFL_GONE	(0)
#define	KPM_TSBM_MAPS_RASM	(KPM_KS)
#define	KPM_TSBM_RPLS_RASM	(KPM_KS | KPM_S)
#define	KPM_TSBM_MAPS_BRKO	(KPM_KC)
#define	KPM_TSBM_MAPS		(KPM_KC | KPM_KS)
#define	KPM_TSBM_RPLS		(KPM_KC | KPM_KS | KPM_S)
#define	KPM_TSBM_MAPS_BRKT	(KPM_KC | KPM_C)
#define	KPM_TSBM_MAPS_CONFL	(KPM_KC | KPM_C | KPM_KS)
#define	KPM_TSBM_RPLS_CONFL	(KPM_KC | KPM_C | KPM_KS | KPM_S)
1197fedab560Sae112802
1198fedab560Sae112802 /*
1199fedab560Sae112802 * kpm fault handler for mappings with large page size.
1200fedab560Sae112802 */
1201fedab560Sae112802 int
sfmmu_kpm_fault(caddr_t vaddr,struct memseg * mseg,page_t * pp)1202fedab560Sae112802 sfmmu_kpm_fault(caddr_t vaddr, struct memseg *mseg, page_t *pp)
1203fedab560Sae112802 {
1204fedab560Sae112802 int error;
1205fedab560Sae112802 pgcnt_t inx;
1206fedab560Sae112802 kpm_page_t *kp;
1207fedab560Sae112802 tte_t tte;
1208fedab560Sae112802 pfn_t pfn = pp->p_pagenum;
1209fedab560Sae112802 kpm_hlk_t *kpmp;
1210fedab560Sae112802 kmutex_t *pml;
1211fedab560Sae112802 int alias_range;
1212fedab560Sae112802 int uncached = 0;
1213fedab560Sae112802 kmutex_t *pmtx;
1214fedab560Sae112802 int badstate;
1215fedab560Sae112802 uint_t tsbmcase;
1216fedab560Sae112802
1217fedab560Sae112802 alias_range = IS_KPM_ALIAS_RANGE(vaddr);
1218fedab560Sae112802
1219fedab560Sae112802 inx = ptokpmp(kpmptop(ptokpmp(pfn)) - mseg->kpm_pbase);
1220fedab560Sae112802 if (inx >= mseg->kpm_nkpmpgs) {
1221fedab560Sae112802 cmn_err(CE_PANIC, "sfmmu_kpm_fault: kpm overflow in memseg "
1222fedab560Sae112802 "0x%p pp 0x%p", (void *)mseg, (void *)pp);
1223fedab560Sae112802 }
1224fedab560Sae112802
1225fedab560Sae112802 kp = &mseg->kpm_pages[inx];
1226fedab560Sae112802 kpmp = KPMP_HASH(kp);
1227fedab560Sae112802
1228fedab560Sae112802 pml = sfmmu_mlist_enter(pp);
1229fedab560Sae112802
1230fedab560Sae112802 if (!PP_ISMAPPED_KPM(pp)) {
1231fedab560Sae112802 sfmmu_mlist_exit(pml);
1232fedab560Sae112802 return (EFAULT);
1233fedab560Sae112802 }
1234fedab560Sae112802
1235fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
1236fedab560Sae112802
1237fedab560Sae112802 if (alias_range) {
1238fedab560Sae112802 ASSERT(!PP_ISMAPPED_LARGE(pp));
1239fedab560Sae112802 if (kp->kp_refcnta > 0) {
1240fedab560Sae112802 if (PP_ISKPMC(pp)) {
1241fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1242fedab560Sae112802 PP_CLRKPMC(pp);
1243fedab560Sae112802 sfmmu_page_exit(pmtx);
1244fedab560Sae112802 }
1245fedab560Sae112802 /*
1246fedab560Sae112802 * Check for vcolor conflicts. Return here
1247fedab560Sae112802 * w/ either no conflict (fast path), removed hme
1248fedab560Sae112802 * mapping chains (unload conflict) or uncached
1249fedab560Sae112802 * (uncache conflict). VACaches are cleaned and
1250fedab560Sae112802 * p_vcolor and PP_TNC are set accordingly for the
1251fedab560Sae112802 * conflict cases. Drop kpmp for uncache conflict
1252fedab560Sae112802 * cases since it will be grabbed within
1253fedab560Sae112802 * sfmmu_kpm_page_cache in case of an uncache
1254fedab560Sae112802 * conflict.
1255fedab560Sae112802 */
1256fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
1257fedab560Sae112802 sfmmu_kpm_vac_conflict(pp, vaddr);
1258fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
1259fedab560Sae112802
1260fedab560Sae112802 if (PP_ISNC(pp)) {
1261fedab560Sae112802 uncached = 1;
1262fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1263fedab560Sae112802 PP_SETKPMC(pp);
1264fedab560Sae112802 sfmmu_page_exit(pmtx);
1265fedab560Sae112802 }
1266fedab560Sae112802 goto smallexit;
1267fedab560Sae112802
1268fedab560Sae112802 } else {
1269fedab560Sae112802 /*
1270fedab560Sae112802 * We got a tsbmiss on a not active kpm_page range.
1271fedab560Sae112802 * Let segkpm_fault decide how to panic.
1272fedab560Sae112802 */
1273fedab560Sae112802 error = EFAULT;
1274fedab560Sae112802 }
1275fedab560Sae112802 goto exit;
1276fedab560Sae112802 }
1277fedab560Sae112802
1278fedab560Sae112802 badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
1279fedab560Sae112802 if (kp->kp_refcntc == -1) {
1280fedab560Sae112802 /*
1281fedab560Sae112802 * We should come here only if trap level tsb miss
1282fedab560Sae112802 * handler is disabled.
1283fedab560Sae112802 */
1284fedab560Sae112802 badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
1285fedab560Sae112802 PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
1286fedab560Sae112802
1287fedab560Sae112802 if (badstate == 0)
1288fedab560Sae112802 goto largeexit;
1289fedab560Sae112802 }
1290fedab560Sae112802
1291fedab560Sae112802 if (badstate || kp->kp_refcntc < 0)
1292fedab560Sae112802 goto badstate_exit;
1293fedab560Sae112802
1294fedab560Sae112802 /*
1295fedab560Sae112802 * Combine the per kpm_page and per page kpm VAC states to
1296fedab560Sae112802 * a summary state in order to make the kpm fault handling
1297fedab560Sae112802 * more concise.
1298fedab560Sae112802 */
1299fedab560Sae112802 tsbmcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
1300fedab560Sae112802 ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
1301fedab560Sae112802 (PP_ISKPMC(pp) ? KPM_C : 0) |
1302fedab560Sae112802 (PP_ISKPMS(pp) ? KPM_S : 0));
1303fedab560Sae112802
1304fedab560Sae112802 switch (tsbmcase) {
1305fedab560Sae112802 case KPM_TSBM_CONFL_GONE: /* - - - - */
1306fedab560Sae112802 /*
1307fedab560Sae112802 * That's fine, we either have no more vac conflict in
1308fedab560Sae112802 * this kpm page or someone raced in and has solved the
1309fedab560Sae112802 * vac conflict for us -- call sfmmu_kpm_vac_conflict
1310fedab560Sae112802 * to take care for correcting the vcolor and flushing
1311fedab560Sae112802 * the dcache if required.
1312fedab560Sae112802 */
1313fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
1314fedab560Sae112802 sfmmu_kpm_vac_conflict(pp, vaddr);
1315fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
1316fedab560Sae112802
1317fedab560Sae112802 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
1318fedab560Sae112802 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
1319fedab560Sae112802 panic("sfmmu_kpm_fault: inconsistent CONFL_GONE "
1320fedab560Sae112802 "state, pp=%p", (void *)pp);
1321fedab560Sae112802 }
1322fedab560Sae112802 goto largeexit;
1323fedab560Sae112802
1324fedab560Sae112802 case KPM_TSBM_MAPS_RASM: /* - - ks - */
1325fedab560Sae112802 /*
1326fedab560Sae112802 * All conflicts in this kpm page are gone but there are
1327fedab560Sae112802 * already small mappings around, so we also map this
1328fedab560Sae112802 * page small. This could be the trigger case for a
1329fedab560Sae112802 * small mapping reaper, if this is really needed.
1330fedab560Sae112802 * For now fall thru to the KPM_TSBM_MAPS handling.
1331fedab560Sae112802 */
1332fedab560Sae112802
1333fedab560Sae112802 case KPM_TSBM_MAPS: /* kc - ks - */
1334fedab560Sae112802 /*
1335fedab560Sae112802 * Large page mapping is already broken, this page is not
1336fedab560Sae112802 * conflicting, so map it small. Call sfmmu_kpm_vac_conflict
1337fedab560Sae112802 * to take care for correcting the vcolor and flushing
1338fedab560Sae112802 * the dcache if required.
1339fedab560Sae112802 */
1340fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
1341fedab560Sae112802 sfmmu_kpm_vac_conflict(pp, vaddr);
1342fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
1343fedab560Sae112802
1344fedab560Sae112802 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
1345fedab560Sae112802 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
1346fedab560Sae112802 panic("sfmmu_kpm_fault: inconsistent MAPS state, "
1347fedab560Sae112802 "pp=%p", (void *)pp);
1348fedab560Sae112802 }
1349fedab560Sae112802 kp->kp_refcnt--;
1350fedab560Sae112802 kp->kp_refcnts++;
1351fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1352fedab560Sae112802 PP_SETKPMS(pp);
1353fedab560Sae112802 sfmmu_page_exit(pmtx);
1354fedab560Sae112802 goto smallexit;
1355fedab560Sae112802
1356fedab560Sae112802 case KPM_TSBM_RPLS_RASM: /* - - ks s */
1357fedab560Sae112802 /*
1358fedab560Sae112802 * All conflicts in this kpm page are gone but this page
1359fedab560Sae112802 * is mapped small. This could be the trigger case for a
1360fedab560Sae112802 * small mapping reaper, if this is really needed.
1361fedab560Sae112802 * For now we drop it in small again. Fall thru to the
1362fedab560Sae112802 * KPM_TSBM_RPLS handling.
1363fedab560Sae112802 */
1364fedab560Sae112802
1365fedab560Sae112802 case KPM_TSBM_RPLS: /* kc - ks s */
1366fedab560Sae112802 /*
1367fedab560Sae112802 * Large page mapping is already broken, this page is not
1368fedab560Sae112802 * conflicting but already mapped small, so drop it in
1369fedab560Sae112802 * small again.
1370fedab560Sae112802 */
1371fedab560Sae112802 if (PP_ISNC(pp) ||
1372fedab560Sae112802 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
1373fedab560Sae112802 panic("sfmmu_kpm_fault: inconsistent RPLS state, "
1374fedab560Sae112802 "pp=%p", (void *)pp);
1375fedab560Sae112802 }
1376fedab560Sae112802 goto smallexit;
1377fedab560Sae112802
1378fedab560Sae112802 case KPM_TSBM_MAPS_BRKO: /* kc - - - */
1379fedab560Sae112802 /*
1380fedab560Sae112802 * The kpm page where we live in is marked conflicting
1381fedab560Sae112802 * but this page is not conflicting. So we have to map it
1382fedab560Sae112802 * in small. Call sfmmu_kpm_vac_conflict to take care of
1383fedab560Sae112802 * correcting the vcolor and flushing the dcache if required.
1384fedab560Sae112802 */
1385fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
1386fedab560Sae112802 sfmmu_kpm_vac_conflict(pp, vaddr);
1387fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
1388fedab560Sae112802
1389fedab560Sae112802 if (PP_ISNC(pp) || kp->kp_refcnt <= 0 ||
1390fedab560Sae112802 addr_to_vcolor(vaddr) != PP_GET_VCOLOR(pp)) {
1391fedab560Sae112802 panic("sfmmu_kpm_fault: inconsistent MAPS_BRKO state, "
1392fedab560Sae112802 "pp=%p", (void *)pp);
1393fedab560Sae112802 }
1394fedab560Sae112802 kp->kp_refcnt--;
1395fedab560Sae112802 kp->kp_refcnts++;
1396fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1397fedab560Sae112802 PP_SETKPMS(pp);
1398fedab560Sae112802 sfmmu_page_exit(pmtx);
1399fedab560Sae112802 goto smallexit;
1400fedab560Sae112802
1401fedab560Sae112802 case KPM_TSBM_MAPS_BRKT: /* kc c - - */
1402fedab560Sae112802 case KPM_TSBM_MAPS_CONFL: /* kc c ks - */
1403fedab560Sae112802 if (!PP_ISMAPPED(pp)) {
1404fedab560Sae112802 /*
1405fedab560Sae112802 * We got a tsbmiss on kpm large page range that is
1406fedab560Sae112802 * marked to contain vac conflicting pages introduced
1407fedab560Sae112802 * by hme mappings. The hme mappings are all gone and
1408fedab560Sae112802 * must have bypassed the kpm alias prevention logic.
1409fedab560Sae112802 */
1410fedab560Sae112802 panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
1411fedab560Sae112802 (void *)pp);
1412fedab560Sae112802 }
1413fedab560Sae112802
1414fedab560Sae112802 /*
1415fedab560Sae112802 * Check for vcolor conflicts. Return here w/ either no
1416fedab560Sae112802 * conflict (fast path), removed hme mapping chains
1417fedab560Sae112802 * (unload conflict) or uncached (uncache conflict).
1418fedab560Sae112802 * Dcache is cleaned and p_vcolor and P_TNC are set
1419fedab560Sae112802 * accordingly. Drop kpmp for uncache conflict cases
1420fedab560Sae112802 * since it will be grabbed within sfmmu_kpm_page_cache
1421fedab560Sae112802 * in case of an uncache conflict.
1422fedab560Sae112802 */
1423fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
1424fedab560Sae112802 sfmmu_kpm_vac_conflict(pp, vaddr);
1425fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
1426fedab560Sae112802
1427fedab560Sae112802 if (kp->kp_refcnt <= 0)
1428fedab560Sae112802 panic("sfmmu_kpm_fault: bad refcnt kp=%p", (void *)kp);
1429fedab560Sae112802
1430fedab560Sae112802 if (PP_ISNC(pp)) {
1431fedab560Sae112802 uncached = 1;
1432fedab560Sae112802 } else {
1433fedab560Sae112802 /*
1434fedab560Sae112802 * When an unload conflict is solved and there are
1435fedab560Sae112802 * no other small mappings around, we can resume
1436fedab560Sae112802 * largepage mode. Otherwise we have to map or drop
1437fedab560Sae112802 * in small. This could be a trigger for a small
1438fedab560Sae112802 * mapping reaper when this was the last conflict
1439fedab560Sae112802 * within the kpm page and when there are only
1440fedab560Sae112802 * other small mappings around.
1441fedab560Sae112802 */
1442fedab560Sae112802 ASSERT(addr_to_vcolor(vaddr) == PP_GET_VCOLOR(pp));
1443fedab560Sae112802 ASSERT(kp->kp_refcntc > 0);
1444fedab560Sae112802 kp->kp_refcntc--;
1445fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1446fedab560Sae112802 PP_CLRKPMC(pp);
1447fedab560Sae112802 sfmmu_page_exit(pmtx);
1448fedab560Sae112802 ASSERT(PP_ISKPMS(pp) == 0);
1449fedab560Sae112802 if (kp->kp_refcntc == 0 && kp->kp_refcnts == 0)
1450fedab560Sae112802 goto largeexit;
1451fedab560Sae112802 }
1452fedab560Sae112802
1453fedab560Sae112802 kp->kp_refcnt--;
1454fedab560Sae112802 kp->kp_refcnts++;
1455fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1456fedab560Sae112802 PP_SETKPMS(pp);
1457fedab560Sae112802 sfmmu_page_exit(pmtx);
1458fedab560Sae112802 goto smallexit;
1459fedab560Sae112802
1460fedab560Sae112802 case KPM_TSBM_RPLS_CONFL: /* kc c ks s */
1461fedab560Sae112802 if (!PP_ISMAPPED(pp)) {
1462fedab560Sae112802 /*
1463fedab560Sae112802 * We got a tsbmiss on kpm large page range that is
1464fedab560Sae112802 * marked to contain vac conflicting pages introduced
1465fedab560Sae112802 * by hme mappings. They are all gone and must have
1466fedab560Sae112802 * somehow bypassed the kpm alias prevention logic.
1467fedab560Sae112802 */
1468fedab560Sae112802 panic("sfmmu_kpm_fault: stale VAC conflict, pp=%p",
1469fedab560Sae112802 (void *)pp);
1470fedab560Sae112802 }
1471fedab560Sae112802
1472fedab560Sae112802 /*
1473fedab560Sae112802 * This state is only possible for an uncached mapping.
1474fedab560Sae112802 */
1475fedab560Sae112802 if (!PP_ISNC(pp)) {
1476fedab560Sae112802 panic("sfmmu_kpm_fault: page not uncached, pp=%p",
1477fedab560Sae112802 (void *)pp);
1478fedab560Sae112802 }
1479fedab560Sae112802 uncached = 1;
1480fedab560Sae112802 goto smallexit;
1481fedab560Sae112802
1482fedab560Sae112802 default:
1483fedab560Sae112802 badstate_exit:
1484fedab560Sae112802 panic("sfmmu_kpm_fault: inconsistent VAC state, vaddr=%p kp=%p "
1485fedab560Sae112802 "pp=%p", (void *)vaddr, (void *)kp, (void *)pp);
1486fedab560Sae112802 }
1487fedab560Sae112802
1488fedab560Sae112802 smallexit:
1489fedab560Sae112802 /* tte assembly */
1490fedab560Sae112802 if (uncached == 0)
1491fedab560Sae112802 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
1492fedab560Sae112802 else
1493fedab560Sae112802 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
1494fedab560Sae112802
1495fedab560Sae112802 /* tsb dropin */
1496fedab560Sae112802 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
1497fedab560Sae112802
1498fedab560Sae112802 error = 0;
1499fedab560Sae112802 goto exit;
1500fedab560Sae112802
1501fedab560Sae112802 largeexit:
1502fedab560Sae112802 if (kp->kp_refcnt > 0) {
1503fedab560Sae112802
1504fedab560Sae112802 /* tte assembly */
1505fedab560Sae112802 KPM_TTE_VCACHED(tte.ll, pfn, TTE4M);
1506fedab560Sae112802
1507fedab560Sae112802 /* tsb dropin */
1508fedab560Sae112802 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT4M);
1509fedab560Sae112802
1510fedab560Sae112802 if (kp->kp_refcntc == 0) {
1511fedab560Sae112802 /* Set "go" flag for TL tsbmiss handler */
1512fedab560Sae112802 sfmmu_kpm_tsbmtl(&kp->kp_refcntc, &kpmp->khl_lock,
1513fedab560Sae112802 KPMTSBM_START);
1514fedab560Sae112802 }
1515fedab560Sae112802 ASSERT(kp->kp_refcntc == -1);
1516fedab560Sae112802 error = 0;
1517fedab560Sae112802
1518fedab560Sae112802 } else
1519fedab560Sae112802 error = EFAULT;
1520fedab560Sae112802 exit:
1521fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
1522fedab560Sae112802 sfmmu_mlist_exit(pml);
1523fedab560Sae112802 return (error);
1524fedab560Sae112802 }
1525fedab560Sae112802
1526fedab560Sae112802 /*
1527fedab560Sae112802 * kpm fault handler for mappings with small page size.
1528fedab560Sae112802 */
1529fedab560Sae112802 int
sfmmu_kpm_fault_small(caddr_t vaddr,struct memseg * mseg,page_t * pp)1530fedab560Sae112802 sfmmu_kpm_fault_small(caddr_t vaddr, struct memseg *mseg, page_t *pp)
1531fedab560Sae112802 {
1532fedab560Sae112802 int error = 0;
1533fedab560Sae112802 pgcnt_t inx;
1534fedab560Sae112802 kpm_spage_t *ksp;
1535fedab560Sae112802 kpm_shlk_t *kpmsp;
1536fedab560Sae112802 kmutex_t *pml;
1537fedab560Sae112802 pfn_t pfn = pp->p_pagenum;
1538fedab560Sae112802 tte_t tte;
1539fedab560Sae112802 kmutex_t *pmtx;
1540fedab560Sae112802 int oldval;
1541fedab560Sae112802
 /* Locate this pfn's kpm_spage within the memseg and its hashed lock. */
1542fedab560Sae112802 inx = pfn - mseg->kpm_pbase;
1543fedab560Sae112802 ksp = &mseg->kpm_spages[inx];
1544fedab560Sae112802 kpmsp = KPMP_SHASH(ksp);
1545fedab560Sae112802
1546fedab560Sae112802 pml = sfmmu_mlist_enter(pp);
1547fedab560Sae112802
 /* Page is not kpm mapped (anymore); report EFAULT to the caller. */
1548fedab560Sae112802 if (!PP_ISMAPPED_KPM(pp)) {
1549fedab560Sae112802 sfmmu_mlist_exit(pml);
1550fedab560Sae112802 return (EFAULT);
1551fedab560Sae112802 }
1552fedab560Sae112802
1553fedab560Sae112802 /*
1554fedab560Sae112802 * kp_mapped lookup protected by mlist mutex
1555fedab560Sae112802 */
1556fedab560Sae112802 if (ksp->kp_mapped == KPM_MAPPEDS) {
1557fedab560Sae112802 /*
1558fedab560Sae112802 * Fast path tsbmiss: the page is already mapped small with
1559fedab560Sae112802 * no conflict state, so just reassemble and reload the tte.
1560fedab560Sae112802 */
1560fedab560Sae112802 ASSERT(!PP_ISKPMC(pp));
1561fedab560Sae112802 ASSERT(!PP_ISNC(pp));
1562fedab560Sae112802
1563fedab560Sae112802 /* tte assembly */
1564fedab560Sae112802 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
1565fedab560Sae112802
1566fedab560Sae112802 /* tsb dropin */
1567fedab560Sae112802 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
1568fedab560Sae112802
1569fedab560Sae112802 } else if (ksp->kp_mapped == KPM_MAPPEDSC) {
1570fedab560Sae112802 /*
1571fedab560Sae112802 * Got here due to existing or gone kpm/hme VAC conflict.
1572fedab560Sae112802 * Recheck for vcolor conflicts. Return here w/ either
1573fedab560Sae112802 * no conflict, removed hme mapping chain (unload
1574fedab560Sae112802 * conflict) or uncached (uncache conflict). VACaches
1575fedab560Sae112802 * are cleaned and p_vcolor and PP_TNC are set accordingly
1576fedab560Sae112802 * for the conflict cases.
1577fedab560Sae112802 */
1578fedab560Sae112802 sfmmu_kpm_vac_conflict(pp, vaddr);
1579fedab560Sae112802
1580fedab560Sae112802 if (PP_ISNC(pp)) {
1581fedab560Sae112802 /* ASSERT(pp->p_share); XXX use hat_page_getshare */
1582fedab560Sae112802
1583fedab560Sae112802 /* tte assembly */
1584fedab560Sae112802 KPM_TTE_VUNCACHED(tte.ll, pfn, TTE8K);
1585fedab560Sae112802
1586fedab560Sae112802 /* tsb dropin */
1587fedab560Sae112802 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
1588fedab560Sae112802
 /*
  * Re-enable TL tsbmiss handling; oldval sanity-checks that
  * we really transitioned out of KPM_MAPPEDSC.
  */
1589444ce08eSDonghai Qiao oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
1590444ce08eSDonghai Qiao &kpmsp->kshl_lock, (KPM_MAPPED_GO | KPM_MAPPEDSC));
1591444ce08eSDonghai Qiao
1592444ce08eSDonghai Qiao if (oldval != KPM_MAPPEDSC)
1593444ce08eSDonghai Qiao panic("sfmmu_kpm_fault_small: "
1594444ce08eSDonghai Qiao "stale smallpages mapping");
1595fedab560Sae112802 } else {
 /* Conflict is gone: clear the per-page conflict flag if set. */
1596fedab560Sae112802 if (PP_ISKPMC(pp)) {
1597fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1598fedab560Sae112802 PP_CLRKPMC(pp);
1599fedab560Sae112802 sfmmu_page_exit(pmtx);
1600fedab560Sae112802 }
1601fedab560Sae112802
1602fedab560Sae112802 /* tte assembly */
1603fedab560Sae112802 KPM_TTE_VCACHED(tte.ll, pfn, TTE8K);
1604fedab560Sae112802
1605fedab560Sae112802 /* tsb dropin */
1606fedab560Sae112802 sfmmu_kpm_load_tsb(vaddr, &tte, MMU_PAGESHIFT);
1607fedab560Sae112802
1608444ce08eSDonghai Qiao oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
1609444ce08eSDonghai Qiao &kpmsp->kshl_lock, (KPM_MAPPED_GO | KPM_MAPPEDS));
1610fedab560Sae112802
1611fedab560Sae112802 if (oldval != KPM_MAPPEDSC)
1612fedab560Sae112802 panic("sfmmu_kpm_fault_small: "
1613fedab560Sae112802 "stale smallpages mapping");
1614fedab560Sae112802 }
1615fedab560Sae112802
1616fedab560Sae112802 } else {
1617fedab560Sae112802 /*
1618fedab560Sae112802 * We got a tsbmiss on a not active kpm_page range.
1619fedab560Sae112802 * Let segkpm_fault decide how to panic.
1620fedab560Sae112802 */
1621fedab560Sae112802 error = EFAULT;
1622fedab560Sae112802 }
1623fedab560Sae112802
1624fedab560Sae112802 sfmmu_mlist_exit(pml);
1625fedab560Sae112802 return (error);
1626fedab560Sae112802 }
1627fedab560Sae112802
1628fedab560Sae112802 /*
1629fedab560Sae112802 * Check/handle potential hme/kpm mapping conflicts
1630fedab560Sae112802 */
1631fedab560Sae112802 static void
sfmmu_kpm_vac_conflict(page_t * pp,caddr_t vaddr)1632fedab560Sae112802 sfmmu_kpm_vac_conflict(page_t *pp, caddr_t vaddr)
1633fedab560Sae112802 {
1634fedab560Sae112802 int vcolor;
1635fedab560Sae112802 struct sf_hment *sfhmep;
1636fedab560Sae112802 struct hat *tmphat;
1637fedab560Sae112802 struct sf_hment *tmphme = NULL;
1638fedab560Sae112802 struct hme_blk *hmeblkp;
1639fedab560Sae112802 tte_t tte;
1640fedab560Sae112802
1641fedab560Sae112802 ASSERT(sfmmu_mlist_held(pp));
1642fedab560Sae112802
 /* Already uncached: nothing to resolve. */
1643fedab560Sae112802 if (PP_ISNC(pp))
1644fedab560Sae112802 return;
1645fedab560Sae112802
 /* Fast path: page's current vcolor already matches this vaddr. */
1646fedab560Sae112802 vcolor = addr_to_vcolor(vaddr);
1647fedab560Sae112802 if (PP_GET_VCOLOR(pp) == vcolor)
1648fedab560Sae112802 return;
1649fedab560Sae112802
1650fedab560Sae112802 /*
1651fedab560Sae112802 * There could be no vcolor conflict between a large cached
1652fedab560Sae112802 * hme page and a non alias range kpm page (neither large nor
1653fedab560Sae112802 * small mapped). So if a hme conflict already exists between
1654fedab560Sae112802 * a constituent page of a large hme mapping and a shared small
1655fedab560Sae112802 * conflicting hme mapping, both mappings must be already
1656fedab560Sae112802 * uncached at this point.
1657fedab560Sae112802 */
1658fedab560Sae112802 ASSERT(!PP_ISMAPPED_LARGE(pp));
1659fedab560Sae112802
1660fedab560Sae112802 if (!PP_ISMAPPED(pp)) {
1661fedab560Sae112802 /*
1662fedab560Sae112802 * Previous hme user of page had a different color
1663fedab560Sae112802 * but since there are no current users
1664fedab560Sae112802 * we just flush the cache and change the color.
1665fedab560Sae112802 */
1666fedab560Sae112802 SFMMU_STAT(sf_pgcolor_conflict);
1667fedab560Sae112802 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
1668fedab560Sae112802 PP_SET_VCOLOR(pp, vcolor);
1669fedab560Sae112802 return;
1670fedab560Sae112802 }
1671fedab560Sae112802
1672fedab560Sae112802 /*
1673fedab560Sae112802 * If we get here we have a vac conflict with a current hme
1674fedab560Sae112802 * mapping. This must have been established by forcing a wrong
1675fedab560Sae112802 * colored mapping, e.g. by using mmap(2) with MAP_FIXED.
1676fedab560Sae112802 */
1677fedab560Sae112802
1678fedab560Sae112802 /*
1679fedab560Sae112802 * Check if any mapping belongs to the kernel hat itself or is
1680fedab560Sae112802 * locked, since in that case we need to uncache.
1681fedab560Sae112802 */
1682fedab560Sae112802 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
1683fedab560Sae112802 tmphme = sfhmep->hme_next;
16847dacfc44Spaulsan if (IS_PAHME(sfhmep))
16857dacfc44Spaulsan continue;
1686fedab560Sae112802 hmeblkp = sfmmu_hmetohblk(sfhmep);
1687fedab560Sae112802 if (hmeblkp->hblk_xhat_bit)
1688fedab560Sae112802 continue;
1689fedab560Sae112802 tmphat = hblktosfmmu(hmeblkp);
1690fedab560Sae112802 sfmmu_copytte(&sfhmep->hme_tte, &tte);
1691fedab560Sae112802 ASSERT(TTE_IS_VALID(&tte));
1692fedab560Sae112802 if ((tmphat == ksfmmup) || hmeblkp->hblk_lckcnt) {
1693fedab560Sae112802 /*
1694fedab560Sae112802 * We have an uncache conflict
1695fedab560Sae112802 */
1696fedab560Sae112802 SFMMU_STAT(sf_uncache_conflict);
1697fedab560Sae112802 sfmmu_page_cache_array(pp, HAT_TMPNC, CACHE_FLUSH, 1);
1698fedab560Sae112802 return;
1699fedab560Sae112802 }
1700fedab560Sae112802 }
1701fedab560Sae112802
1702fedab560Sae112802 /*
1703fedab560Sae112802 * We have an unload conflict: remove all (8K) hme mappings.
1704fedab560Sae112802 */
1705fedab560Sae112802 SFMMU_STAT(sf_unload_conflict);
1706fedab560Sae112802
1707fedab560Sae112802 for (sfhmep = pp->p_mapping; sfhmep; sfhmep = tmphme) {
1708fedab560Sae112802 tmphme = sfhmep->hme_next;
17097dacfc44Spaulsan if (IS_PAHME(sfhmep))
17107dacfc44Spaulsan continue;
1711fedab560Sae112802 hmeblkp = sfmmu_hmetohblk(sfhmep);
1712fedab560Sae112802 if (hmeblkp->hblk_xhat_bit)
1713fedab560Sae112802 continue;
1714fedab560Sae112802 (void) sfmmu_pageunload(pp, sfhmep, TTE8K);
1715fedab560Sae112802 }
1716fedab560Sae112802
1717fedab560Sae112802 /*
1718fedab560Sae112802 * Unloads only does tlb flushes so we need to flush the
1719fedab560Sae112802 * dcache vcolor here.
1720fedab560Sae112802 */
1721fedab560Sae112802 sfmmu_cache_flush(pp->p_pagenum, PP_GET_VCOLOR(pp));
1722fedab560Sae112802 PP_SET_VCOLOR(pp, vcolor);
1723fedab560Sae112802 }
1724fedab560Sae112802
1725fedab560Sae112802 /*
1726fedab560Sae112802 * Remove all kpm mappings using kpme's for pp and check that
1727fedab560Sae112802 * all kpm mappings (w/ and w/o kpme's) are gone.
1728fedab560Sae112802 */
1729fedab560Sae112802 void
sfmmu_kpm_pageunload(page_t * pp)1730fedab560Sae112802 sfmmu_kpm_pageunload(page_t *pp)
1731fedab560Sae112802 {
1732fedab560Sae112802 caddr_t vaddr;
1733fedab560Sae112802 struct kpme *kpme, *nkpme;
1734fedab560Sae112802
1735fedab560Sae112802 ASSERT(pp != NULL);
1736fedab560Sae112802 ASSERT(pp->p_kpmref);
1737fedab560Sae112802 ASSERT(sfmmu_mlist_held(pp));
1738fedab560Sae112802
1739fedab560Sae112802 vaddr = hat_kpm_page2va(pp, 1);
1740fedab560Sae112802
 /* Walk the kpme list; each sfmmu_kpme_sub drops one p_kpmref. */
1741fedab560Sae112802 for (kpme = pp->p_kpmelist; kpme; kpme = nkpme) {
1742fedab560Sae112802 ASSERT(kpme->kpe_page == pp);
1743fedab560Sae112802
1744fedab560Sae112802 if (pp->p_kpmref == 0)
1745fedab560Sae112802 panic("sfmmu_kpm_pageunload: stale p_kpmref pp=%p "
1746fedab560Sae112802 "kpme=%p", (void *)pp, (void *)kpme);
1747fedab560Sae112802
1748fedab560Sae112802 nkpme = kpme->kpe_next;
1749fedab560Sae112802
1750fedab560Sae112802 /* Add instance callback here if needed later */
1751fedab560Sae112802 sfmmu_kpme_sub(kpme, pp);
1752fedab560Sae112802 }
1753fedab560Sae112802
1754fedab560Sae112802 /*
1755fedab560Sae112802 * Also correct after mixed kpme/nonkpme mappings. If nonkpme
1756fedab560Sae112802 * segkpm clients have unlocked the page and forgot to mapout
1757fedab560Sae112802 * we panic here.
1758fedab560Sae112802 */
1759fedab560Sae112802 if (pp->p_kpmref != 0)
1760fedab560Sae112802 panic("sfmmu_kpm_pageunload: bad refcnt pp=%p", (void *)pp)
;
1762fedab560Sae112802 sfmmu_kpm_mapout(pp, vaddr);
1763fedab560Sae112802 }
1764fedab560Sae112802
1765fedab560Sae112802 /*
1766fedab560Sae112802 * Remove a large kpm mapping from kernel TSB and all TLB's.
1767fedab560Sae112802 */
1768fedab560Sae112802 static void
sfmmu_kpm_demap_large(caddr_t vaddr)1769fedab560Sae112802 sfmmu_kpm_demap_large(caddr_t vaddr)
1770fedab560Sae112802 {
 /* Remove the 4M kernel TSB entry first, then demap all TLBs. */
1771fedab560Sae112802 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT4M);
1772fedab560Sae112802 sfmmu_kpm_demap_tlbs(vaddr);
1773fedab560Sae112802 }
1774fedab560Sae112802
1775fedab560Sae112802 /*
1776fedab560Sae112802 * Remove a small kpm mapping from kernel TSB and all TLB's.
1777fedab560Sae112802 */
1778fedab560Sae112802 static void
sfmmu_kpm_demap_small(caddr_t vaddr)1779fedab560Sae112802 sfmmu_kpm_demap_small(caddr_t vaddr)
1780fedab560Sae112802 {
 /* Remove the regular-pagesize kernel TSB entry first, then demap all TLBs. */
1781fedab560Sae112802 sfmmu_kpm_unload_tsb(vaddr, MMU_PAGESHIFT)
;
1782fedab560Sae112802 sfmmu_kpm_demap_tlbs(vaddr);
1783fedab560Sae112802 }
1784fedab560Sae112802
1785fedab560Sae112802 /*
1786fedab560Sae112802 * Demap a kpm mapping in all TLB's.
1787fedab560Sae112802 */
1788fedab560Sae112802 static void
sfmmu_kpm_demap_tlbs(caddr_t vaddr)1789fedab560Sae112802 sfmmu_kpm_demap_tlbs(caddr_t vaddr)
1790fedab560Sae112802 {
1791fedab560Sae112802 cpuset_t cpuset;
1792fedab560Sae112802
 /* No preemption: CPU->cpu_id below must stay stable for the local flush. */
1793fedab560Sae112802 kpreempt_disable();
 /* Target every other ready CPU; the current CPU flushes locally below. */
1794fedab560Sae112802 cpuset = ksfmmup->sfmmu_cpusran;
1795fedab560Sae112802 CPUSET_AND(cpuset, cpu_ready_set);
1796fedab560Sae112802 CPUSET_DEL(cpuset, CPU->cpu_id);
1797fedab560Sae112802 SFMMU_XCALL_STATS(ksfmmup);
1798fedab560Sae112802
1799fedab560Sae112802 xt_some(cpuset, vtag_flushpage_tl1, (uint64_t)vaddr,
1800fedab560Sae112802 (uint64_t)ksfmmup);
1801fedab560Sae112802 vtag_flushpage(vaddr, (uint64_t)ksfmmup);
1802fedab560Sae112802
1803fedab560Sae112802 kpreempt_enable();
1804fedab560Sae112802 }
1805fedab560Sae112802
1806fedab560Sae112802 /*
1807fedab560Sae112802 * Summary states used in sfmmu_kpm_vac_unload (KPM_VUL__*).
1808fedab560Sae112802 * See also more detailed comments within in the sfmmu_kpm_vac_unload switch.
1809fedab560Sae112802 * Abbreviations used:
1810fedab560Sae112802 * BIG: Large page kpm mapping in use.
1811fedab560Sae112802 * CONFL: VAC conflict(s) within a kpm_page.
1812fedab560Sae112802 * INCR: Count of conflicts within a kpm_page is going to be incremented.
1813fedab560Sae112802 * DECR: Count of conflicts within a kpm_page is going to be decremented.
1814fedab560Sae112802 * UNMAP_SMALL: A small (regular page size) mapping is going to be unmapped.
1815fedab560Sae112802 * TNC: Temporary non cached: a kpm mapped page is mapped in TNC state.
1816fedab560Sae112802 */
1817fedab560Sae112802 #define KPM_VUL_BIG (0)			/* - - - - */
1818fedab560Sae112802 #define KPM_VUL_CONFL_INCR1 (KPM_KS)		/* - - ks - */
1819fedab560Sae112802 #define KPM_VUL_UNMAP_SMALL1 (KPM_KS | KPM_S)	/* - - ks s */
1820fedab560Sae112802 #define KPM_VUL_CONFL_INCR2 (KPM_KC)		/* kc - - - */
1821fedab560Sae112802 #define KPM_VUL_CONFL_INCR3 (KPM_KC | KPM_KS)	/* kc - ks - */
1822fedab560Sae112802 #define KPM_VUL_UNMAP_SMALL2 (KPM_KC | KPM_KS | KPM_S)	/* kc - ks s */
1823fedab560Sae112802 #define KPM_VUL_CONFL_DECR1 (KPM_KC | KPM_C)	/* kc c - - */
1824fedab560Sae112802 #define KPM_VUL_CONFL_DECR2 (KPM_KC | KPM_C | KPM_KS)	/* kc c ks - */
1825fedab560Sae112802 #define KPM_VUL_TNC (KPM_KC | KPM_C | KPM_KS | KPM_S)	/* kc c ks s */
1826fedab560Sae112802
1827fedab560Sae112802 /*
1828fedab560Sae112802 * Handle VAC unload conflicts introduced by hme mappings or vice
1829fedab560Sae112802 * versa when a hme conflict mapping is replaced by a non conflict
1830fedab560Sae112802 * one. Perform actions and state transitions according to the
1831fedab560Sae112802 * various page and kpm_page entry states. VACache flushes are in
1832fedab560Sae112802 * the responsibility of the caller. We still hold the mlist lock.
1833fedab560Sae112802 */
1834fedab560Sae112802 void
sfmmu_kpm_vac_unload(page_t * pp,caddr_t vaddr)1835fedab560Sae112802 sfmmu_kpm_vac_unload(page_t *pp, caddr_t vaddr)
1836fedab560Sae112802 {
1837fedab560Sae112802 kpm_page_t *kp;
1838fedab560Sae112802 kpm_hlk_t *kpmp;
1839fedab560Sae112802 caddr_t kpmvaddr = hat_kpm_page2va(pp, 1);
1840fedab560Sae112802 int newcolor;
1841fedab560Sae112802 kmutex_t *pmtx;
1842fedab560Sae112802 uint_t vacunlcase;
1843fedab560Sae112802 int badstate = 0;
1844fedab560Sae112802 kpm_spage_t *ksp;
1845fedab560Sae112802 kpm_shlk_t *kpmsp;
1846fedab560Sae112802
1847fedab560Sae112802 ASSERT(PAGE_LOCKED(pp));
1848fedab560Sae112802 ASSERT(sfmmu_mlist_held(pp));
1849fedab560Sae112802 ASSERT(!PP_ISNC(pp));
1850fedab560Sae112802
 /* Nonzero when the hme vaddr's vcolor differs from the kpm vcolor. */
1851fedab560Sae112802 newcolor = addr_to_vcolor(kpmvaddr) != addr_to_vcolor(vaddr);
1852fedab560Sae112802 if (kpm_smallpages)
1853fedab560Sae112802 goto smallpages_vac_unload;
1854fedab560Sae112802
1855fedab560Sae112802 PP2KPMPG(pp, kp);
1856fedab560Sae112802 kpmp = KPMP_HASH(kp);
1857fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
1858fedab560Sae112802
 /* kpm alias range: state is tracked via kp_refcnta and the page's P_KPMC flag. */
1859fedab560Sae112802 if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
1860fedab560Sae112802 if (kp->kp_refcnta < 1) {
1861fedab560Sae112802 panic("sfmmu_kpm_vac_unload: bad refcnta kpm_page=%p\n",
1862fedab560Sae112802 (void *)kp);
1863fedab560Sae112802 }
1864fedab560Sae112802
1865fedab560Sae112802 if (PP_ISKPMC(pp) == 0) {
1866fedab560Sae112802 if (newcolor == 0)
1867fedab560Sae112802 goto exit;
1868fedab560Sae112802 sfmmu_kpm_demap_small(kpmvaddr);
1869fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1870fedab560Sae112802 PP_SETKPMC(pp);
1871fedab560Sae112802 sfmmu_page_exit(pmtx);
1872fedab560Sae112802
1873fedab560Sae112802 } else if (newcolor == 0) {
1874fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1875fedab560Sae112802 PP_CLRKPMC(pp);
1876fedab560Sae112802 sfmmu_page_exit(pmtx);
1877fedab560Sae112802
1878fedab560Sae112802 } else {
1879fedab560Sae112802 badstate++;
1880fedab560Sae112802 }
1881fedab560Sae112802
1882fedab560Sae112802 goto exit;
1883fedab560Sae112802 }
1884fedab560Sae112802
 /* Sanity-check the kpm_page counters before acting on them. */
1885fedab560Sae112802 badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
1886fedab560Sae112802 if (kp->kp_refcntc == -1) {
1887fedab560Sae112802 /*
1888fedab560Sae112802 * We should come here only if trap level tsb miss
1889fedab560Sae112802 * handler is disabled.
1890fedab560Sae112802 */
1891fedab560Sae112802 badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
1892fedab560Sae112802 PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
1893fedab560Sae112802 } else {
1894fedab560Sae112802 badstate |= (kp->kp_refcntc < 0);
1895fedab560Sae112802 }
1896fedab560Sae112802
1897fedab560Sae112802 if (badstate)
1898fedab560Sae112802 goto exit;
1899fedab560Sae112802
1900fedab560Sae112802 if (PP_ISKPMC(pp) == 0 && newcolor == 0) {
1901fedab560Sae112802 ASSERT(PP_ISKPMS(pp) == 0);
1902fedab560Sae112802 goto exit;
1903fedab560Sae112802 }
1904fedab560Sae112802
1905fedab560Sae112802 /*
1906fedab560Sae112802 * Combine the per kpm_page and per page kpm VAC states
1907fedab560Sae112802 * to a summary state in order to make the vac unload
1908fedab560Sae112802 * handling more concise.
1909fedab560Sae112802 */
1910fedab560Sae112802 vacunlcase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
1911fedab560Sae112802 ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
1912fedab560Sae112802 (PP_ISKPMC(pp) ? KPM_C : 0) |
1913fedab560Sae112802 (PP_ISKPMS(pp) ? KPM_S : 0));
1914fedab560Sae112802
1915fedab560Sae112802 switch (vacunlcase) {
1916fedab560Sae112802 case KPM_VUL_BIG: /* - - - - */
1917fedab560Sae112802 /*
1918fedab560Sae112802 * Have to breakup the large page mapping to be
1919fedab560Sae112802 * able to handle the conflicting hme vaddr.
1920fedab560Sae112802 */
1921fedab560Sae112802 if (kp->kp_refcntc == -1) {
1922fedab560Sae112802 /* remove go indication */
1923fedab560Sae112802 sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
1924fedab560Sae112802 &kpmp->khl_lock, KPMTSBM_STOP);
1925fedab560Sae112802 }
1926fedab560Sae112802 sfmmu_kpm_demap_large(kpmvaddr);
1927fedab560Sae112802
1928fedab560Sae112802 ASSERT(kp->kp_refcntc == 0);
1929fedab560Sae112802 kp->kp_refcntc++;
1930fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1931fedab560Sae112802 PP_SETKPMC(pp);
1932fedab560Sae112802 sfmmu_page_exit(pmtx);
1933fedab560Sae112802 break;
1934fedab560Sae112802
1935fedab560Sae112802 case KPM_VUL_UNMAP_SMALL1: /* - - ks s */
1936fedab560Sae112802 case KPM_VUL_UNMAP_SMALL2: /* kc - ks s */
1937fedab560Sae112802 /*
1938fedab560Sae112802 * New conflict w/ an active kpm page, actually mapped
1939fedab560Sae112802 * in by small TSB/TLB entries. Remove the mapping and
1940fedab560Sae112802 * update states.
1941fedab560Sae112802 */
1942fedab560Sae112802 ASSERT(newcolor);
1943fedab560Sae112802 sfmmu_kpm_demap_small(kpmvaddr);
1944fedab560Sae112802 kp->kp_refcnts--;
1945fedab560Sae112802 kp->kp_refcnt++;
1946fedab560Sae112802 kp->kp_refcntc++;
1947fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1948fedab560Sae112802 PP_CLRKPMS(pp);
1949fedab560Sae112802 PP_SETKPMC(pp);
1950fedab560Sae112802 sfmmu_page_exit(pmtx);
1951fedab560Sae112802 break;
1952fedab560Sae112802
1953fedab560Sae112802 case KPM_VUL_CONFL_INCR1: /* - - ks - */
1954fedab560Sae112802 case KPM_VUL_CONFL_INCR2: /* kc - - - */
1955fedab560Sae112802 case KPM_VUL_CONFL_INCR3: /* kc - ks - */
1956fedab560Sae112802 /*
1957fedab560Sae112802 * New conflict on a active kpm mapped page not yet in
1958fedab560Sae112802 * TSB/TLB. Mark page and increment the kpm_page conflict
1959fedab560Sae112802 * count.
1960fedab560Sae112802 */
1961fedab560Sae112802 ASSERT(newcolor);
1962fedab560Sae112802 kp->kp_refcntc++;
1963fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1964fedab560Sae112802 PP_SETKPMC(pp);
1965fedab560Sae112802 sfmmu_page_exit(pmtx);
1966fedab560Sae112802 break;
1967fedab560Sae112802
1968fedab560Sae112802 case KPM_VUL_CONFL_DECR1: /* kc c - - */
1969fedab560Sae112802 case KPM_VUL_CONFL_DECR2: /* kc c ks - */
1970fedab560Sae112802 /*
1971fedab560Sae112802 * A conflicting hme mapping is removed for an active
1972fedab560Sae112802 * kpm page not yet in TSB/TLB. Unmark page and decrement
1973fedab560Sae112802 * the kpm_page conflict count.
1974fedab560Sae112802 */
1975fedab560Sae112802 ASSERT(newcolor == 0);
1976fedab560Sae112802 kp->kp_refcntc--;
1977fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
1978fedab560Sae112802 PP_CLRKPMC(pp);
1979fedab560Sae112802 sfmmu_page_exit(pmtx);
1980fedab560Sae112802 break;
1981fedab560Sae112802
1982fedab560Sae112802 case KPM_VUL_TNC: /* kc c ks s */
1983fedab560Sae112802 cmn_err(CE_NOTE, "sfmmu_kpm_vac_unload: "
1984fedab560Sae112802 "page not in NC state");
1985fedab560Sae112802 /* FALLTHRU */
1986fedab560Sae112802
1987fedab560Sae112802 default:
1988fedab560Sae112802 badstate++;
1989fedab560Sae112802 }
1990fedab560Sae112802 exit:
1991fedab560Sae112802 if (badstate) {
1992fedab560Sae112802 panic("sfmmu_kpm_vac_unload: inconsistent VAC state, "
1993fedab560Sae112802 "kpmvaddr=%p kp=%p pp=%p",
1994fedab560Sae112802 (void *)kpmvaddr, (void *)kp, (void *)pp);
1995fedab560Sae112802 }
1996fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
1997fedab560Sae112802
1998fedab560Sae112802 return;
1999fedab560Sae112802
2000fedab560Sae112802 smallpages_vac_unload:
2001fedab560Sae112802 if (newcolor == 0)
2002fedab560Sae112802 return;
2003fedab560Sae112802
2004fedab560Sae112802 PP2KPMSPG(pp, ksp);
2005fedab560Sae112802 kpmsp = KPMP_SHASH(ksp);
2006fedab560Sae112802
2007fedab560Sae112802 if (PP_ISKPMC(pp) == 0) {
2008fedab560Sae112802 if (ksp->kp_mapped == KPM_MAPPEDS) {
2009fedab560Sae112802 /*
2010fedab560Sae112802 * Stop TL tsbmiss handling
2011fedab560Sae112802 */
2012444ce08eSDonghai Qiao (void) sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag,
2013fedab560Sae112802 &kpmsp->kshl_lock, KPM_MAPPEDSC);
2014fedab560Sae112802
2015fedab560Sae112802 sfmmu_kpm_demap_small(kpmvaddr);
2016fedab560Sae112802
2017fedab560Sae112802 } else if (ksp->kp_mapped != KPM_MAPPEDSC) {
2018fedab560Sae112802 panic("sfmmu_kpm_vac_unload: inconsistent mapping");
2019fedab560Sae112802 }
2020fedab560Sae112802
2021fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
2022fedab560Sae112802 PP_SETKPMC(pp);
2023fedab560Sae112802 sfmmu_page_exit(pmtx);
2024fedab560Sae112802
2025fedab560Sae112802 } else {
2026fedab560Sae112802 if (ksp->kp_mapped != KPM_MAPPEDSC)
2027fedab560Sae112802 panic("sfmmu_kpm_vac_unload: inconsistent mapping");
2028fedab560Sae112802 }
2029fedab560Sae112802 }
2030fedab560Sae112802
2031fedab560Sae112802 /*
2032fedab560Sae112802 * Page is marked to be in VAC conflict to an existing kpm mapping
2033fedab560Sae112802 * or is kpm mapped using only the regular pagesize. Called from
2034fedab560Sae112802 * sfmmu_hblk_unload when a mlist is completely removed.
2035fedab560Sae112802 */
2036fedab560Sae112802 void
sfmmu_kpm_hme_unload(page_t * pp)2037fedab560Sae112802 sfmmu_kpm_hme_unload(page_t *pp)
2038fedab560Sae112802 {
2040fedab560Sae112802 kpm_page_t *kp;
2041fedab560Sae112802 kpm_hlk_t *kpmp;
2042fedab560Sae112802 caddr_t vaddr;
2043fedab560Sae112802 kmutex_t *pmtx;
2044fedab560Sae112802 uint_t flags;
2045fedab560Sae112802 kpm_spage_t *ksp;
2046fedab560Sae112802
2047fedab560Sae112802 ASSERT(sfmmu_mlist_held(pp));
2048fedab560Sae112802 ASSERT(PP_ISMAPPED_KPM(pp));
2049fedab560Sae112802
 /* Snapshot the page's kpm conflict/small-mapped flags. */
2050fedab560Sae112802 flags = pp->p_nrm & (P_KPMC | P_KPMS);
2051fedab560Sae112802 if (kpm_smallpages)
2052fedab560Sae112802 goto smallpages_hme_unload;
2053fedab560Sae112802
2054fedab560Sae112802 if (flags == (P_KPMC | P_KPMS)) {
2055fedab560Sae112802 panic("sfmmu_kpm_hme_unload: page should be uncached");
2056fedab560Sae112802
2057fedab560Sae112802 } else if (flags == P_KPMS) {
2058fedab560Sae112802 /*
2059fedab560Sae112802 * Page mapped small but not involved in VAC conflict
2060fedab560Sae112802 */
2061fedab560Sae112802 return;
2062fedab560Sae112802 }
2063fedab560Sae112802
2064fedab560Sae112802 vaddr = hat_kpm_page2va(pp, 1);
2065fedab560Sae112802
2066fedab560Sae112802 PP2KPMPG(pp, kp);
2067fedab560Sae112802 kpmp = KPMP_HASH(kp);
2068fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
2069fedab560Sae112802
2070fedab560Sae112802 if (IS_KPM_ALIAS_RANGE(vaddr)) {
2071fedab560Sae112802 if (kp->kp_refcnta < 1) {
2072fedab560Sae112802 panic("sfmmu_kpm_hme_unload: bad refcnta kpm_page=%p\n",
2073fedab560Sae112802 (void *)kp);
2074fedab560Sae112802 }
2075fedab560Sae112802 } else {
2076fedab560Sae112802 if (kp->kp_refcntc < 1) {
2077fedab560Sae112802 panic("sfmmu_kpm_hme_unload: bad refcntc kpm_page=%p\n",
2078fedab560Sae112802 (void *)kp);
2079fedab560Sae112802 }
2080fedab560Sae112802 kp->kp_refcntc--;
2081fedab560Sae112802 }
2082fedab560Sae112802
2083fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
2084fedab560Sae112802 PP_CLRKPMC(pp);
2085fedab560Sae112802 sfmmu_page_exit(pmtx);
2086fedab560Sae112802
2087fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
2088fedab560Sae112802 return;
2089fedab560Sae112802
2090fedab560Sae112802 smallpages_hme_unload:
2091fedab560Sae112802 if (flags != P_KPMC)
2092fedab560Sae112802 panic("sfmmu_kpm_hme_unload: page should be uncached");
2093fedab560Sae112802
2094fedab560Sae112802 vaddr = hat_kpm_page2va(pp, 1);
2095fedab560Sae112802 PP2KPMSPG(pp, ksp);
2096fedab560Sae112802
2097fedab560Sae112802 if (ksp->kp_mapped != KPM_MAPPEDSC)
2098fedab560Sae112802 panic("sfmmu_kpm_hme_unload: inconsistent mapping");
2099fedab560Sae112802
2100fedab560Sae112802 /*
2101fedab560Sae112802 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
2102fedab560Sae112802 * prevents TL tsbmiss handling and forces a hat_kpm_fault.
2103fedab560Sae112802 * There we can start over again.
2104fedab560Sae112802 */
2105fedab560Sae112802
2106fedab560Sae112802 pmtx = sfmmu_page_enter(pp);
2107fedab560Sae112802 PP_CLRKPMC(pp);
2108fedab560Sae112802 sfmmu_page_exit(pmtx);
2109fedab560Sae112802 }
2110fedab560Sae112802
2111fedab560Sae112802 /*
2112fedab560Sae112802 * Special hooks for sfmmu_page_cache_array() when changing the
2113fedab560Sae112802 * cacheability of a page. It is used to obey the hat_kpm lock
2114fedab560Sae112802 * ordering (mlist -> kpmp -> spl, and back).
2115fedab560Sae112802 */
2116fedab560Sae112802 kpm_hlk_t *
sfmmu_kpm_kpmp_enter(page_t * pp,pgcnt_t npages)2117fedab560Sae112802 sfmmu_kpm_kpmp_enter(page_t *pp, pgcnt_t npages)
2118fedab560Sae112802 {
2119fedab560Sae112802 kpm_page_t *kp;
2120fedab560Sae112802 kpm_hlk_t *kpmp;
2121fedab560Sae112802
2122fedab560Sae112802 ASSERT(sfmmu_mlist_held(pp));
2123fedab560Sae112802
2124fedab560Sae112802 if (kpm_smallpages || PP_ISMAPPED_KPM(pp) == 0)
2125fedab560Sae112802 return (NULL);
2126fedab560Sae112802
2127fedab560Sae112802 ASSERT(npages <= kpmpnpgs);
2128fedab560Sae112802
2129fedab560Sae112802 PP2KPMPG(pp, kp);
2130fedab560Sae112802 kpmp = KPMP_HASH(kp);
2131fedab560Sae112802 mutex_enter(&kpmp->khl_mutex);
2132fedab560Sae112802
2133fedab560Sae112802 return (kpmp);
2134fedab560Sae112802 }
2135fedab560Sae112802
2136fedab560Sae112802 void
sfmmu_kpm_kpmp_exit(kpm_hlk_t * kpmp)2137fedab560Sae112802 sfmmu_kpm_kpmp_exit(kpm_hlk_t *kpmp)
2138fedab560Sae112802 {
2139fedab560Sae112802 if (kpm_smallpages || kpmp == NULL)
2140fedab560Sae112802 return;
2141fedab560Sae112802
2142fedab560Sae112802 mutex_exit(&kpmp->khl_mutex);
2143fedab560Sae112802 }
2144fedab560Sae112802
/*
 * Summary states used in sfmmu_kpm_page_cache (KPM_*).
 * See also more detailed comments within the sfmmu_kpm_page_cache switch.
 * Abbreviations used:
 * UNC: Input state for an uncache request.
 * BIG: Large page kpm mapping in use.
 * SMALL: Page has a small kpm mapping within a kpm_page range.
 * NODEMAP: No demap needed.
 * NOP: No operation needed on this input state.
 * CACHE: Input state for a re-cache request.
 * MAPS: Page is in TNC and kpm VAC conflict state and kpm mapped small.
 * NOMAP: Page is in TNC and kpm VAC conflict state, but not small kpm
 * mapped.
 * NOMAPO: Page is in TNC and kpm VAC conflict state, but not small kpm
 * mapped. There are also other small kpm mappings within this
 * kpm_page.
 *
 * Bit layout of a summary state: (kc ks | c s) = (kp_refcntc > 0,
 * kp_refcnts > 0 | PP_ISKPMC, PP_ISKPMS), as assembled in
 * sfmmu_kpm_page_cache.
 */
#define	KPM_UNC_BIG		(0)			/* - - - - */
#define	KPM_UNC_NODEMAP1	(KPM_KS)		/* - - ks - */
#define	KPM_UNC_SMALL1		(KPM_KS | KPM_S)	/* - - ks s */
#define	KPM_UNC_NODEMAP2	(KPM_KC)		/* kc - - - */
#define	KPM_UNC_NODEMAP3	(KPM_KC | KPM_KS)	/* kc - ks - */
#define	KPM_UNC_SMALL2		(KPM_KC | KPM_KS | KPM_S) /* kc - ks s */
#define	KPM_UNC_NOP1		(KPM_KC | KPM_C)	/* kc c - - */
#define	KPM_UNC_NOP2		(KPM_KC | KPM_C | KPM_KS) /* kc c ks - */
#define	KPM_CACHE_NOMAP		(KPM_KC | KPM_C)	/* kc c - - */
#define	KPM_CACHE_NOMAPO	(KPM_KC | KPM_C | KPM_KS) /* kc c ks - */
#define	KPM_CACHE_MAPS		(KPM_KC | KPM_C | KPM_KS | KPM_S) /* kc c ks s */
2173fedab560Sae112802
2174fedab560Sae112802 /*
2175fedab560Sae112802 * This function is called when the virtual cacheability of a page
2176fedab560Sae112802 * is changed and the page has an active kpm mapping. The mlist mutex,
2177fedab560Sae112802 * the spl hash lock and the kpmp mutex (if needed) are already grabbed.
2178fedab560Sae112802 */
/*ARGSUSED2*/
void
sfmmu_kpm_page_cache(page_t *pp, int flags, int cache_flush_tag)
{
	kpm_page_t	*kp;
	kpm_hlk_t	*kpmp;
	caddr_t		kpmvaddr;
	int		badstate = 0;
	uint_t		pgcacase;
	kpm_spage_t	*ksp;
	kpm_shlk_t	*kpmsp;
	int		oldval;

	/* Caller holds mlist, spl, and (non-smallpages) the kpmp mutex. */
	ASSERT(PP_ISMAPPED_KPM(pp));
	ASSERT(sfmmu_mlist_held(pp));
	ASSERT(sfmmu_page_spl_held(pp));

	/* Only uncache (HAT_TMPNC) and re-cache (HAT_CACHE) are valid. */
	if (flags != HAT_TMPNC && flags != HAT_CACHE)
		panic("sfmmu_kpm_page_cache: bad flags");

	kpmvaddr = hat_kpm_page2va(pp, 1);

	if (flags == HAT_TMPNC && cache_flush_tag == CACHE_FLUSH) {
		pfn_t pfn = pp->p_pagenum;
		int vcolor = addr_to_vcolor(kpmvaddr);
		cpuset_t cpuset = cpu_ready_set;

		/* Flush vcolor in DCache */
		/* Cross-call all other ready CPUs, then flush locally. */
		CPUSET_DEL(cpuset, CPU->cpu_id);
		SFMMU_XCALL_STATS(ksfmmup);
		xt_some(cpuset, vac_flushpage_tl1, pfn, vcolor);
		vac_flushpage(pfn, vcolor);
	}

	if (kpm_smallpages)
		goto smallpages_page_cache;

	PP2KPMPG(pp, kp);
	kpmp = KPMP_HASH(kp);
	ASSERT(MUTEX_HELD(&kpmp->khl_mutex));

	if (IS_KPM_ALIAS_RANGE(kpmvaddr)) {
		/*
		 * VAC alias range: always demap the small mapping and
		 * toggle only the per-page conflict bit; the kpm_page
		 * summary counters are not touched here.
		 */
		if (kp->kp_refcnta < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnta "
			    "kpm_page=%p\n", (void *)kp);
		}
		sfmmu_kpm_demap_small(kpmvaddr);
		if (flags == HAT_TMPNC) {
			PP_SETKPMC(pp);
			ASSERT(!PP_ISKPMS(pp));
		} else {
			ASSERT(PP_ISKPMC(pp));
			PP_CLRKPMC(pp);
		}
		goto exit;
	}

	badstate = (kp->kp_refcnt < 0 || kp->kp_refcnts < 0);
	if (kp->kp_refcntc == -1) {
		/*
		 * We should come here only if trap level tsb miss
		 * handler is disabled.
		 */
		badstate |= (kp->kp_refcnt == 0 || kp->kp_refcnts > 0 ||
		    PP_ISKPMC(pp) || PP_ISKPMS(pp) || PP_ISNC(pp));
	} else {
		badstate |= (kp->kp_refcntc < 0);
	}

	if (badstate)
		goto exit;

	/*
	 * Combine the per kpm_page and per page kpm VAC states to
	 * a summary state in order to make the VAC cache/uncache
	 * handling more concise.
	 */
	pgcacase = (((kp->kp_refcntc > 0) ? KPM_KC : 0) |
	    ((kp->kp_refcnts > 0) ? KPM_KS : 0) |
	    (PP_ISKPMC(pp) ? KPM_C : 0) |
	    (PP_ISKPMS(pp) ? KPM_S : 0));

	if (flags == HAT_CACHE) {
		/* Re-cache request: leave TNC state for this page. */
		switch (pgcacase) {
		case KPM_CACHE_MAPS:			/* kc c ks s */
			/*
			 * Page was kpm mapped small; demap it and move
			 * the reference from the small to the large
			 * mapping count.
			 */
			sfmmu_kpm_demap_small(kpmvaddr);
			if (kp->kp_refcnts < 1) {
				panic("sfmmu_kpm_page_cache: bad refcnts "
				"kpm_page=%p\n", (void *)kp);
			}
			kp->kp_refcnts--;
			kp->kp_refcnt++;
			PP_CLRKPMS(pp);
			/* FALLTHRU */

		case KPM_CACHE_NOMAP:			/* kc c - - */
		case KPM_CACHE_NOMAPO:			/* kc c ks - */
			/* Page leaves conflict state. */
			kp->kp_refcntc--;
			PP_CLRKPMC(pp);
			break;

		default:
			badstate++;
		}
		goto exit;
	}

	/* Uncache (HAT_TMPNC) request from here on. */
	switch (pgcacase) {
	case KPM_UNC_BIG:				/* - - - - */
		if (kp->kp_refcnt < 1) {
			panic("sfmmu_kpm_page_cache: bad refcnt "
			    "kpm_page=%p\n", (void *)kp);
		}

		/*
		 * Have to breakup the large page mapping in preparation
		 * to the upcoming TNC mode handled by small mappings.
		 * The demap can already be done due to another conflict
		 * within the kpm_page.
		 */
		if (kp->kp_refcntc == -1) {
			/* remove go indication */
			sfmmu_kpm_tsbmtl(&kp->kp_refcntc,
			    &kpmp->khl_lock, KPMTSBM_STOP);
		}
		ASSERT(kp->kp_refcntc == 0);
		sfmmu_kpm_demap_large(kpmvaddr);
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_SMALL1:				/* - - ks s */
	case KPM_UNC_SMALL2:				/* kc - ks s */
		/*
		 * Have to demap an already small kpm mapping in preparation
		 * to the upcoming TNC mode. The demap can already be done
		 * due to another conflict within the kpm_page.
		 */
		sfmmu_kpm_demap_small(kpmvaddr);
		kp->kp_refcntc++;
		kp->kp_refcnts--;
		kp->kp_refcnt++;
		PP_CLRKPMS(pp);
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NODEMAP1:				/* - - ks - */
		/* fallthru */

	case KPM_UNC_NODEMAP2:				/* kc - - - */
	case KPM_UNC_NODEMAP3:				/* kc - ks - */
		/* No mapping of this page to tear down; just flag it. */
		kp->kp_refcntc++;
		PP_SETKPMC(pp);
		break;

	case KPM_UNC_NOP1:				/* kc c - - */
	case KPM_UNC_NOP2:				/* kc c ks - */
		/* Page is already in conflict state; nothing to do. */
		break;

	default:
		badstate++;
	}
exit:
	if (badstate) {
		panic("sfmmu_kpm_page_cache: inconsistent VAC state "
		    "kpmvaddr=%p kp=%p pp=%p", (void *)kpmvaddr,
		    (void *)kp, (void *)pp);
	}
	return;

smallpages_page_cache:
	PP2KPMSPG(pp, ksp);
	kpmsp = KPMP_SHASH(ksp);

	/*
	 * Mark the mapping as no-go (KPM_MAPPEDSC); the next access will
	 * fault and be resolved through sfmmu_kpm_fault_small.
	 */
	oldval = sfmmu_kpm_stsbmtl(&ksp->kp_mapped_flag, &kpmsp->kshl_lock,
	    KPM_MAPPEDSC);

	if (!(oldval == KPM_MAPPEDS || oldval == KPM_MAPPEDSC))
		panic("smallpages_page_cache: inconsistent mapping");

	sfmmu_kpm_demap_small(kpmvaddr);

	if (flags == HAT_TMPNC) {
		PP_SETKPMC(pp);
		ASSERT(!PP_ISKPMS(pp));

	} else {
		ASSERT(PP_ISKPMC(pp));
		PP_CLRKPMC(pp);
	}

	/*
	 * Keep KPM_MAPPEDSC until the next kpm tsbmiss where it
	 * prevents TL tsbmiss handling and force a hat_kpm_fault.
	 * There we can start over again.
	 */
}
2380