1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2014 by Delphix. All rights reserved.
25 * Copyright 2015 Joyent, Inc.
26 */
27
28 #include <sys/types.h>
29 #include <sys/sysmacros.h>
30 #include <sys/kmem.h>
31 #include <sys/atomic.h>
32 #include <sys/bitmap.h>
33 #include <sys/machparam.h>
34 #include <sys/machsystm.h>
35 #include <sys/mman.h>
36 #include <sys/systm.h>
37 #include <sys/cpuvar.h>
38 #include <sys/thread.h>
39 #include <sys/proc.h>
40 #include <sys/cpu.h>
41 #include <sys/kmem.h>
42 #include <sys/disp.h>
43 #include <sys/vmem.h>
44 #include <sys/vmsystm.h>
45 #include <sys/promif.h>
46 #include <sys/var.h>
47 #include <sys/x86_archext.h>
48 #include <sys/archsystm.h>
49 #include <sys/bootconf.h>
50 #include <sys/dumphdr.h>
51 #include <vm/seg_kmem.h>
52 #include <vm/seg_kpm.h>
53 #include <vm/hat.h>
54 #include <vm/hat_i86.h>
55 #include <sys/cmn_err.h>
56 #include <sys/panic.h>
57
58 #ifdef __xpv
59 #include <sys/hypervisor.h>
60 #include <sys/xpv_panic.h>
61 #endif
62
63 #include <sys/bootinfo.h>
64 #include <vm/kboot_mmu.h>
65
66 static void x86pte_zero(htable_t *dest, uint_t entry, uint_t count);
67
68 kmem_cache_t *htable_cache;
69
70 /*
71 * The variable htable_reserve_amount, rather than HTABLE_RESERVE_AMOUNT,
72 * is used in order to facilitate testing of the htable_steal() code.
73 * By resetting htable_reserve_amount to a lower value, we can force
74 * stealing to occur. The reserve amount is a guess to get us through boot.
75 */
76 #define HTABLE_RESERVE_AMOUNT (200)
77 uint_t htable_reserve_amount = HTABLE_RESERVE_AMOUNT;
78 kmutex_t htable_reserve_mutex;
79 uint_t htable_reserve_cnt;
80 htable_t *htable_reserve_pool;
81
82 /*
83 * Used to hand test htable_steal().
84 */
85 #ifdef DEBUG
86 ulong_t force_steal = 0;
87 ulong_t ptable_cnt = 0;
88 #endif
89
90 /*
91 * This variable exists so that we can tune htable_steal_passes via /etc/system.
92 * Any value works, but a power of two <= mmu.ptes_per_table is best.
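 * For example, an /etc/system entry such as
 *     set htable_steal_passes = 4
 * takes effect at the next boot.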
93 */
94 uint_t htable_steal_passes = 8;
95
96 /*
97 * mutex stuff for access to htable hash
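 * NUM_HTABLE_MUTEX must be a power of two, since HTABLE_MUTEX_HASH()
 * relies on masking with (NUM_HTABLE_MUTEX - 1).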
98 */
99 #define NUM_HTABLE_MUTEX 128
100 kmutex_t htable_mutex[NUM_HTABLE_MUTEX];
101 #define HTABLE_MUTEX_HASH(h) ((h) & (NUM_HTABLE_MUTEX - 1))
102
103 #define HTABLE_ENTER(h) mutex_enter(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
104 #define HTABLE_EXIT(h) mutex_exit(&htable_mutex[HTABLE_MUTEX_HASH(h)]);
105
106 /*
107 * forward declarations
108 */
109 static void link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr);
110 static void unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr);
111 static void htable_free(htable_t *ht);
112 static x86pte_t *x86pte_access_pagetable(htable_t *ht, uint_t index);
113 static void x86pte_release_pagetable(htable_t *ht);
114 static x86pte_t x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old,
115 x86pte_t new);
116
117 /*
118 * A counter to track if we are stealing or reaping htables. When non-zero,
119 * htable_free() will directly free htables (either to the reserve or kmem)
120 * instead of putting them in a hat's htable cache.
121 */
122 uint32_t htable_dont_cache = 0;
123
124 /*
125 * Track the number of active pagetables, so we can know how many to reap
126 */
127 static uint32_t active_ptables = 0;
128
129 #ifdef __xpv
130 /*
131 * Deal with hypervisor complications.
132 */
133 void
134 xen_flush_va(caddr_t va)
135 {
136 struct mmuext_op t;
137 uint_t count;
138
139 if (IN_XPV_PANIC()) {
140 mmu_tlbflush_entry((caddr_t)va);
141 } else {
142 t.cmd = MMUEXT_INVLPG_LOCAL;
143 t.arg1.linear_addr = (uintptr_t)va;
144 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
145 panic("HYPERVISOR_mmuext_op() failed");
146 ASSERT(count == 1);
147 }
148 }
149
150 void
151 xen_gflush_va(caddr_t va, cpuset_t cpus)
152 {
153 struct mmuext_op t;
154 uint_t count;
155
156 if (IN_XPV_PANIC()) {
157 mmu_tlbflush_entry((caddr_t)va);
158 return;
159 }
160
161 t.cmd = MMUEXT_INVLPG_MULTI;
162 t.arg1.linear_addr = (uintptr_t)va;
163 /*LINTED: constant in conditional context*/
164 set_xen_guest_handle(t.arg2.vcpumask, &cpus);
165 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
166 panic("HYPERVISOR_mmuext_op() failed");
167 ASSERT(count == 1);
168 }
169
170 void
171 xen_flush_tlb()
172 {
173 struct mmuext_op t;
174 uint_t count;
175
176 if (IN_XPV_PANIC()) {
177 xpv_panic_reload_cr3();
178 } else {
179 t.cmd = MMUEXT_TLB_FLUSH_LOCAL;
180 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
181 panic("HYPERVISOR_mmuext_op() failed");
182 ASSERT(count == 1);
183 }
184 }
185
186 void
187 xen_gflush_tlb(cpuset_t cpus)
188 {
189 struct mmuext_op t;
190 uint_t count;
191
192 ASSERT(!IN_XPV_PANIC());
193 t.cmd = MMUEXT_TLB_FLUSH_MULTI;
194 /*LINTED: constant in conditional context*/
195 set_xen_guest_handle(t.arg2.vcpumask, &cpus);
196 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
197 panic("HYPERVISOR_mmuext_op() failed");
198 ASSERT(count == 1);
199 }
200
201 /*
202 * Install/Adjust a kpm mapping under the hypervisor.
203 * Value of "how" should be:
204 * PT_WRITABLE | PT_VALID - regular kpm mapping
205 * PT_VALID - make mapping read-only
206 * 0 - remove mapping
207 *
208 * Returns 0 on success, non-zero for failure.
209 */
210 int
211 xen_kpm_page(pfn_t pfn, uint_t how)
212 {
213 paddr_t pa = mmu_ptob((paddr_t)pfn);
214 x86pte_t pte = PT_NOCONSIST | PT_REF | PT_MOD;
215
216 if (kpm_vbase == NULL)
217 return (0);
218
219 if (how)
220 pte |= pa_to_ma(pa) | how;
221 else
222 pte = 0;
223 return (HYPERVISOR_update_va_mapping((uintptr_t)kpm_vbase + pa,
224 pte, UVMF_INVLPG | UVMF_ALL));
225 }
226
227 void
228 xen_pin(pfn_t pfn, level_t lvl)
229 {
230 struct mmuext_op t;
231 uint_t count;
232
233 t.cmd = MMUEXT_PIN_L1_TABLE + lvl;
234 t.arg1.mfn = pfn_to_mfn(pfn);
235 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
236 panic("HYPERVISOR_mmuext_op() failed");
237 ASSERT(count == 1);
238 }
239
240 void
241 xen_unpin(pfn_t pfn)
242 {
243 struct mmuext_op t;
244 uint_t count;
245
246 t.cmd = MMUEXT_UNPIN_TABLE;
247 t.arg1.mfn = pfn_to_mfn(pfn);
248 if (HYPERVISOR_mmuext_op(&t, 1, &count, DOMID_SELF) < 0)
249 panic("HYPERVISOR_mmuext_op() failed");
250 ASSERT(count == 1);
251 }
252
253 static void
254 xen_map(uint64_t pte, caddr_t va)
255 {
256 if (HYPERVISOR_update_va_mapping((uintptr_t)va, pte,
257 UVMF_INVLPG | UVMF_LOCAL))
258 panic("HYPERVISOR_update_va_mapping() failed");
259 }
260 #endif /* __xpv */
261
262 /*
263 * Allocate a memory page for a hardware page table.
264 *
265 * A wrapper around page_get_physical(), with some extra checks.
266 */
267 static pfn_t
268 ptable_alloc(uintptr_t seed)
269 {
270 pfn_t pfn;
271 page_t *pp;
272
273 pfn = PFN_INVALID;
274
275 /*
276 * The first check is to see if there is memory in the system. If we
277 * drop to throttlefree, then fail the ptable_alloc() and let the
278 * stealing code kick in. Note that we have to do this test here,
279 * since the test in page_create_throttle() would let the NOSLEEP
280 * allocation go through and deplete the page reserves.
281 *
282 * The !NOMEMWAIT() lets pageout, fsflush, etc. skip this check.
283 */
284 if (!NOMEMWAIT() && freemem <= throttlefree + 1)
285 return (PFN_INVALID);
286
287 #ifdef DEBUG
288 /*
289 * This code makes htable_steal() easier to test. By setting
290 * force_steal we force pagetable allocations to fall
291 * into the stealing code. Roughly 1 in every "force_steal"
292 * page table allocations will fail.
293 */
294 if (proc_pageout != NULL && force_steal > 1 &&
295 ++ptable_cnt > force_steal) {
296 ptable_cnt = 0;
297 return (PFN_INVALID);
298 }
299 #endif /* DEBUG */
300
301 pp = page_get_physical(seed);
302 if (pp == NULL)
303 return (PFN_INVALID);
304 ASSERT(PAGE_SHARED(pp));
305 pfn = pp->p_pagenum;
306 if (pfn == PFN_INVALID)
307 panic("ptable_alloc(): Invalid PFN!!");
308 atomic_inc_32(&active_ptables);
309 HATSTAT_INC(hs_ptable_allocs);
310 return (pfn);
311 }
312
313 /*
314 * Free an htable's associated page table page. See the comments
315 * for ptable_alloc().
316 */
317 static void
318 ptable_free(pfn_t pfn)
319 {
320 page_t *pp = page_numtopp_nolock(pfn);
321
322 /*
323 * need to destroy the page used for the pagetable
324 */
325 ASSERT(pfn != PFN_INVALID);
326 HATSTAT_INC(hs_ptable_frees);
327 atomic_dec_32(&active_ptables);
328 if (pp == NULL)
329 panic("ptable_free(): no page for pfn!");
330 ASSERT(PAGE_SHARED(pp));
331 ASSERT(pfn == pp->p_pagenum);
332 ASSERT(!IN_XPV_PANIC());
333
334 /*
335 * Get an exclusive lock, might have to wait for a kmem reader.
336 */
337 if (!page_tryupgrade(pp)) {
338 u_offset_t off = pp->p_offset;
339 page_unlock(pp);
340 pp = page_lookup(&kvp, off, SE_EXCL);
341 if (pp == NULL)
342 panic("page not found");
343 }
344 #ifdef __xpv
345 if (kpm_vbase && xen_kpm_page(pfn, PT_VALID | PT_WRITABLE) < 0)
346 panic("failure making kpm r/w pfn=0x%lx", pfn);
347 #endif
348 page_hashout(pp, NULL);
349 page_free(pp, 1);
350 page_unresv(1);
351 }
352
353 /*
354 * Put one htable on the reserve list.
355 */
356 static void
357 htable_put_reserve(htable_t *ht)
358 {
359 ht->ht_hat = NULL; /* no longer tied to a hat */
360 ASSERT(ht->ht_pfn == PFN_INVALID);
361 HATSTAT_INC(hs_htable_rputs);
362 mutex_enter(&htable_reserve_mutex);
363 ht->ht_next = htable_reserve_pool;
364 htable_reserve_pool = ht;
365 ++htable_reserve_cnt;
366 mutex_exit(&htable_reserve_mutex);
367 }
368
369 /*
370 * Take one htable from the reserve.
371 */
372 static htable_t *
373 htable_get_reserve(void)
374 {
375 htable_t *ht = NULL;
376
377 mutex_enter(&htable_reserve_mutex);
378 if (htable_reserve_cnt != 0) {
379 ht = htable_reserve_pool;
380 ASSERT(ht != NULL);
381 ASSERT(ht->ht_pfn == PFN_INVALID);
382 htable_reserve_pool = ht->ht_next;
383 --htable_reserve_cnt;
384 HATSTAT_INC(hs_htable_rgets);
385 }
386 mutex_exit(&htable_reserve_mutex);
387 return (ht);
388 }
389
390 /*
391 * Allocate initial htables and put them on the reserve list
392 */
393 void
394 htable_initial_reserve(uint_t count)
395 {
396 htable_t *ht;
397
398 count += HTABLE_RESERVE_AMOUNT;
399 while (count > 0) {
400 ht = kmem_cache_alloc(htable_cache, KM_NOSLEEP);
401 ASSERT(ht != NULL);
402
403 ASSERT(use_boot_reserve);
404 ht->ht_pfn = PFN_INVALID;
405 htable_put_reserve(ht);
406 --count;
407 }
408 }
409
410 /*
411 * Readjust the reserves after a thread finishes using them.
412 */
413 void
414 htable_adjust_reserve()
415 {
416 htable_t *ht;
417
418 /*
419 * Free any excess htables in the reserve list
420 */
421 while (htable_reserve_cnt > htable_reserve_amount &&
422 !USE_HAT_RESERVES()) {
423 ht = htable_get_reserve();
424 if (ht == NULL)
425 return;
426 ASSERT(ht->ht_pfn == PFN_INVALID);
427 kmem_cache_free(htable_cache, ht);
428 }
429 }
430
431 /*
432 * Search the active htables for one to steal. Start at a different hash
433 * bucket every time to help spread the pain of stealing
434 */
435 static void
436 htable_steal_active(hat_t *hat, uint_t cnt, uint_t threshold,
437 uint_t *stolen, htable_t **list)
438 {
439 static uint_t h_seed = 0;
440 htable_t *higher, *ht;
441 uint_t h, e, h_start;
442 uintptr_t va;
443 x86pte_t pte;
444
445 h = h_start = h_seed++ % hat->hat_num_hash;
446 do {
447 higher = NULL;
448 HTABLE_ENTER(h);
449 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
450
451 /*
452 * Can we rule out reaping?
453 */
454 if (ht->ht_busy != 0 ||
455 (ht->ht_flags & HTABLE_SHARED_PFN) ||
456 ht->ht_level > 0 || ht->ht_valid_cnt > threshold ||
457 ht->ht_lock_cnt != 0)
458 continue;
459
460 /*
461 * Increment busy so the htable can't disappear. We
462 * drop the htable mutex to avoid deadlocks with
463 * hat_pageunload() and the hment mutex while we
464 * call hat_pte_unmap()
465 */
466 ++ht->ht_busy;
467 HTABLE_EXIT(h);
468
469 /*
470 * Try stealing.
471 * - unload and invalidate all PTEs
472 */
473 for (e = 0, va = ht->ht_vaddr;
474 e < HTABLE_NUM_PTES(ht) && ht->ht_valid_cnt > 0 &&
475 ht->ht_busy == 1 && ht->ht_lock_cnt == 0;
476 ++e, va += MMU_PAGESIZE) {
477 pte = x86pte_get(ht, e);
478 if (!PTE_ISVALID(pte))
479 continue;
480 hat_pte_unmap(ht, e, HAT_UNLOAD, pte, NULL,
481 B_TRUE);
482 }
483
484 /*
485 * Reacquire htable lock. If we didn't remove all
486 * mappings in the table, or another thread added a new
487 * mapping behind us, give up on this table.
488 */
489 HTABLE_ENTER(h);
490 if (ht->ht_busy != 1 || ht->ht_valid_cnt != 0 ||
491 ht->ht_lock_cnt != 0) {
492 --ht->ht_busy;
493 continue;
494 }
495
496 /*
497 * Steal it and unlink the page table.
498 */
499 higher = ht->ht_parent;
500 unlink_ptp(higher, ht, ht->ht_vaddr);
501
502 /*
503 * remove from the hash list
504 */
505 if (ht->ht_next)
506 ht->ht_next->ht_prev = ht->ht_prev;
507
508 if (ht->ht_prev) {
509 ht->ht_prev->ht_next = ht->ht_next;
510 } else {
511 ASSERT(hat->hat_ht_hash[h] == ht);
512 hat->hat_ht_hash[h] = ht->ht_next;
513 }
514
515 /*
516 * Break to outer loop to release the
517 * higher (ht_parent) pagetable. This
518 * spreads out the pain caused by
519 * pagefaults.
520 */
521 ht->ht_next = *list;
522 *list = ht;
523 ++*stolen;
524 break;
525 }
526 HTABLE_EXIT(h);
527 if (higher != NULL)
528 htable_release(higher);
529 if (++h == hat->hat_num_hash)
530 h = 0;
531 } while (*stolen < cnt && h != h_start);
532 }
533
534 /*
535 * Move hat to the end of the kas list
536 */
537 static void
538 move_victim(hat_t *hat)
539 {
540 ASSERT(MUTEX_HELD(&hat_list_lock));
541
542 /* unlink victim hat */
543 if (hat->hat_prev)
544 hat->hat_prev->hat_next = hat->hat_next;
545 else
546 kas.a_hat->hat_next = hat->hat_next;
547
548 if (hat->hat_next)
549 hat->hat_next->hat_prev = hat->hat_prev;
550 else
551 kas.a_hat->hat_prev = hat->hat_prev;
552 /* relink at end of hat list */
553 hat->hat_next = NULL;
554 hat->hat_prev = kas.a_hat->hat_prev;
555 if (hat->hat_prev)
556 hat->hat_prev->hat_next = hat;
557 else
558 kas.a_hat->hat_next = hat;
559
560 kas.a_hat->hat_prev = hat;
561 }
562
563 /*
564 * This routine steals htables from user processes. Called by htable_reap
565 * (reap=TRUE) or htable_alloc (reap=FALSE).
566 */
567 static htable_t *
568 htable_steal(uint_t cnt, boolean_t reap)
569 {
570 hat_t *hat = kas.a_hat; /* list starts with khat */
571 htable_t *list = NULL;
572 htable_t *ht;
573 uint_t stolen = 0;
574 uint_t pass;
575 uint_t threshold;
576
577 /*
578 * Limit htable_steal_passes to something reasonable
579 */
580 if (htable_steal_passes == 0)
581 htable_steal_passes = 1;
582 if (htable_steal_passes > mmu.ptes_per_table)
583 htable_steal_passes = mmu.ptes_per_table;
584
585 /*
586 * Loop through all user hats. The 1st pass takes cached htables that
587 * aren't in use. The later passes steal by removing mappings, too.
588 */
589 atomic_inc_32(&htable_dont_cache);
590 for (pass = 0; pass <= htable_steal_passes && stolen < cnt; ++pass) {
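/*
 * threshold is the most valid entries a pagetable may hold and still be
 * considered by htable_steal_active() on this pass; it grows each pass,
 * so later passes steal from progressively fuller pagetables.
 */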
591 threshold = pass * mmu.ptes_per_table / htable_steal_passes;
592
593 mutex_enter(&hat_list_lock);
594
595 /* skip the first hat (kernel) */
596 hat = kas.a_hat->hat_next;
597 for (;;) {
598 /*
599 * Skip any hat that is already being stolen from.
600 *
601 * We skip SHARED hats, as these are dummy
602 * hats that host ISM shared page tables.
603 *
604 * We also skip if HAT_FREEING because hat_pte_unmap()
605 * won't zero out the PTE's. That would lead to hitting
606 * stale PTEs either here or under hat_unload() when we
607 * steal and unload the same page table in competing
608 * threads.
609 */
610 while (hat != NULL &&
611 (hat->hat_flags &
612 (HAT_VICTIM | HAT_SHARED | HAT_FREEING)) != 0)
613 hat = hat->hat_next;
614
615 if (hat == NULL)
616 break;
617
618 /*
619 * Mark the HAT as a stealing victim so that it is
620 * not freed from under us, e.g. in as_free()
621 */
622 hat->hat_flags |= HAT_VICTIM;
623 mutex_exit(&hat_list_lock);
624
625 /*
626 * Take any htables from the hat's cached "free" list.
627 */
628 hat_enter(hat);
629 while ((ht = hat->hat_ht_cached) != NULL &&
630 stolen < cnt) {
631 hat->hat_ht_cached = ht->ht_next;
632 ht->ht_next = list;
633 list = ht;
634 ++stolen;
635 }
636 hat_exit(hat);
637
638 /*
639 * Don't steal active htables on first pass.
640 */
641 if (pass != 0 && (stolen < cnt))
642 htable_steal_active(hat, cnt, threshold,
643 &stolen, &list);
644
645 /*
646 * do synchronous teardown for the reap case so that
647 * we can forget hat; at this time, hat is
648 * guaranteed to be around because HAT_VICTIM is set
649 * (see htable_free() for similar code)
650 */
651 for (ht = list; (ht) && (reap); ht = ht->ht_next) {
652 if (ht->ht_hat == NULL)
653 continue;
654 ASSERT(ht->ht_hat == hat);
655 #if defined(__xpv) && defined(__amd64)
656 if (!(ht->ht_flags & HTABLE_VLP) &&
657 ht->ht_level == mmu.max_level) {
658 ptable_free(hat->hat_user_ptable);
659 hat->hat_user_ptable = PFN_INVALID;
660 }
661 #endif
662 /*
663 * forget the hat
664 */
665 ht->ht_hat = NULL;
666 }
667
668 mutex_enter(&hat_list_lock);
669
670 /*
671 * Are we finished?
672 */
673 if (stolen == cnt) {
674 /*
675 * Try to spread the pain of stealing,
676 * move victim HAT to the end of the HAT list.
677 */
678 if (pass >= 1 && cnt == 1 &&
679 kas.a_hat->hat_prev != hat)
680 move_victim(hat);
681 /*
682 * We are finished
683 */
684 }
685
686 /*
687 * Clear the victim flag, hat can go away now (once
688 * the lock is dropped)
689 */
690 if (hat->hat_flags & HAT_VICTIM) {
691 ASSERT(hat != kas.a_hat);
692 hat->hat_flags &= ~HAT_VICTIM;
693 cv_broadcast(&hat_list_cv);
694 }
695
696 /* move on to the next hat */
697 hat = hat->hat_next;
698 }
699
700 mutex_exit(&hat_list_lock);
701
702 }
703 ASSERT(!MUTEX_HELD(&hat_list_lock));
704
705 atomic_dec_32(&htable_dont_cache);
706 return (list);
707 }
708
709 /*
710 * This is invoked from kmem when the system is low on memory. We try
711 * to free hments, htables, and ptables to improve the memory situation.
712 */
713 /*ARGSUSED*/
714 static void
715 htable_reap(void *handle)
716 {
717 uint_t reap_cnt;
718 htable_t *list;
719 htable_t *ht;
720
721 HATSTAT_INC(hs_reap_attempts);
722 if (!can_steal_post_boot)
723 return;
724
725 /*
726 * Try to reap 5% of the page tables bounded by a maximum of
727 * 5% of physmem and a minimum of 10.
728 */
729 reap_cnt = MAX(MIN(physmem / 20, active_ptables / 20), 10);
730
731 /*
732 * Note: htable_dont_cache should be set at the time of
733 * invoking htable_free()
734 */
735 atomic_inc_32(&htable_dont_cache);
736 /*
737 * Let htable_steal() do the work, we just call htable_free()
738 */
739 XPV_DISALLOW_MIGRATE();
740 list = htable_steal(reap_cnt, B_TRUE);
741 XPV_ALLOW_MIGRATE();
742 while ((ht = list) != NULL) {
743 list = ht->ht_next;
744 HATSTAT_INC(hs_reaped);
745 htable_free(ht);
746 }
747 atomic_dec_32(&htable_dont_cache);
748
749 /*
750 * Free up excess reserves
751 */
752 htable_adjust_reserve();
753 hment_adjust_reserve();
754 }
755
756 /*
757 * Allocate an htable, stealing one or using the reserve if necessary
758 */
759 static htable_t *
760 htable_alloc(
761 hat_t *hat,
762 uintptr_t vaddr,
763 level_t level,
764 htable_t *shared)
765 {
766 htable_t *ht = NULL;
767 uint_t is_vlp;
768 uint_t is_bare = 0;
769 uint_t need_to_zero = 1;
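/*
 * Once stealing is possible (post boot) we use KM_NOSLEEP and fall back
 * to htable_steal() instead of blocking for memory.
 */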
770 int kmflags = (can_steal_post_boot ? KM_NOSLEEP : KM_SLEEP);
771
772 if (level < 0 || level > TOP_LEVEL(hat))
773 panic("htable_alloc(): level %d out of range\n", level);
774
775 is_vlp = (hat->hat_flags & HAT_VLP) && level == VLP_LEVEL;
776 if (is_vlp || shared != NULL)
777 is_bare = 1;
778
779 /*
780 * First reuse a cached htable from the hat_ht_cached field, this
781 * avoids unnecessary trips through kmem/page allocators.
782 */
783 if (hat->hat_ht_cached != NULL && !is_bare) {
784 hat_enter(hat);
785 ht = hat->hat_ht_cached;
786 if (ht != NULL) {
787 hat->hat_ht_cached = ht->ht_next;
788 need_to_zero = 0;
789 /* XX64 ASSERT() they're all zero somehow */
790 ASSERT(ht->ht_pfn != PFN_INVALID);
791 }
792 hat_exit(hat);
793 }
794
795 if (ht == NULL) {
796 /*
797 * Allocate an htable, possibly refilling the reserves.
798 */
799 if (USE_HAT_RESERVES()) {
800 ht = htable_get_reserve();
801 } else {
802 /*
803 * Donate successful htable allocations to the reserve.
804 */
805 for (;;) {
806 ht = kmem_cache_alloc(htable_cache, kmflags);
807 if (ht == NULL)
808 break;
809 ht->ht_pfn = PFN_INVALID;
810 if (USE_HAT_RESERVES() ||
811 htable_reserve_cnt >= htable_reserve_amount)
812 break;
813 htable_put_reserve(ht);
814 }
815 }
816
817 /*
818 * allocate a page for the hardware page table if needed
819 */
820 if (ht != NULL && !is_bare) {
821 ht->ht_hat = hat;
822 ht->ht_pfn = ptable_alloc((uintptr_t)ht);
823 if (ht->ht_pfn == PFN_INVALID) {
824 if (USE_HAT_RESERVES())
825 htable_put_reserve(ht);
826 else
827 kmem_cache_free(htable_cache, ht);
828 ht = NULL;
829 }
830 }
831 }
832
833 /*
834 * If allocations failed, kick off a kmem_reap() and resort to
835 * htable_steal(). We may spin here if the system is very low on
836 * memory. If the kernel itself has consumed all memory and kmem_reap()
837 * can't free up anything, then we'll really get stuck here.
838 * That should only happen in a system where the administrator has
839 * misconfigured VM parameters via /etc/system.
840 */
841 while (ht == NULL && can_steal_post_boot) {
842 kmem_reap();
843 ht = htable_steal(1, B_FALSE);
844 HATSTAT_INC(hs_steals);
845
846 /*
847 * If we stole for a bare htable, release the pagetable page.
848 */
849 if (ht != NULL) {
850 if (is_bare) {
851 ptable_free(ht->ht_pfn);
852 ht->ht_pfn = PFN_INVALID;
853 #if defined(__xpv) && defined(__amd64)
854 /*
855 * make stolen page table writable again in kpm
856 */
857 } else if (kpm_vbase && xen_kpm_page(ht->ht_pfn,
858 PT_VALID | PT_WRITABLE) < 0) {
859 panic("failure making kpm r/w pfn=0x%lx",
860 ht->ht_pfn);
861 #endif
862 }
863 }
864 }
865
866 /*
867 * All attempts to allocate or steal failed. This should only happen
868 * if we run out of memory during boot, due perhaps to a huge
869 * boot_archive. At this point there's no way to continue.
870 */
871 if (ht == NULL)
872 panic("htable_alloc(): couldn't steal\n");
873
874 #if defined(__amd64) && defined(__xpv)
875 /*
876 * Under the 64-bit hypervisor, we have 2 top level page tables.
877 * If this allocation fails, we'll resort to stealing.
878 * We use the stolen page indirectly, by freeing the
879 * stolen htable first.
880 */
881 if (level == mmu.max_level) {
882 for (;;) {
883 htable_t *stolen;
884
885 hat->hat_user_ptable = ptable_alloc((uintptr_t)ht + 1);
886 if (hat->hat_user_ptable != PFN_INVALID)
887 break;
888 stolen = htable_steal(1, B_FALSE);
889 if (stolen == NULL)
890 panic("2nd steal ptable failed\n");
891 htable_free(stolen);
892 }
893 block_zero_no_xmm(kpm_vbase + pfn_to_pa(hat->hat_user_ptable),
894 MMU_PAGESIZE);
895 }
896 #endif
897
898 /*
899 * Shared page tables have all entries locked and entries may not
900 * be added or deleted.
901 */
902 ht->ht_flags = 0;
903 if (shared != NULL) {
904 ASSERT(shared->ht_valid_cnt > 0);
905 ht->ht_flags |= HTABLE_SHARED_PFN;
906 ht->ht_pfn = shared->ht_pfn;
907 ht->ht_lock_cnt = 0;
908 ht->ht_valid_cnt = 0; /* updated in hat_share() */
909 ht->ht_shares = shared;
910 need_to_zero = 0;
911 } else {
912 ht->ht_shares = NULL;
913 ht->ht_lock_cnt = 0;
914 ht->ht_valid_cnt = 0;
915 }
916
917 /*
918 * setup flags, etc. for VLP htables
919 */
920 if (is_vlp) {
921 ht->ht_flags |= HTABLE_VLP;
922 ASSERT(ht->ht_pfn == PFN_INVALID);
923 need_to_zero = 0;
924 }
925
926 /*
927 * fill in the htable
928 */
929 ht->ht_hat = hat;
930 ht->ht_parent = NULL;
931 ht->ht_vaddr = vaddr;
932 ht->ht_level = level;
933 ht->ht_busy = 1;
934 ht->ht_next = NULL;
935 ht->ht_prev = NULL;
936
937 /*
938 * Zero out any freshly allocated page table
939 */
940 if (need_to_zero)
941 x86pte_zero(ht, 0, mmu.ptes_per_table);
942
943 #if defined(__amd64) && defined(__xpv)
944 if (!is_bare && kpm_vbase) {
945 (void) xen_kpm_page(ht->ht_pfn, PT_VALID);
946 if (level == mmu.max_level)
947 (void) xen_kpm_page(hat->hat_user_ptable, PT_VALID);
948 }
949 #endif
950
951 return (ht);
952 }
953
954 /*
955 * Free up an htable, either to a hat's cached list, the reserves or
956 * back to kmem.
957 */
958 static void
959 htable_free(htable_t *ht)
960 {
961 hat_t *hat = ht->ht_hat;
962
963 /*
964 * If the process isn't exiting, cache the free htable in the hat
965 * structure. We always do this for the boot time reserve. We don't
966 * do this if the hat is exiting or we are stealing/reaping htables.
967 */
968 if (hat != NULL &&
969 !(ht->ht_flags & HTABLE_SHARED_PFN) &&
970 (use_boot_reserve ||
971 (!(hat->hat_flags & HAT_FREEING) && !htable_dont_cache))) {
972 ASSERT((ht->ht_flags & HTABLE_VLP) == 0);
973 ASSERT(ht->ht_pfn != PFN_INVALID);
974 hat_enter(hat);
975 ht->ht_next = hat->hat_ht_cached;
976 hat->hat_ht_cached = ht;
977 hat_exit(hat);
978 return;
979 }
980
981 /*
982 * If we have a hardware page table, free it.
983 * We don't free page tables that are accessed by sharing.
984 */
985 if (ht->ht_flags & HTABLE_SHARED_PFN) {
986 ASSERT(ht->ht_pfn != PFN_INVALID);
987 } else if (!(ht->ht_flags & HTABLE_VLP)) {
988 ptable_free(ht->ht_pfn);
989 #if defined(__amd64) && defined(__xpv)
990 if (ht->ht_level == mmu.max_level && hat != NULL) {
991 ptable_free(hat->hat_user_ptable);
992 hat->hat_user_ptable = PFN_INVALID;
993 }
994 #endif
995 }
996 ht->ht_pfn = PFN_INVALID;
997
998 /*
999 * Free it or put into reserves.
1000 */
1001 if (USE_HAT_RESERVES() || htable_reserve_cnt < htable_reserve_amount) {
1002 htable_put_reserve(ht);
1003 } else {
1004 kmem_cache_free(htable_cache, ht);
1005 htable_adjust_reserve();
1006 }
1007 }
1008
1009
1010 /*
1011 * This is called when a hat is being destroyed or swapped out. We reap all
1012 * the remaining htables in the hat cache. If the hat is being destroyed,
1013 * all left over htables are also destroyed.
1014 *
1015 * We also don't need to invalidate any of the PTPs nor do any demapping.
1016 */
1017 void
1018 htable_purge_hat(hat_t *hat)
1019 {
1020 htable_t *ht;
1021 int h;
1022
1023 /*
1024 * Purge the htable cache if just reaping.
1025 */
1026 if (!(hat->hat_flags & HAT_FREEING)) {
1027 atomic_inc_32(&htable_dont_cache);
1028 for (;;) {
1029 hat_enter(hat);
1030 ht = hat->hat_ht_cached;
1031 if (ht == NULL) {
1032 hat_exit(hat);
1033 break;
1034 }
1035 hat->hat_ht_cached = ht->ht_next;
1036 hat_exit(hat);
1037 htable_free(ht);
1038 }
1039 atomic_dec_32(&htable_dont_cache);
1040 return;
1041 }
1042
1043 /*
1044 * if freeing, no locking is needed
1045 */
1046 while ((ht = hat->hat_ht_cached) != NULL) {
1047 hat->hat_ht_cached = ht->ht_next;
1048 htable_free(ht);
1049 }
1050
1051 /*
1052 * walk thru the htable hash table and free all the htables in it.
1053 */
1054 for (h = 0; h < hat->hat_num_hash; ++h) {
1055 while ((ht = hat->hat_ht_hash[h]) != NULL) {
1056 if (ht->ht_next)
1057 ht->ht_next->ht_prev = ht->ht_prev;
1058
1059 if (ht->ht_prev) {
1060 ht->ht_prev->ht_next = ht->ht_next;
1061 } else {
1062 ASSERT(hat->hat_ht_hash[h] == ht);
1063 hat->hat_ht_hash[h] = ht->ht_next;
1064 }
1065 htable_free(ht);
1066 }
1067 }
1068 }
1069
1070 /*
1071 * Unlink an entry for a table at vaddr and level out of the existing table
1072 * one level higher. We are always holding the HASH_ENTER() when doing this.
1073 */
1074 static void
1075 unlink_ptp(htable_t *higher, htable_t *old, uintptr_t vaddr)
1076 {
1077 uint_t entry = htable_va2entry(vaddr, higher);
1078 x86pte_t expect = MAKEPTP(old->ht_pfn, old->ht_level);
1079 x86pte_t found;
1080 hat_t *hat = old->ht_hat;
1081
1082 ASSERT(higher->ht_busy > 0);
1083 ASSERT(higher->ht_valid_cnt > 0);
1084 ASSERT(old->ht_valid_cnt == 0);
1085 found = x86pte_cas(higher, entry, expect, 0);
1086 #ifdef __xpv
1087 /*
1088 * This is weird, but Xen apparently automatically unlinks empty
1089 * pagetables from the upper page table. So allow PTP to be 0 already.
1090 */
1091 if (found != expect && found != 0)
1092 #else
1093 if (found != expect)
1094 #endif
1095 panic("Bad PTP found=" FMT_PTE ", expected=" FMT_PTE,
1096 found, expect);
1097
1098 /*
1099 * When a top level VLP page table entry changes, we must issue
1100 * a reload of cr3 on all processors.
1101 *
1102 * If we don't need to do that, then we still have to INVLPG against
1103 * an address covered by the inner page table, as the latest processors
1104 * have TLB-like caches for non-leaf page table entries.
1105 */
1106 if (!(hat->hat_flags & HAT_FREEING)) {
1107 hat_tlb_inval(hat, (higher->ht_flags & HTABLE_VLP) ?
1108 DEMAP_ALL_ADDR : old->ht_vaddr);
1109 }
1110
1111 HTABLE_DEC(higher->ht_valid_cnt);
1112 }
1113
1114 /*
1115 * Link an entry for a new table at vaddr and level into the existing table
1116 * one level higher. We are always holding the HASH_ENTER() when doing this.
1117 */
1118 static void
1119 link_ptp(htable_t *higher, htable_t *new, uintptr_t vaddr)
1120 {
1121 uint_t entry = htable_va2entry(vaddr, higher);
1122 x86pte_t newptp = MAKEPTP(new->ht_pfn, new->ht_level);
1123 x86pte_t found;
1124
1125 ASSERT(higher->ht_busy > 0);
1126
1127 ASSERT(new->ht_level != mmu.max_level);
1128
1129 HTABLE_INC(higher->ht_valid_cnt);
1130
1131 found = x86pte_cas(higher, entry, 0, newptp);
1132 if ((found & ~PT_REF) != 0)
1133 panic("HAT: ptp not 0, found=" FMT_PTE, found);
1134
1135 /*
1136 * When any top level VLP page table entry changes, we must issue
1137 * a reload of cr3 on all processors using it.
1138 * We also need to do this for the kernel hat on PAE 32 bit kernel.
1139 */
1140 if (
1141 #ifdef __i386
1142 (higher->ht_hat == kas.a_hat && higher->ht_level == VLP_LEVEL) ||
1143 #endif
1144 (higher->ht_flags & HTABLE_VLP))
1145 hat_tlb_inval(higher->ht_hat, DEMAP_ALL_ADDR);
1146 }
1147
1148 /*
1149 * Release of hold on an htable. If this is the last use and the pagetable
1150 * is empty we may want to free it, then recursively look at the pagetable
1151 * above it. The recursion is handled by the outer while() loop.
1152 *
1153 * On the metal, during process exit, we don't bother unlinking the tables from
1154 * upper level pagetables. They are instead handled in bulk by hat_free_end().
1155 * We can't do this on the hypervisor as we need the page table to be
1156 * implicitly unpinned before it goes to the free page lists. This can't
1157 * happen unless we fully unlink it from the page table hierarchy.
1158 */
1159 void
1160 htable_release(htable_t *ht)
1161 {
1162 uint_t hashval;
1163 htable_t *shared;
1164 htable_t *higher;
1165 hat_t *hat;
1166 uintptr_t va;
1167 level_t level;
1168
1169 while (ht != NULL) {
1170 shared = NULL;
1171 for (;;) {
1172 hat = ht->ht_hat;
1173 va = ht->ht_vaddr;
1174 level = ht->ht_level;
1175 hashval = HTABLE_HASH(hat, va, level);
1176
1177 /*
1178 * The common case is that this isn't the last use of
1179 * an htable so we don't want to free the htable.
1180 */
1181 HTABLE_ENTER(hashval);
1182 ASSERT(ht->ht_valid_cnt >= 0);
1183 ASSERT(ht->ht_busy > 0);
1184 if (ht->ht_valid_cnt > 0)
1185 break;
1186 if (ht->ht_busy > 1)
1187 break;
1188 ASSERT(ht->ht_lock_cnt == 0);
1189
1190 #if !defined(__xpv)
1191 /*
1192 * we always release empty shared htables
1193 */
1194 if (!(ht->ht_flags & HTABLE_SHARED_PFN)) {
1195
1196 /*
1197 * don't release if in address space tear down
1198 */
1199 if (hat->hat_flags & HAT_FREEING)
1200 break;
1201
1202 /*
1203 * At and above max_page_level, free if it's for
1204 * a boot-time kernel mapping below kernelbase.
1205 */
1206 if (level >= mmu.max_page_level &&
1207 (hat != kas.a_hat || va >= kernelbase))
1208 break;
1209 }
1210 #endif /* __xpv */
1211
1212 /*
1213 * Remember if we destroy an htable that shares its PFN
1214 * from elsewhere.
1215 */
1216 if (ht->ht_flags & HTABLE_SHARED_PFN) {
1217 ASSERT(shared == NULL);
1218 shared = ht->ht_shares;
1219 HATSTAT_INC(hs_htable_unshared);
1220 }
1221
1222 /*
1223 * Handle release of a table and freeing the htable_t.
1224 * Unlink it from the table higher (ie. ht_parent).
1225 */
1226 higher = ht->ht_parent;
1227 ASSERT(higher != NULL);
1228
1229 /*
1230 * Unlink the pagetable.
1231 */
1232 unlink_ptp(higher, ht, va);
1233
1234 /*
1235 * remove this htable from its hash list
1236 */
1237 if (ht->ht_next)
1238 ht->ht_next->ht_prev = ht->ht_prev;
1239
1240 if (ht->ht_prev) {
1241 ht->ht_prev->ht_next = ht->ht_next;
1242 } else {
1243 ASSERT(hat->hat_ht_hash[hashval] == ht);
1244 hat->hat_ht_hash[hashval] = ht->ht_next;
1245 }
1246 HTABLE_EXIT(hashval);
1247 htable_free(ht);
1248 ht = higher;
1249 }
1250
1251 ASSERT(ht->ht_busy >= 1);
1252 --ht->ht_busy;
1253 HTABLE_EXIT(hashval);
1254
1255 /*
1256 * If we released a shared htable, do a release on the htable
1257 * from which it shared
1258 */
1259 ht = shared;
1260 }
1261 }
1262
1263 /*
1264 * Find the htable for the pagetable at the given level for the given address.
1265 * If found acquires a hold that eventually needs to be htable_release()d
1266 */
1267 htable_t *
1268 htable_lookup(hat_t *hat, uintptr_t vaddr, level_t level)
1269 {
1270 uintptr_t base;
1271 uint_t hashval;
1272 htable_t *ht = NULL;
1273
1274 ASSERT(level >= 0);
1275 ASSERT(level <= TOP_LEVEL(hat));
1276
1277 if (level == TOP_LEVEL(hat)) {
1278 #if defined(__amd64)
1279 /*
1280 * 32 bit address spaces on 64 bit kernels need to check
1281 * for overflow of the 32 bit address space
1282 */
1283 if ((hat->hat_flags & HAT_VLP) && vaddr >= ((uint64_t)1 << 32))
1284 return (NULL);
1285 #endif
1286 base = 0;
1287 } else {
1288 base = vaddr & LEVEL_MASK(level + 1);
1289 }
1290
1291 hashval = HTABLE_HASH(hat, base, level);
1292 HTABLE_ENTER(hashval);
1293 for (ht = hat->hat_ht_hash[hashval]; ht; ht = ht->ht_next) {
1294 if (ht->ht_hat == hat &&
1295 ht->ht_vaddr == base &&
1296 ht->ht_level == level)
1297 break;
1298 }
1299 if (ht)
1300 ++ht->ht_busy;
1301
1302 HTABLE_EXIT(hashval);
1303 return (ht);
1304 }
1305
1306 /*
1307 * Acquires a hold on a known htable (from a locked hment entry).
1308 */
1309 void
1310 htable_acquire(htable_t *ht)
1311 {
1312 hat_t *hat = ht->ht_hat;
1313 level_t level = ht->ht_level;
1314 uintptr_t base = ht->ht_vaddr;
1315 uint_t hashval = HTABLE_HASH(hat, base, level);
1316
1317 HTABLE_ENTER(hashval);
1318 #ifdef DEBUG
1319 /*
1320 * make sure the htable is there
1321 */
1322 {
1323 htable_t *h;
1324
1325 for (h = hat->hat_ht_hash[hashval];
1326 h && h != ht;
1327 h = h->ht_next)
1328 ;
1329 ASSERT(h == ht);
1330 }
1331 #endif /* DEBUG */
1332 ++ht->ht_busy;
1333 HTABLE_EXIT(hashval);
1334 }
1335
1336 /*
1337 * Find the htable for the pagetable at the given level for the given address.
1338 * If found acquires a hold that eventually needs to be htable_release()d
1339 * If not found the table is created.
1340 *
1341 * Since we can't hold a hash table mutex during allocation, we have to
1342 * drop it and redo the search on a create. Then we may have to free the newly
1343 * allocated htable if another thread raced in and created it ahead of us.
1344 */
1345 htable_t *
1346 htable_create(
1347 hat_t *hat,
1348 uintptr_t vaddr,
1349 level_t level,
1350 htable_t *shared)
1351 {
1352 uint_t h;
1353 level_t l;
1354 uintptr_t base;
1355 htable_t *ht;
1356 htable_t *higher = NULL;
1357 htable_t *new = NULL;
1358
1359 if (level < 0 || level > TOP_LEVEL(hat))
1360 panic("htable_create(): level %d out of range\n", level);
1361
1362 /*
1363 * Create the page tables in top down order.
1364 */
1365 for (l = TOP_LEVEL(hat); l >= level; --l) {
1366 new = NULL;
1367 if (l == TOP_LEVEL(hat))
1368 base = 0;
1369 else
1370 base = vaddr & LEVEL_MASK(l + 1);
1371
1372 h = HTABLE_HASH(hat, base, l);
1373 try_again:
1374 /*
1375 * look up the htable at this level
1376 */
1377 HTABLE_ENTER(h);
1378 if (l == TOP_LEVEL(hat)) {
1379 ht = hat->hat_htable;
1380 } else {
1381 for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
1382 ASSERT(ht->ht_hat == hat);
1383 if (ht->ht_vaddr == base &&
1384 ht->ht_level == l)
1385 break;
1386 }
1387 }
1388
1389 /*
1390 * if we found the htable, increment its busy cnt
1391 * and if we had allocated a new htable, free it.
1392 */
1393 if (ht != NULL) {
1394 /*
1395 * If we find a pre-existing shared table, it must
1396 * share from the same place.
1397 */
1398 if (l == level && shared && ht->ht_shares &&
1399 ht->ht_shares != shared) {
1400 panic("htable shared from wrong place "
1401 "found htable=%p shared=%p",
1402 (void *)ht, (void *)shared);
1403 }
1404 ++ht->ht_busy;
1405 HTABLE_EXIT(h);
1406 if (new)
1407 htable_free(new);
1408 if (higher != NULL)
1409 htable_release(higher);
1410 higher = ht;
1411
1412 /*
1413 * if we didn't find it on the first search
1414 * allocate a new one and search again
1415 */
1416 } else if (new == NULL) {
1417 HTABLE_EXIT(h);
1418 new = htable_alloc(hat, base, l,
1419 l == level ? shared : NULL);
1420 goto try_again;
1421
1422 /*
1423 * 2nd search and still not there, use "new" table
1424 * Link new table into higher, when not at top level.
1425 */
1426 } else {
1427 ht = new;
1428 if (higher != NULL) {
1429 link_ptp(higher, ht, base);
1430 ht->ht_parent = higher;
1431 }
1432 ht->ht_next = hat->hat_ht_hash[h];
1433 ASSERT(ht->ht_prev == NULL);
1434 if (hat->hat_ht_hash[h])
1435 hat->hat_ht_hash[h]->ht_prev = ht;
1436 hat->hat_ht_hash[h] = ht;
1437 HTABLE_EXIT(h);
1438
1439 /*
1440 * Note we don't do htable_release(higher).
1441 * That happens recursively when "new" is removed by
1442 * htable_release() or htable_steal().
1443 */
1444 higher = ht;
1445
1446 /*
1447 * If we just created a new shared page table we
1448 * increment the shared htable's busy count, so that
1449 * it can't be the victim of a steal even if it's empty.
1450 */
1451 if (l == level && shared) {
1452 (void) htable_lookup(shared->ht_hat,
1453 shared->ht_vaddr, shared->ht_level);
1454 HATSTAT_INC(hs_htable_shared);
1455 }
1456 }
1457 }
1458
1459 return (ht);
1460 }
1461
1462 /*
1463 * Inherit initial pagetables from the boot program. On the 64-bit
1464 * hypervisor we also temporarily mark the p_index field of page table
1465 * pages, so we know not to try making them writable in seg_kpm.
1466 */
1467 void
1468 htable_attach(
1469 hat_t *hat,
1470 uintptr_t base,
1471 level_t level,
1472 htable_t *parent,
1473 pfn_t pfn)
1474 {
1475 htable_t *ht;
1476 uint_t h;
1477 uint_t i;
1478 x86pte_t pte;
1479 x86pte_t *ptep;
1480 page_t *pp;
1481 extern page_t *boot_claim_page(pfn_t);
1482
1483 ht = htable_get_reserve();
1484 if (level == mmu.max_level)
1485 kas.a_hat->hat_htable = ht;
1486 ht->ht_hat = hat;
1487 ht->ht_parent = parent;
1488 ht->ht_vaddr = base;
1489 ht->ht_level = level;
1490 ht->ht_busy = 1;
1491 ht->ht_next = NULL;
1492 ht->ht_prev = NULL;
1493 ht->ht_flags = 0;
1494 ht->ht_pfn = pfn;
1495 ht->ht_lock_cnt = 0;
1496 ht->ht_valid_cnt = 0;
1497 if (parent != NULL)
1498 ++parent->ht_busy;
1499
1500 h = HTABLE_HASH(hat, base, level);
1501 HTABLE_ENTER(h);
1502 ht->ht_next = hat->hat_ht_hash[h];
1503 ASSERT(ht->ht_prev == NULL);
1504 if (hat->hat_ht_hash[h])
1505 hat->hat_ht_hash[h]->ht_prev = ht;
1506 hat->hat_ht_hash[h] = ht;
1507 HTABLE_EXIT(h);
1508
1509 /*
1510 * make sure the page table physical page is not FREE
1511 */
1512 if (page_resv(1, KM_NOSLEEP) == 0)
1513 panic("page_resv() failed in ptable alloc");
1514
1515 pp = boot_claim_page(pfn);
1516 ASSERT(pp != NULL);
1517
1518 /*
1519 * Page table pages that were allocated by dboot or
1520 * in very early startup didn't go through boot_mapin()
1521 * and so won't have vnode/offsets. Fix that here.
1522 */
1523 if (pp->p_vnode == NULL) {
1524 /* match offset calculation in page_get_physical() */
1525 u_offset_t offset = (uintptr_t)ht;
1526 if (offset > kernelbase)
1527 offset -= kernelbase;
1528 offset <<= MMU_PAGESHIFT;
1529 #if defined(__amd64)
1530 offset += mmu.hole_start; /* something in VA hole */
1531 #else
1532 offset += 1ULL << 40; /* something > 4 Gig */
1533 #endif
1534 ASSERT(page_exists(&kvp, offset) == NULL);
1535 (void) page_hashin(pp, &kvp, offset, NULL);
1536 }
1537 page_downgrade(pp);
1538 #if defined(__xpv) && defined(__amd64)
1539 /*
1540 * Record in the page_t that is a pagetable for segkpm setup.
1541 */
1542 if (kpm_vbase)
1543 pp->p_index = 1;
1544 #endif
1545
1546 /*
1547 * Count valid mappings and recursively attach lower level pagetables.
1548 */
1549 ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
1550 for (i = 0; i < HTABLE_NUM_PTES(ht); ++i) {
1551 if (mmu.pae_hat)
1552 pte = ptep[i];
1553 else
1554 pte = ((x86pte32_t *)ptep)[i];
1555 if (!IN_HYPERVISOR_VA(base) && PTE_ISVALID(pte)) {
1556 ++ht->ht_valid_cnt;
1557 if (!PTE_ISPAGE(pte, level)) {
1558 htable_attach(hat, base, level - 1,
1559 ht, PTE2PFN(pte, level));
1560 ptep = kbm_remap_window(pfn_to_pa(pfn), 0);
1561 }
1562 }
1563 base += LEVEL_SIZE(level);
1564 if (base == mmu.hole_start)
1565 base = (mmu.hole_end + MMU_PAGEOFFSET) & MMU_PAGEMASK;
1566 }
1567
1568 /*
1569 * As long as all the mappings we had were below kernel base
1570 * we can release the htable.
1571 */
1572 if (base < kernelbase)
1573 htable_release(ht);
1574 }
1575
1576 /*
1577 * Walk through a given htable looking for the first valid entry. This
1578 * routine takes both a starting and ending address. The starting address
1579 * is required to be within the htable provided by the caller, but there is
1580 * no such restriction on the ending address.
1581 *
1582 * If the routine finds a valid entry in the htable (at or beyond the
1583 * starting address), the PTE (and its address) will be returned.
1584 * This PTE may correspond to either a page or a pagetable - it is the
1585 * caller's responsibility to determine which. If no valid entry is
1586 * found, 0 (and invalid PTE) and the next unexamined address will be
1587 * returned.
1588 *
1589 * The loop has been carefully coded for optimization.
1590 */
1591 static x86pte_t
1592 htable_scan(htable_t *ht, uintptr_t *vap, uintptr_t eaddr)
1593 {
1594 uint_t e;
1595 x86pte_t found_pte = (x86pte_t)0;
1596 caddr_t pte_ptr;
1597 caddr_t end_pte_ptr;
1598 int l = ht->ht_level;
1599 uintptr_t va = *vap & LEVEL_MASK(l);
1600 size_t pgsize = LEVEL_SIZE(l);
1601
1602 ASSERT(va >= ht->ht_vaddr);
1603 ASSERT(va <= HTABLE_LAST_PAGE(ht));
1604
1605 /*
1606 * Compute the starting index and ending virtual address
1607 */
1608 e = htable_va2entry(va, ht);
1609
1610 /*
1611 * The following page table scan code knows that the valid
1612 * bit of a PTE is in the lowest byte AND that x86 is little endian!!
1613 */
1614 pte_ptr = (caddr_t)x86pte_access_pagetable(ht, 0);
1615 end_pte_ptr = (caddr_t)PT_INDEX_PTR(pte_ptr, HTABLE_NUM_PTES(ht));
1616 pte_ptr = (caddr_t)PT_INDEX_PTR((x86pte_t *)pte_ptr, e);
1617 while (!PTE_ISVALID(*pte_ptr)) {
1618 va += pgsize;
1619 if (va >= eaddr)
1620 break;
1621 pte_ptr += mmu.pte_size;
1622 ASSERT(pte_ptr <= end_pte_ptr);
1623 if (pte_ptr == end_pte_ptr)
1624 break;
1625 }
1626
1627 /*
1628 * if we found a valid PTE, load the entire PTE
1629 */
1630 if (va < eaddr && pte_ptr != end_pte_ptr)
1631 found_pte = GET_PTE((x86pte_t *)pte_ptr);
1632 x86pte_release_pagetable(ht);
1633
1634 #if defined(__amd64)
1635 /*
1636 * deal with VA hole on amd64
1637 */
1638 if (l == mmu.max_level && va >= mmu.hole_start && va <= mmu.hole_end)
1639 va = mmu.hole_end + va - mmu.hole_start;
1640 #endif /* __amd64 */
1641
1642 *vap = va;
1643 return (found_pte);
1644 }
1645
1646 /*
1647 * Find the address and htable for the first populated translation at or
1648 * above the given virtual address. The caller may also specify an upper
1649 * limit to the address range to search. Uses level information to quickly
1650 * skip unpopulated sections of virtual address spaces.
1651 *
1652 * If not found returns NULL. When found, returns the htable and virt addr
1653 * and has a hold on the htable.
1654 */
1655 x86pte_t
1656 htable_walk(
1657 struct hat *hat,
1658 htable_t **htp,
1659 uintptr_t *vaddr,
1660 uintptr_t eaddr)
1661 {
1662 uintptr_t va = *vaddr;
1663 htable_t *ht;
1664 htable_t *prev = *htp;
1665 level_t l;
1666 level_t max_mapped_level;
1667 x86pte_t pte;
1668
1669 ASSERT(eaddr > va);
1670
1671 /*
1672 * If this is a user address, then we know we need not look beyond
1673 * kernelbase.
1674 */
1675 ASSERT(hat == kas.a_hat || eaddr <= kernelbase ||
1676 eaddr == HTABLE_WALK_TO_END);
1677 if (hat != kas.a_hat && eaddr == HTABLE_WALK_TO_END)
1678 eaddr = kernelbase;
1679
1680 /*
1681 * If we're coming in with a previous page table, search it first
1682 * without doing an htable_lookup(), this should be frequent.
1683 */
1684 if (prev) {
1685 ASSERT(prev->ht_busy > 0);
1686 ASSERT(prev->ht_vaddr <= va);
1687 l = prev->ht_level;
1688 if (va <= HTABLE_LAST_PAGE(prev)) {
1689 pte = htable_scan(prev, &va, eaddr);
1690
1691 if (PTE_ISPAGE(pte, l)) {
1692 *vaddr = va;
1693 *htp = prev;
1694 return (pte);
1695 }
1696 }
1697
1698 /*
1699 * We found nothing in the htable provided by the caller,
1700 * so fall through and do the full search
1701 */
1702 htable_release(prev);
1703 }
1704
1705 /*
1706 * Find the level of the largest pagesize used by this HAT.
1707 */
1708 if (hat->hat_ism_pgcnt > 0) {
1709 max_mapped_level = mmu.umax_page_level;
1710 } else {
1711 max_mapped_level = 0;
1712 for (l = 1; l <= mmu.max_page_level; ++l)
1713 if (hat->hat_pages_mapped[l] != 0)
1714 max_mapped_level = l;
1715 }
1716
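/*
 * The "va >= *vaddr" test terminates the walk if va wraps around the top
 * of the address space.
 */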
1717 while (va < eaddr && va >= *vaddr) {
1718 /*
1719 * Find lowest table with any entry for given address.
1720 */
1721 for (l = 0; l <= TOP_LEVEL(hat); ++l) {
1722 ht = htable_lookup(hat, va, l);
1723 if (ht != NULL) {
1724 pte = htable_scan(ht, &va, eaddr);
1725 if (PTE_ISPAGE(pte, l)) {
1726 VERIFY(!IN_VA_HOLE(va));
1727 *vaddr = va;
1728 *htp = ht;
1729 return (pte);
1730 }
1731 htable_release(ht);
1732 break;
1733 }
1734
1735 /*
1736 * No htable at this level for the address. If there
1737 * is no larger page size that could cover it, we can
1738 * skip right to the start of the next page table.
1739 */
1740 ASSERT(l < TOP_LEVEL(hat));
1741 if (l >= max_mapped_level) {
1742 va = NEXT_ENTRY_VA(va, l + 1);
1743 if (va >= eaddr)
1744 break;
1745 }
1746 }
1747 }
1748
1749 *vaddr = 0;
1750 *htp = NULL;
1751 return (0);
1752 }
1753
1754 /*
1755 * Find the htable and page table entry index of the given virtual address
1756 * with pagesize at or below given level.
1757 * If not found returns NULL. When found, returns the htable, sets
1758 * entry, and has a hold on the htable.
1759 */
1760 htable_t *
1761 htable_getpte(
1762 struct hat *hat,
1763 uintptr_t vaddr,
1764 uint_t *entry,
1765 x86pte_t *pte,
1766 level_t level)
1767 {
1768 htable_t *ht;
1769 level_t l;
1770 uint_t e;
1771
1772 ASSERT(level <= mmu.max_page_level);
1773
1774 for (l = 0; l <= level; ++l) {
1775 ht = htable_lookup(hat, vaddr, l);
1776 if (ht == NULL)
1777 continue;
1778 e = htable_va2entry(vaddr, ht);
1779 if (entry != NULL)
1780 *entry = e;
1781 if (pte != NULL)
1782 *pte = x86pte_get(ht, e);
1783 return (ht);
1784 }
1785 return (NULL);
1786 }
1787
1788 /*
1789 * Find the htable and page table entry index of the given virtual address.
1790 * There must be a valid page mapped at the given address.
1791 * If not found returns NULL. When found, returns the htable, sets
1792 * entry, and has a hold on the htable.
1793 */
1794 htable_t *
1795 htable_getpage(struct hat *hat, uintptr_t vaddr, uint_t *entry)
1796 {
1797 htable_t *ht;
1798 uint_t e;
1799 x86pte_t pte;
1800
1801 ht = htable_getpte(hat, vaddr, &e, &pte, mmu.max_page_level);
1802 if (ht == NULL)
1803 return (NULL);
1804
1805 if (entry)
1806 *entry = e;
1807
1808 if (PTE_ISPAGE(pte, ht->ht_level))
1809 return (ht);
1810 htable_release(ht);
1811 return (NULL);
1812 }
1813
1814
1815 void
1816 htable_init()
1817 {
1818 /*
1819 * To save on kernel VA usage, we avoid debug information in 32 bit
1820 * kernels.
1821 */
1822 #if defined(__amd64)
1823 int kmem_flags = KMC_NOHASH;
1824 #elif defined(__i386)
1825 int kmem_flags = KMC_NOHASH | KMC_NODEBUG;
1826 #endif
1827
1828 /*
1829 * initialize kmem caches
1830 */
1831 htable_cache = kmem_cache_create("htable_t",
1832 sizeof (htable_t), 0, NULL, NULL,
1833 htable_reap, NULL, hat_memload_arena, kmem_flags);
1834 }
1835
1836 /*
1837 * get the pte index for the virtual address in the given htable's pagetable
1838 */
1839 uint_t
1840 htable_va2entry(uintptr_t va, htable_t *ht)
1841 {
1842 level_t l = ht->ht_level;
1843
1844 ASSERT(va >= ht->ht_vaddr);
1845 ASSERT(va <= HTABLE_LAST_PAGE(ht));
1846 return ((va >> LEVEL_SHIFT(l)) & (HTABLE_NUM_PTES(ht) - 1));
1847 }
1848
1849 /*
1850 * Given an htable and the index of a pte in it, return the virtual address
1851 * of the page.
1852 */
1853 uintptr_t
1854 htable_e2va(htable_t *ht, uint_t entry)
1855 {
1856 level_t l = ht->ht_level;
1857 uintptr_t va;
1858
1859 ASSERT(entry < HTABLE_NUM_PTES(ht));
1860 va = ht->ht_vaddr + ((uintptr_t)entry << LEVEL_SHIFT(l));
1861
1862 /*
1863 * Need to skip over any VA hole in top level table
1864 */
1865 #if defined(__amd64)
1866 if (ht->ht_level == mmu.max_level && va >= mmu.hole_start)
1867 va += ((mmu.hole_end - mmu.hole_start) + 1);
1868 #endif
1869
1870 return (va);
1871 }
1872
1873 /*
1874 * The code uses compare and swap instructions to read/write PTE's to
1875 * avoid atomicity problems, since PTEs can be 8 bytes on 32 bit systems.
1876 * On 64 bit systems, aligned PTE loads and stores will naturally be atomic.
1877 *
1878 * The combination of using kpreempt_disable()/_enable() and the hci_mutex
1879 * are used to ensure that an interrupt won't overwrite a temporary mapping
1880 * while it's in use. If an interrupt thread tries to access a PTE, it will
1881 * yield briefly back to the pinned thread which holds the cpu's hci_mutex.
1882 */
1883 void
1884 x86pte_cpu_init(cpu_t *cpu)
1885 {
1886 struct hat_cpu_info *hci;
1887
1888 hci = kmem_zalloc(sizeof (*hci), KM_SLEEP);
1889 mutex_init(&hci->hci_mutex, NULL, MUTEX_DEFAULT, NULL);
1890 cpu->cpu_hat_info = hci;
1891 }
1892
1893 void
1894 x86pte_cpu_fini(cpu_t *cpu)
1895 {
1896 struct hat_cpu_info *hci = cpu->cpu_hat_info;
1897
1898 kmem_free(hci, sizeof (*hci));
1899 cpu->cpu_hat_info = NULL;
1900 }
1901
1902 #ifdef __i386
1903 /*
1904 * On 32 bit kernels, loading a 64 bit PTE is a little tricky
1905 */
1906 x86pte_t
1907 get_pte64(x86pte_t *ptr)
1908 {
1909 volatile uint32_t *p = (uint32_t *)ptr;
1910 x86pte_t t;
1911
1912 ASSERT(mmu.pae_hat != 0);
1913 for (;;) {
1914 t = p[0];
1915 t |= (uint64_t)p[1] << 32;
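/*
 * If the low word is unchanged, the two 32 bit reads were consistent
 * and t holds a valid 64 bit PTE.
 */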
1916 if ((t & 0xffffffff) == p[0])
1917 return (t);
1918 }
1919 }
1920 #endif /* __i386 */
1921
1922 /*
1923 * Disable preemption and establish a mapping to the pagetable with the
1924 * given pfn. This is optimized for the case where it's the same
1925 * pfn as we last referenced from this CPU.
1926 */
1927 static x86pte_t *
1928 x86pte_access_pagetable(htable_t *ht, uint_t index)
1929 {
1930 /*
1931 * VLP pagetables are contained in the hat_t
1932 */
1933 if (ht->ht_flags & HTABLE_VLP)
1934 return (PT_INDEX_PTR(ht->ht_hat->hat_vlp_ptes, index));
1935 return (x86pte_mapin(ht->ht_pfn, index, ht));
1936 }
1937
1938 /*
1939 * map the given pfn into the page table window.
1940 */
1941 /*ARGSUSED*/
1942 x86pte_t *
1943 x86pte_mapin(pfn_t pfn, uint_t index, htable_t *ht)
1944 {
1945 x86pte_t *pteptr;
1946 x86pte_t pte = 0;
1947 x86pte_t newpte;
1948 int x;
1949
1950 ASSERT(pfn != PFN_INVALID);
1951
1952 if (!khat_running) {
1953 caddr_t va = kbm_remap_window(pfn_to_pa(pfn), 1);
1954 return (PT_INDEX_PTR(va, index));
1955 }
1956
1957 /*
1958 * If kpm is available, use it.
1959 */
1960 if (kpm_vbase)
1961 return (PT_INDEX_PTR(hat_kpm_pfn2va(pfn), index));
1962
1963 /*
1964 * Disable preemption and grab the CPU's hci_mutex
1965 */
1966 kpreempt_disable();
1967 ASSERT(CPU->cpu_hat_info != NULL);
1968 mutex_enter(&CPU->cpu_hat_info->hci_mutex);
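/*
 * Locate this CPU's private mapping window and the PTE that maps it.
 */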
1969 x = PWIN_TABLE(CPU->cpu_id);
1970 pteptr = (x86pte_t *)PWIN_PTE_VA(x);
1971 #ifndef __xpv
1972 if (mmu.pae_hat)
1973 pte = *pteptr;
1974 else
1975 pte = *(x86pte32_t *)pteptr;
1976 #endif
1977
1978 newpte = MAKEPTE(pfn, 0) | mmu.pt_global | mmu.pt_nx;
1979
1980 /*
1981 * For hardware we can use a writable mapping.
1982 */
1983 #ifdef __xpv
1984 if (IN_XPV_PANIC())
1985 #endif
1986 newpte |= PT_WRITABLE;
1987
1988 if (!PTE_EQUIV(newpte, pte)) {
1989
1990 #ifdef __xpv
1991 if (!IN_XPV_PANIC()) {
1992 xen_map(newpte, PWIN_VA(x));
1993 } else
1994 #endif
1995 {
1996 XPV_ALLOW_PAGETABLE_UPDATES();
1997 if (mmu.pae_hat)
1998 *pteptr = newpte;
1999 else
2000 *(x86pte32_t *)pteptr = newpte;
2001 XPV_DISALLOW_PAGETABLE_UPDATES();
2002 mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
2003 }
2004 }
2005 return (PT_INDEX_PTR(PWIN_VA(x), index));
2006 }
2007
2008 /*
2009 * Release access to a page table.
2010 */
2011 static void
2012 x86pte_release_pagetable(htable_t *ht)
2013 {
2014 /*
2015 * nothing to do for VLP htables
2016 */
2017 if (ht->ht_flags & HTABLE_VLP)
2018 return;
2019
2020 x86pte_mapout();
2021 }
2022
2023 void
2024 x86pte_mapout(void)
2025 {
2026 if (kpm_vbase != NULL || !khat_running)
2027 return;
2028
2029 /*
2030 * Drop the CPU's hci_mutex and restore preemption.
2031 */
2032 #ifdef __xpv
2033 if (!IN_XPV_PANIC()) {
2034 uintptr_t va;
2035
2036 /*
2037 * We need to always clear the mapping in case a page
2038 * that was once a page table page is ballooned out.
2039 */
2040 va = (uintptr_t)PWIN_VA(PWIN_TABLE(CPU->cpu_id));
2041 (void) HYPERVISOR_update_va_mapping(va, 0,
2042 UVMF_INVLPG | UVMF_LOCAL);
2043 }
2044 #endif
2045 mutex_exit(&CPU->cpu_hat_info->hci_mutex);
2046 kpreempt_enable();
2047 }
2048
2049 /*
2050 * Atomic retrieval of a pagetable entry
2051 */
2052 x86pte_t
2053 x86pte_get(htable_t *ht, uint_t entry)
2054 {
2055 x86pte_t pte;
2056 x86pte_t *ptep;
2057
2058 /*
2059 * Be careful that loading PAE entries in 32 bit kernel is atomic.
2060 */
2061 ASSERT(entry < mmu.ptes_per_table);
2062 ptep = x86pte_access_pagetable(ht, entry);
2063 pte = GET_PTE(ptep);
2064 x86pte_release_pagetable(ht);
2065 return (pte);
2066 }
2067
2068 /*
2069 * Atomic unconditional set of a page table entry; it returns the previous
2070 * value. For pre-existing mappings if the PFN changes, then we don't care
2071 * about the old pte's REF / MOD bits. If the PFN remains the same, we leave
2072 * the MOD/REF bits unchanged.
2073 *
2074 * If asked to overwrite a link to a lower page table with a large page
2075 * mapping, this routine returns the special value of LPAGE_ERROR. This
2076 * allows the upper HAT layers to retry with a smaller mapping size.
2077 */
2078 x86pte_t
2079 x86pte_set(htable_t *ht, uint_t entry, x86pte_t new, void *ptr)
2080 {
2081 x86pte_t old;
2082 x86pte_t prev;
2083 x86pte_t *ptep;
2084 level_t l = ht->ht_level;
2085 x86pte_t pfn_mask = (l != 0) ? PT_PADDR_LGPG : PT_PADDR;
2086 x86pte_t n;
2087 uintptr_t addr = htable_e2va(ht, entry);
2088 hat_t *hat = ht->ht_hat;
2089
2090 ASSERT(new != 0); /* don't use to invalidate a PTE, see x86pte_update */
2091 ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
2092 if (ptr == NULL)
2093 ptep = x86pte_access_pagetable(ht, entry);
2094 else
2095 ptep = ptr;
2096
2097 /*
2098 * Install the new PTE. If remapping the same PFN, then
2099 * copy existing REF/MOD bits to new mapping.
2100 */
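        /*
         * The CAS loop retries when hardware sets REF/MOD in the entry
         * between the GET_PTE() and the CAS_PTE() below.
         */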
        do {
                prev = GET_PTE(ptep);
                n = new;
                if (PTE_ISVALID(n) && (prev & pfn_mask) == (new & pfn_mask))
                        n |= prev & (PT_REF | PT_MOD);

                /*
                 * Another thread may have installed this mapping already,
                 * flush the local TLB and be done.
                 */
                if (prev == n) {
                        old = new;
#ifdef __xpv
                        if (!IN_XPV_PANIC())
                                xen_flush_va((caddr_t)addr);
                        else
#endif
                                mmu_tlbflush_entry((caddr_t)addr);
                        goto done;
                }

                /*
                 * Detect if we have a collision of installing a large
                 * page mapping where there already is a lower page table.
                 */
                if (l > 0 && (prev & PT_VALID) && !(prev & PT_PAGESIZE)) {
                        old = LPAGE_ERROR;
                        goto done;
                }

                XPV_ALLOW_PAGETABLE_UPDATES();
                old = CAS_PTE(ptep, prev, n);
                XPV_DISALLOW_PAGETABLE_UPDATES();
        } while (old != prev);

        /*
         * Do a TLB demap if needed, ie. the old pte was valid.
         *
         * Note that a stale TLB writeback to the PTE here either can't happen
         * or doesn't matter. The PFN can only change for NOSYNC|NOCONSIST
         * mappings, but they were created with REF and MOD already set, so
         * no stale writeback will happen.
         *
         * Segmap is the only place where remaps happen on the same pfn and for
         * that we want to preserve the stale REF/MOD bits.
         */
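        /*
         * A pte that never had PT_REF set was never loaded into any TLB,
         * so the shootdown can be skipped in that case.
         */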
        if (old & PT_REF)
                hat_tlb_inval(hat, addr);

done:
        if (ptr == NULL)
                x86pte_release_pagetable(ht);
        return (old);
}

/*
 * Atomic compare and swap of a page table entry. No TLB invalidates are done.
 * This is used for links between pagetables of different levels.
 * Note we always create these links with dirty/access set, so they should
 * never change.
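 *
 * On the hypervisor the update goes through HYPERVISOR_mmu_update() rather
 * than a direct store, since upper level tables are not writable there.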
 */
x86pte_t
x86pte_cas(htable_t *ht, uint_t entry, x86pte_t old, x86pte_t new)
{
        x86pte_t pte;
        x86pte_t *ptep;
#ifdef __xpv
        /*
         * We can't use writable pagetables for upper level tables, so fake it.
         */
        mmu_update_t t[2];
        int cnt = 1;
        int count;
        maddr_t ma;

        if (!IN_XPV_PANIC()) {
                ASSERT(!(ht->ht_flags & HTABLE_VLP)); /* no VLP yet */
                ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
                t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
                t[0].val = new;

#if defined(__amd64)
                /*
                 * On the 64-bit hypervisor we need to maintain the user mode
                 * top page table too.
                 */
                if (ht->ht_level == mmu.max_level && ht->ht_hat != kas.a_hat) {
                        ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(
                            ht->ht_hat->hat_user_ptable), entry));
                        t[1].ptr = ma | MMU_NORMAL_PT_UPDATE;
                        t[1].val = new;
                        ++cnt;
                }
#endif /* __amd64 */

                if (HYPERVISOR_mmu_update(t, cnt, &count, DOMID_SELF))
                        panic("HYPERVISOR_mmu_update() failed");
                ASSERT(count == cnt);
                return (old);
        }
#endif
        ptep = x86pte_access_pagetable(ht, entry);
        XPV_ALLOW_PAGETABLE_UPDATES();
        pte = CAS_PTE(ptep, old, new);
        XPV_DISALLOW_PAGETABLE_UPDATES();
        x86pte_release_pagetable(ht);
        return (pte);
}

/*
 * Invalidate a page table entry as long as it currently maps something that
 * matches the value determined by expect.
 *
 * If tlb is set, also invalidates any TLB entries.
 *
 * Returns the previous value of the PTE.
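 *
 * If pte_ptr is non-NULL it is a caller supplied mapping of the entry and
 * no pagetable window is acquired or released here.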
 */
x86pte_t
x86pte_inval(
        htable_t *ht,
        uint_t entry,
        x86pte_t expect,
        x86pte_t *pte_ptr,
        boolean_t tlb)
{
        x86pte_t *ptep;
        x86pte_t oldpte;
        x86pte_t found;

        ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(ht->ht_level <= mmu.max_page_level);

        if (pte_ptr != NULL)
                ptep = pte_ptr;
        else
                ptep = x86pte_access_pagetable(ht, entry);

#if defined(__xpv)
        /*
         * If exit()ing just use HYPERVISOR_mmu_update(), as we can't be racing
         * with anything else.
         */
        if ((ht->ht_hat->hat_flags & HAT_FREEING) && !IN_XPV_PANIC()) {
                int count;
                mmu_update_t t[1];
                maddr_t ma;

                oldpte = GET_PTE(ptep);
                if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
                        goto done;
                ma = pa_to_ma(PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry));
                t[0].ptr = ma | MMU_NORMAL_PT_UPDATE;
                t[0].val = 0;
                if (HYPERVISOR_mmu_update(t, 1, &count, DOMID_SELF))
                        panic("HYPERVISOR_mmu_update() failed");
                ASSERT(count == 1);
                goto done;
        }
#endif /* __xpv */

        /*
         * Note that the loop is needed to handle changes due to h/w updating
         * of PT_MOD/PT_REF.
         */
        do {
                oldpte = GET_PTE(ptep);
                if (expect != 0 && (oldpte & PT_PADDR) != (expect & PT_PADDR))
                        goto done;
                XPV_ALLOW_PAGETABLE_UPDATES();
                found = CAS_PTE(ptep, oldpte, 0);
                XPV_DISALLOW_PAGETABLE_UPDATES();
        } while (found != oldpte);
        if (tlb && (oldpte & (PT_REF | PT_MOD)))
                hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

done:
        if (pte_ptr == NULL)
                x86pte_release_pagetable(ht);
        return (oldpte);
}

/*
 * Change a page table entry if it currently matches the value in expect.
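 *
 * The value found in the entry is returned; the caller may assume the
 * update took effect only if that value equals expect.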
 */
x86pte_t
x86pte_update(
        htable_t *ht,
        uint_t entry,
        x86pte_t expect,
        x86pte_t new)
{
        x86pte_t *ptep;
        x86pte_t found;

        ASSERT(new != 0);
        ASSERT(!(ht->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(ht->ht_level <= mmu.max_page_level);

        ptep = x86pte_access_pagetable(ht, entry);
        XPV_ALLOW_PAGETABLE_UPDATES();
        found = CAS_PTE(ptep, expect, new);
        XPV_DISALLOW_PAGETABLE_UPDATES();
        if (found == expect) {
                hat_tlb_inval(ht->ht_hat, htable_e2va(ht, entry));

                /*
                 * When removing write permission *and* clearing the
                 * MOD bit, check if a write happened via a stale
                 * TLB entry before the TLB shootdown finished.
                 *
                 * If it did happen, simply re-enable write permission and
                 * act like the original CAS failed.
                 */
                if ((expect & (PT_WRITABLE | PT_MOD)) == PT_WRITABLE &&
                    (new & (PT_WRITABLE | PT_MOD)) == 0 &&
                    (GET_PTE(ptep) & PT_MOD) != 0) {
                        do {
                                found = GET_PTE(ptep);
                                XPV_ALLOW_PAGETABLE_UPDATES();
                                found =
                                    CAS_PTE(ptep, found, found | PT_WRITABLE);
                                XPV_DISALLOW_PAGETABLE_UPDATES();
                        } while ((found & PT_WRITABLE) == 0);
                }
        }
        x86pte_release_pagetable(ht);
        return (found);
}

#ifndef __xpv
/*
 * Copy page tables - this is just a little more complicated than the
 * previous routines. Note that it's also not atomic! It also is never
 * used for VLP pagetables.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t src_va;
        caddr_t dst_va;
        size_t size;
        x86pte_t *pteptr;
        x86pte_t pte;

        ASSERT(khat_running);
        ASSERT(!(dest->ht_flags & HTABLE_VLP));
        ASSERT(!(src->ht_flags & HTABLE_VLP));
        ASSERT(!(src->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));

        /*
         * Acquire access to the CPU pagetable windows for the dest and source.
         */
        dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);
        if (kpm_vbase) {
                src_va = (caddr_t)
                    PT_INDEX_PTR(hat_kpm_pfn2va(src->ht_pfn), entry);
        } else {
                uint_t x = PWIN_SRC(CPU->cpu_id);

                /*
                 * Finish defining the src pagetable mapping
                 */
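                /*
                 * PWIN_SRC() is the CPU's second pagetable window, used here
                 * so the source and destination tables can be mapped at the
                 * same time.
                 */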
                src_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
                pte = MAKEPTE(src->ht_pfn, 0) | mmu.pt_global | mmu.pt_nx;
                pteptr = (x86pte_t *)PWIN_PTE_VA(x);
                if (mmu.pae_hat)
                        *pteptr = pte;
                else
                        *(x86pte32_t *)pteptr = pte;
                mmu_tlbflush_entry((caddr_t)(PWIN_VA(x)));
        }

        /*
         * now do the copy
         */
        size = count << mmu.pte_size_shift;
        bcopy(src_va, dst_va, size);

        x86pte_release_pagetable(dest);
}

#else /* __xpv */

/*
 * The hypervisor only supports writable pagetables at level 0, so we have
 * to install these 1 by 1 the slow way.
 */
void
x86pte_copy(htable_t *src, htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t src_va;
        x86pte_t pte;

        ASSERT(!IN_XPV_PANIC());
        src_va = (caddr_t)x86pte_access_pagetable(src, entry);
        while (count) {
                if (mmu.pae_hat)
                        pte = *(x86pte_t *)src_va;
                else
                        pte = *(x86pte32_t *)src_va;
                if (pte != 0) {
                        set_pteval(pfn_to_pa(dest->ht_pfn), entry,
                            dest->ht_level, pte);
#ifdef __amd64
                        if (dest->ht_level == mmu.max_level &&
                            htable_e2va(dest, entry) < HYPERVISOR_VIRT_END)
                                set_pteval(
                                    pfn_to_pa(dest->ht_hat->hat_user_ptable),
                                    entry, dest->ht_level, pte);
#endif
                }
                --count;
                ++entry;
                src_va += mmu.pte_size;
        }
        x86pte_release_pagetable(src);
}
#endif /* __xpv */

/*
 * Zero page table entries - Note this doesn't use atomic stores!
 */
static void
x86pte_zero(htable_t *dest, uint_t entry, uint_t count)
{
        caddr_t dst_va;
        size_t size;
#ifdef __xpv
        int x;
        x86pte_t newpte;
#endif

        /*
         * Map in the page table to be zeroed.
         */
        ASSERT(!(dest->ht_flags & HTABLE_SHARED_PFN));
        ASSERT(!(dest->ht_flags & HTABLE_VLP));

        /*
         * On the hypervisor we don't use x86pte_access_pagetable() since
         * in this case the page is not pinned yet.
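         * Instead the page is mapped through the CPU's pagetable window
         * with an ordinary writable PTE via xen_map().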
         */
#ifdef __xpv
        if (kpm_vbase == NULL) {
                kpreempt_disable();
                ASSERT(CPU->cpu_hat_info != NULL);
                mutex_enter(&CPU->cpu_hat_info->hci_mutex);
                x = PWIN_TABLE(CPU->cpu_id);
                newpte = MAKEPTE(dest->ht_pfn, 0) | PT_WRITABLE;
                xen_map(newpte, PWIN_VA(x));
                dst_va = (caddr_t)PT_INDEX_PTR(PWIN_VA(x), entry);
        } else
#endif
                dst_va = (caddr_t)x86pte_access_pagetable(dest, entry);

        size = count << mmu.pte_size_shift;
        ASSERT(size > BLOCKZEROALIGN);
#ifdef __i386
        if (!is_x86_feature(x86_featureset, X86FSET_SSE2))
                bzero(dst_va, size);
        else
#endif
                block_zero_no_xmm(dst_va, size);

#ifdef __xpv
        if (kpm_vbase == NULL) {
                xen_map(0, PWIN_VA(x));
                mutex_exit(&CPU->cpu_hat_info->hci_mutex);
                kpreempt_enable();
        } else
#endif
                x86pte_release_pagetable(dest);
}

/*
 * Called to ensure that all pagetables are in the system dump
 */
void
hat_dump(void)
{
        hat_t *hat;
        uint_t h;
        htable_t *ht;

        /*
         * Dump all page tables
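         * (VLP htables are skipped as they have no dedicated pagetable page.)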
         */
        for (hat = kas.a_hat; hat != NULL; hat = hat->hat_next) {
                for (h = 0; h < hat->hat_num_hash; ++h) {
                        for (ht = hat->hat_ht_hash[h]; ht; ht = ht->ht_next) {
                                if ((ht->ht_flags & HTABLE_VLP) == 0)
                                        dump_page(ht->ht_pfn);
                        }
                }
        }
}