1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21 /*
22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 */
24 /*
25 * Copyright (c) 2010, Intel Corporation.
26 * All rights reserved.
27 */
28 /*
29 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
30 * Copyright 2018 Joyent, Inc. All rights reserved.
31 * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
32 * Copyright 2022 Tintri by DDN, Inc. All rights reserved.
33 */
34
35 /*
36 * VM - Hardware Address Translation management for i386 and amd64
37 *
38 * Implementation of the interfaces described in <common/vm/hat.h>
39 *
40 * Nearly all the details of how the hardware is managed should not be
41 * visible outside this layer except for misc. machine specific functions
42 * that work in conjunction with this code.
43 *
44 * Routines used only inside of i86pc/vm start with hati_ for HAT Internal.
45 */
46
47 /*
48 * amd64 HAT Design
49 *
50 * ----------
51 * Background
52 * ----------
53 *
54 * On x86, the address space is shared between a user process and the kernel.
55 * This is different from SPARC. Conventionally, the kernel lives at the top of
56 * the address space and the user process gets to enjoy the rest of it. If you
57 * look at the image of the address map in uts/i86pc/os/startup.c, you'll get a
58 * rough sense of how the address space is laid out and used.
59 *
60 * Every unique address space is represented by an instance of a HAT structure
61 * called a 'hat_t'. In addition to a hat_t structure for each process, there is
62 * also one that is used for the kernel (kas.a_hat), and each CPU ultimately
63 * also has a HAT.
64 *
65 * Each HAT contains a pointer to its root page table. This root page table is
66 * what we call an L3 page table in illumos and Intel calls the PML4. It is the
67 * physical address of the L3 table that we place in the %cr3 register which the
68 * processor uses.
69 *
70 * Each of the many layers of the page table is represented by a structure
71 * called an htable_t. The htable_t manages a set of 512 8-byte entries. The
72 * number of entries in a given page table is constant across all different
73 * level page tables. Note, this is only true on amd64. This has not always been
74 * the case on x86.
75 *
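 * As a purely illustrative aside (assuming the level shifts of 12, 21, 30,
 * and 39 that mmu_init() establishes later in this file), a 64-bit virtual
 * address is decomposed as follows:
 *
 *	bits 39..47	index into the L3 (PML4) table
 *	bits 30..38	index into the L2 table
 *	bits 21..29	index into the L1 table
 *	bits 12..20	index into the L0 table
 *	bits 0..11	offset within the 4K page
 *
 * Since each table holds 512 8-byte entries, a page table is itself exactly
 * one 4K page.
 *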
76 * Each entry in a page table, generally referred to as a PTE, may refer to
77 * another page table or a memory location, depending on the level of the page
78 * table and the use of large pages. Importantly, the top-level L3 page table
79 * (PML4) only supports linking to further page tables. This is also true on
80 * systems which support a 5th level page table (which we do not currently
81 * support).
82 *
 * Historically, on x86, when a process was running, the root of its page
 * table was inserted into %cr3 on each CPU on which it was currently running.
85 * When processes would switch (by calling hat_switch()), then the value in %cr3
86 * on that CPU would change to that of the new HAT. While this behavior is still
87 * maintained in the xpv kernel, this is not what is done today.
88 *
89 * -------------------
90 * Per-CPU Page Tables
91 * -------------------
92 *
93 * Throughout the system the 64-bit kernel has a notion of what it calls a
94 * per-CPU page table or PCP. The notion of a per-CPU page table was originally
95 * introduced as part of the original work to support x86 PAE. On the 64-bit
96 * kernel, it was originally used for 32-bit processes running on the 64-bit
97 * kernel. The rationale behind this was that each 32-bit process could have all
98 * of its memory represented in a single L2 page table as each L2 page table
 * entry represents 1 GiB of memory.
100 *
101 * Following on from this, the idea was that given that all of the L3 page table
102 * entries for 32-bit processes are basically going to be identical with the
103 * exception of the first entry in the page table, why not share those page
104 * table entries. This gave rise to the idea of a per-CPU page table.
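 *
 * To make the arithmetic behind this concrete (an informal sketch): a 32-bit
 * process can address at most 4 GiB, and each L2 entry covers 1 GiB, so four
 * L2 entries are enough to describe the entire process. This is why
 * mmu_calc_user_slots() later in this file sets mmu.num_copied_ents32 to 4.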
105 *
106 * The way this works is that we have a member in the machcpu_t called the
107 * mcpu_hat_info. That structure contains two different 4k pages: one that
108 * represents the L3 page table and one that represents an L2 page table. When
109 * the CPU starts up, the L3 page table entries are copied in from the kernel's
110 * page table. The L3 kernel entries do not change throughout the lifetime of
 * the kernel. The kernel portion of these L3 pages is the same on every CPU,
 * meaning that they all point to the same L2 page tables and thus see a
113 * consistent view of the world.
114 *
115 * When a 32-bit process is loaded into this world, we copy the 32-bit process's
116 * four top-level page table entries into the CPU's L2 page table and then set
117 * the CPU's first L3 page table entry to point to the CPU's L2 page.
118 * Specifically, in hat_pcp_update(), we're copying from the process's
119 * HAT_COPIED_32 HAT into the page tables specific to this CPU.
120 *
121 * As part of the implementation of kernel page table isolation, this was also
122 * extended to 64-bit processes. When a 64-bit process runs, we'll copy their L3
123 * PTEs across into the current CPU's L3 page table. (As we can't do the
124 * first-L3-entry trick for 64-bit processes, ->hci_pcp_l2ptes is unused in this
125 * case.)
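 *
 * A rough sketch of what hat_pcp_update() (later in this file) does when
 * switching onto such a process:
 *
 *	32-bit (HAT_COPIED_32):
 *		copy hat_copied_ptes[0..3] into the CPU's hci_pcp_l2ptes, then
 *		point hci_pcp_l3ptes[0] (and hci_user_l3ptes[0]) at that L2 page
 *	64-bit (HAT_COPIED):
 *		copy hat_copied_ptes[0..top_level_uslots - 1] into both
 *		hci_pcp_l3ptes and hci_user_l3ptes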
126 *
127 * The use of per-CPU page tables has a lot of implementation ramifications. A
128 * HAT that runs a user process will be flagged with the HAT_COPIED flag to
129 * indicate that it is using the per-CPU page table functionality. In tandem
130 * with the HAT, the top-level htable_t will be flagged with the HTABLE_COPIED
131 * flag. If the HAT represents a 32-bit process, then we will also set the
132 * HAT_COPIED_32 flag on that hat_t.
133 *
134 * These two flags work together. The top-level htable_t when using per-CPU page
135 * tables is 'virtual'. We never allocate a ptable for this htable_t (i.e.
136 * ht->ht_pfn is PFN_INVALID). Instead, when we need to modify a PTE in an
137 * HTABLE_COPIED ptable, x86pte_access_pagetable() will redirect any accesses to
138 * ht_hat->hat_copied_ptes.
139 *
140 * Of course, such a modification won't actually modify the HAT_PCP page tables
141 * that were copied from the HAT_COPIED htable. When we change the top level
142 * page table entries (L2 PTEs for a 32-bit process and L3 PTEs for a 64-bit
143 * process), we need to make sure to trigger hat_pcp_update() on all CPUs that
144 * are currently tied to this HAT (including the current CPU).
145 *
146 * To do this, PCP piggy-backs on TLB invalidation, specifically via the
147 * hat_tlb_inval() path from link_ptp() and unlink_ptp().
148 *
149 * (Importantly, in all such cases, when this is in operation, the top-level
150 * entry should not be able to refer to an actual page table entry that can be
151 * changed and consolidated into a large page. If large page consolidation is
152 * required here, then there will be much that needs to be reconsidered.)
153 *
154 * -----------------------------------------------
155 * Kernel Page Table Isolation and the Per-CPU HAT
156 * -----------------------------------------------
157 *
158 * All Intel CPUs that support speculative execution and paging are subject to a
159 * series of bugs that have been termed 'Meltdown'. These exploits allow a user
160 * process to read kernel memory through cache side channels and speculative
161 * execution. To mitigate this on vulnerable CPUs, we need to use a technique
162 * called kernel page table isolation. What this requires is that we have two
163 * different page table roots. When executing in kernel mode, we will use a %cr3
 * value that has both the user and kernel pages. However, when executing in
 * user mode, we need a %cr3 that has all of the user pages but only the
 * subset of kernel pages required to operate.
167 *
168 * These kernel pages that we need mapped are:
169 *
170 * o Kernel Text that allows us to switch between the cr3 values.
171 * o The current global descriptor table (GDT)
172 * o The current interrupt descriptor table (IDT)
173 * o The current task switching state (TSS)
174 * o The current local descriptor table (LDT)
175 * o Stacks and scratch space used by the interrupt handlers
176 *
177 * For more information on the stack switching techniques, construction of the
178 * trampolines, and more, please see i86pc/ml/kpti_trampolines.s. The most
179 * important part of these mappings are the following two constraints:
180 *
181 * o The mappings are all per-CPU (except for read-only text)
182 * o The mappings are static. They are all established before the CPU is
183 * started (with the exception of the boot CPU).
184 *
185 * To facilitate the kernel page table isolation we employ our per-CPU
186 * page tables discussed in the previous section and add the notion of a per-CPU
187 * HAT. Fundamentally we have a second page table root. There is both a kernel
188 * page table (hci_pcp_l3ptes), and a user L3 page table (hci_user_l3ptes).
189 * Both will have the user page table entries copied into them, the same way
190 * that we discussed in the section 'Per-CPU Page Tables'.
191 *
 * The complex part of this is how we construct the set of kernel mappings
193 * that should be present when running with the user page table. To answer that,
194 * we add the notion of a per-CPU HAT. This HAT functions like a normal HAT,
195 * except that it's not really associated with an address space the same way
196 * that other HATs are.
197 *
198 * This HAT lives off of the 'struct hat_cpu_info' which is a member of the
199 * machcpu in the member hci_user_hat. We use this per-CPU HAT to create the set
200 * of kernel mappings that should be present on this CPU. The kernel mappings
201 * are added to the per-CPU HAT through the function hati_cpu_punchin(). Once a
202 * mapping has been punched in, it may not be punched out. The reason that we
203 * opt to leverage a HAT structure is that it knows how to allocate and manage
204 * all of the lower level page tables as required.
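 *
 * For illustration, a typical punch-in performed by hat_pcp_setup() later in
 * this file looks like:
 *
 *	hati_cpu_punchin(cpu, (uintptr_t)cpu->cpu_gdt, PROT_READ);
 *
 * which establishes a read-only mapping of this CPU's GDT in its per-CPU HAT.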
205 *
206 * Because all of the mappings are present at the beginning of time for this CPU
207 * and none of the mappings are in the kernel pageable segment, we don't have to
208 * worry about faulting on these HAT structures and thus the notion of the
209 * current HAT that we're using is always the appropriate HAT for the process
210 * (usually a user HAT or the kernel's HAT).
211 *
212 * A further constraint we place on the system with these per-CPU HATs is that
213 * they are not subject to htable_steal(). Because each CPU will have a rather
214 * fixed number of page tables, the same way that we don't steal from the
215 * kernel's HAT, it was determined that we should not steal from this HAT due to
216 * the complications involved and somewhat criminal nature of htable_steal().
217 *
218 * The per-CPU HAT is initialized in hat_pcp_setup() which is called as part of
219 * onlining the CPU, but before the CPU is actually started. The per-CPU HAT is
220 * removed in hat_pcp_teardown() which is called when a CPU is being offlined to
221 * be removed from the system (which is different from what psradm usually
222 * does).
223 *
224 * Finally, once the CPU has been onlined, the set of mappings in the per-CPU
225 * HAT must not change. The HAT related functions that we call are not meant to
226 * be called when we're switching between processes. For example, it is quite
227 * possible that if they were, they would try to grab an htable mutex which
 * another thread might have. One needs to treat hat_switch() as though it
 * were running above LOCK_LEVEL and therefore it _must not_ block under any
 * circumstance.
230 */
231
232 #include <sys/machparam.h>
233 #include <sys/machsystm.h>
234 #include <sys/mman.h>
235 #include <sys/types.h>
236 #include <sys/systm.h>
237 #include <sys/cpuvar.h>
238 #include <sys/thread.h>
239 #include <sys/proc.h>
240 #include <sys/cpu.h>
241 #include <sys/kmem.h>
242 #include <sys/disp.h>
243 #include <sys/shm.h>
244 #include <sys/sysmacros.h>
245 #include <sys/machparam.h>
246 #include <sys/vmem.h>
247 #include <sys/vmsystm.h>
248 #include <sys/promif.h>
249 #include <sys/var.h>
250 #include <sys/x86_archext.h>
251 #include <sys/atomic.h>
252 #include <sys/bitmap.h>
253 #include <sys/controlregs.h>
254 #include <sys/bootconf.h>
255 #include <sys/bootsvcs.h>
256 #include <sys/bootinfo.h>
257 #include <sys/archsystm.h>
258
259 #include <vm/seg_kmem.h>
260 #include <vm/hat_i86.h>
261 #include <vm/as.h>
262 #include <vm/seg.h>
263 #include <vm/page.h>
264 #include <vm/seg_kp.h>
265 #include <vm/seg_kpm.h>
266 #include <vm/vm_dep.h>
267 #ifdef __xpv
268 #include <sys/hypervisor.h>
269 #endif
270 #include <vm/kboot_mmu.h>
271 #include <vm/seg_spt.h>
272
273 #include <sys/cmn_err.h>
274
275 /*
276 * Basic parameters for hat operation.
277 */
278 struct hat_mmu_info mmu;
279
280 /*
281 * The page that is the kernel's top level pagetable.
282 *
283 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
284 * on this 4K page for its top level page table. The remaining groups of
285 * 4 entries are used for per processor copies of user PCP pagetables for
286 * running threads. See hat_switch() and reload_pae32() for details.
287 *
288 * pcp_page[0..3] - level==2 PTEs for kernel HAT
289 * pcp_page[4..7] - level==2 PTEs for user thread on cpu 0
290 * pcp_page[8..11] - level==2 PTE for user thread on cpu 1
291 * etc...
292 *
293 * On the 64-bit kernel, this is the normal root of the page table and there is
294 * nothing special about it when used for other CPUs.
295 */
296 static x86pte_t *pcp_page;
297
298 /*
299 * forward declaration of internal utility routines
300 */
301 static x86pte_t hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected,
302 x86pte_t new);
303
304 /*
305 * The kernel address space exists in all non-HAT_COPIED HATs. To implement this
306 * the kernel reserves a fixed number of entries in the topmost level(s) of page
 * tables. The values are set up during startup and then copied to every user hat
308 * created by hat_alloc(). This means that kernelbase must be:
309 *
310 * 4Meg aligned for 32 bit kernels
311 * 512Gig aligned for x86_64 64 bit kernel
312 *
313 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
314 * to each user hat.
315 */
316 typedef struct hat_kernel_range {
317 level_t hkr_level;
318 uintptr_t hkr_start_va;
319 uintptr_t hkr_end_va; /* zero means to end of memory */
320 } hat_kernel_range_t;
321 #define NUM_KERNEL_RANGE 2
322 static hat_kernel_range_t kernel_ranges[NUM_KERNEL_RANGE];
323 static int num_kernel_ranges;
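
/*
 * For illustration (a sketch mirroring hat_init_finish() below): on the
 * 64-bit metal kernel, kernel_ranges[] ends up holding a single level 3
 * range covering kernelbase to the end of the address space; under __xpv a
 * second range is added for the hypervisor's reserved VA region.
 */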
324
325 uint_t use_boot_reserve = 1; /* cleared after early boot process */
326 uint_t can_steal_post_boot = 0; /* set late in boot to enable stealing */
327
328 /*
329 * enable_1gpg: controls 1g page support for user applications.
330 * By default, 1g pages are exported to user applications. enable_1gpg can
331 * be set to 0 to not export.
332 */
333 int enable_1gpg = 1;
334
335 /*
 * AMD shanghai processors provide better management of 1gb ptes in their tlbs.
337 * By default, 1g page support will be disabled for pre-shanghai AMD
338 * processors that don't have optimal tlb support for the 1g page size.
339 * chk_optimal_1gtlb can be set to 0 to force 1g page support on sub-optimal
340 * processors.
341 */
342 int chk_optimal_1gtlb = 1;
343
344
345 #ifdef DEBUG
346 uint_t map1gcnt;
347 #endif
348
349
350 /*
351 * A cpuset for all cpus. This is used for kernel address cross calls, since
352 * the kernel addresses apply to all cpus.
353 */
354 cpuset_t khat_cpuset;
355
356 /*
357 * management stuff for hat structures
358 */
359 kmutex_t hat_list_lock;
360 kcondvar_t hat_list_cv;
361 kmem_cache_t *hat_cache;
362 kmem_cache_t *hat_hash_cache;
363 kmem_cache_t *hat32_hash_cache;
364
365 /*
366 * Simple statistics
367 */
368 struct hatstats hatstat;
369
370 /*
371 * Some earlier hypervisor versions do not emulate cmpxchg of PTEs
372 * correctly. For such hypervisors we must set PT_USER for kernel
373 * entries ourselves (normally the emulation would set PT_USER for
374 * kernel entries and PT_USER|PT_GLOBAL for user entries). pt_kern is
375 * thus set appropriately. Note that dboot/kbm is OK, as only the full
376 * HAT uses cmpxchg() and the other paths (hypercall etc.) were never
377 * incorrect.
378 */
379 int pt_kern;
380
381 #ifndef __xpv
382 extern pfn_t memseg_get_start(struct memseg *);
383 #endif
384
385 #define PP_GETRM(pp, rmmask) (pp->p_nrm & rmmask)
386 #define PP_ISMOD(pp) PP_GETRM(pp, P_MOD)
387 #define PP_ISREF(pp) PP_GETRM(pp, P_REF)
388 #define PP_ISRO(pp) PP_GETRM(pp, P_RO)
389
390 #define PP_SETRM(pp, rm) atomic_orb(&(pp->p_nrm), rm)
391 #define PP_SETMOD(pp) PP_SETRM(pp, P_MOD)
392 #define PP_SETREF(pp) PP_SETRM(pp, P_REF)
393 #define PP_SETRO(pp) PP_SETRM(pp, P_RO)
394
395 #define PP_CLRRM(pp, rm) atomic_andb(&(pp->p_nrm), ~(rm))
396 #define PP_CLRMOD(pp) PP_CLRRM(pp, P_MOD)
397 #define PP_CLRREF(pp) PP_CLRRM(pp, P_REF)
398 #define PP_CLRRO(pp) PP_CLRRM(pp, P_RO)
399 #define PP_CLRALL(pp) PP_CLRRM(pp, P_MOD | P_REF | P_RO)
400
401 /*
402 * kmem cache constructor for struct hat
403 */
404 /*ARGSUSED*/
405 static int
hati_constructor(void *buf, void *handle, int kmflags)
407 {
408 hat_t *hat = buf;
409
410 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
411 bzero(hat->hat_pages_mapped,
412 sizeof (pgcnt_t) * (mmu.max_page_level + 1));
413 hat->hat_ism_pgcnt = 0;
414 hat->hat_stats = 0;
415 hat->hat_flags = 0;
416 CPUSET_ZERO(hat->hat_cpus);
417 hat->hat_htable = NULL;
418 hat->hat_ht_hash = NULL;
419 return (0);
420 }
421
422 /*
423 * Put it at the start of the global list of all hats (used by stealing)
424 *
425 * kas.a_hat is not in the list but is instead used to find the
426 * first and last items in the list.
427 *
428 * - kas.a_hat->hat_next points to the start of the user hats.
429 * The list ends where hat->hat_next == NULL
430 *
431 * - kas.a_hat->hat_prev points to the last of the user hats.
432 * The list begins where hat->hat_prev == NULL
433 */
434 static void
hat_list_append(hat_t *hat)
436 {
437 mutex_enter(&hat_list_lock);
438 hat->hat_prev = NULL;
439 hat->hat_next = kas.a_hat->hat_next;
440 if (hat->hat_next)
441 hat->hat_next->hat_prev = hat;
442 else
443 kas.a_hat->hat_prev = hat;
444 kas.a_hat->hat_next = hat;
445 mutex_exit(&hat_list_lock);
446 }
447
448 /*
449 * Allocate a hat structure for as. We also create the top level
450 * htable and initialize it to contain the kernel hat entries.
451 */
452 hat_t *
hat_alloc(struct as *as)
454 {
455 hat_t *hat;
456 htable_t *ht; /* top level htable */
457 uint_t use_copied;
458 uint_t r;
459 hat_kernel_range_t *rp;
460 uintptr_t va;
461 uintptr_t eva;
462 uint_t start;
463 uint_t cnt;
464 htable_t *src;
465 boolean_t use_hat32_cache;
466
467 /*
468 * Once we start creating user process HATs we can enable
469 * the htable_steal() code.
470 */
471 if (can_steal_post_boot == 0)
472 can_steal_post_boot = 1;
473
474 ASSERT(AS_WRITE_HELD(as));
475 hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
476 hat->hat_as = as;
477 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
478 ASSERT(hat->hat_flags == 0);
479
480 #if defined(__xpv)
481 /*
482 * No PCP stuff on the hypervisor due to the 64-bit split top level
483 * page tables. On 32-bit it's not needed as the hypervisor takes
484 * care of copying the top level PTEs to a below 4Gig page.
485 */
486 use_copied = 0;
487 use_hat32_cache = B_FALSE;
488 hat->hat_max_level = mmu.max_level;
489 hat->hat_num_copied = 0;
490 hat->hat_flags = 0;
491 #else /* __xpv */
492
493 /*
494 * All processes use HAT_COPIED on the 64-bit kernel if KPTI is
495 * turned on.
496 */
497 if (ttoproc(curthread)->p_model == DATAMODEL_ILP32) {
498 use_copied = 1;
499 hat->hat_max_level = mmu.max_level32;
500 hat->hat_num_copied = mmu.num_copied_ents32;
501 use_hat32_cache = B_TRUE;
502 hat->hat_flags |= HAT_COPIED_32;
503 HATSTAT_INC(hs_hat_copied32);
504 } else if (kpti_enable == 1) {
505 use_copied = 1;
506 hat->hat_max_level = mmu.max_level;
507 hat->hat_num_copied = mmu.num_copied_ents;
508 use_hat32_cache = B_FALSE;
509 HATSTAT_INC(hs_hat_copied64);
510 } else {
511 use_copied = 0;
512 use_hat32_cache = B_FALSE;
513 hat->hat_max_level = mmu.max_level;
514 hat->hat_num_copied = 0;
515 hat->hat_flags = 0;
516 HATSTAT_INC(hs_hat_normal64);
517 }
518 #endif /* __xpv */
519 if (use_copied) {
520 hat->hat_flags |= HAT_COPIED;
521 bzero(hat->hat_copied_ptes, sizeof (hat->hat_copied_ptes));
522 }
523
524 /*
525 * Allocate the htable hash. For 32-bit PCP processes we use the
526 * hat32_hash_cache. However, for 64-bit PCP processes we do not as the
527 * number of entries that they have to handle is closer to
528 * hat_hash_cache in count (though there will be more wastage when we
529 * have more DRAM in the system and thus push down the user address
530 * range).
531 */
532 if (use_hat32_cache) {
533 hat->hat_num_hash = mmu.hat32_hash_cnt;
534 hat->hat_ht_hash = kmem_cache_alloc(hat32_hash_cache, KM_SLEEP);
535 } else {
536 hat->hat_num_hash = mmu.hash_cnt;
537 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
538 }
539 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
540
541 /*
542 * Initialize Kernel HAT entries at the top of the top level page
543 * tables for the new hat.
544 */
545 hat->hat_htable = NULL;
546 hat->hat_ht_cached = NULL;
547 XPV_DISALLOW_MIGRATE();
548 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
549 hat->hat_htable = ht;
550
551 if (hat->hat_flags & HAT_COPIED)
552 goto init_done;
553
554 for (r = 0; r < num_kernel_ranges; ++r) {
555 rp = &kernel_ranges[r];
556 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
557 va += cnt * LEVEL_SIZE(rp->hkr_level)) {
558
559 if (rp->hkr_level == TOP_LEVEL(hat))
560 ht = hat->hat_htable;
561 else
562 ht = htable_create(hat, va, rp->hkr_level,
563 NULL);
564
565 start = htable_va2entry(va, ht);
566 cnt = HTABLE_NUM_PTES(ht) - start;
567 eva = va +
568 ((uintptr_t)cnt << LEVEL_SHIFT(rp->hkr_level));
569 if (rp->hkr_end_va != 0 &&
570 (eva > rp->hkr_end_va || eva == 0))
571 cnt = htable_va2entry(rp->hkr_end_va, ht) -
572 start;
573
574 src = htable_lookup(kas.a_hat, va, rp->hkr_level);
575 ASSERT(src != NULL);
576 x86pte_copy(src, ht, start, cnt);
577 htable_release(src);
578 }
579 }
580
581 init_done:
582
583 #if defined(__xpv)
584 /*
585 * Pin top level page tables after initializing them
586 */
587 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
588 xen_pin(hat->hat_user_ptable, mmu.max_level);
589 #endif
590 XPV_ALLOW_MIGRATE();
591
592 hat_list_append(hat);
593
594 return (hat);
595 }
596
597 #if !defined(__xpv)
598 /*
599 * Cons up a HAT for a CPU. This represents the user mappings. This will have
600 * various kernel pages punched into it manually. Importantly, this hat is
601 * ineligible for stealing. We really don't want to deal with this ever
602 * faulting and figuring out that this is happening, much like we don't with
603 * kas.
604 */
605 static hat_t *
hat_cpu_alloc(cpu_t *cpu)
607 {
608 hat_t *hat;
609 htable_t *ht;
610
611 hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
612 hat->hat_as = NULL;
613 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
614 hat->hat_max_level = mmu.max_level;
615 hat->hat_num_copied = 0;
616 hat->hat_flags = HAT_PCP;
617
618 hat->hat_num_hash = mmu.hash_cnt;
619 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
620 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
621
622 hat->hat_next = hat->hat_prev = NULL;
623
624 /*
625 * Because this HAT will only ever be used by the current CPU, we'll go
626 * ahead and set the CPUSET up to only point to the CPU in question.
627 */
628 CPUSET_ADD(hat->hat_cpus, cpu->cpu_id);
629
630 hat->hat_htable = NULL;
631 hat->hat_ht_cached = NULL;
632 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
633 hat->hat_htable = ht;
634
635 hat_list_append(hat);
636
637 return (hat);
638 }
639 #endif /* !__xpv */
640
641 /*
642 * process has finished executing but as has not been cleaned up yet.
643 */
644 /*ARGSUSED*/
645 void
hat_free_start(hat_t *hat)
647 {
648 ASSERT(AS_WRITE_HELD(hat->hat_as));
649
650 /*
651 * If the hat is currently a stealing victim, wait for the stealing
652 * to finish. Once we mark it as HAT_FREEING, htable_steal()
653 * won't look at its pagetables anymore.
654 */
655 mutex_enter(&hat_list_lock);
656 while (hat->hat_flags & HAT_VICTIM)
657 cv_wait(&hat_list_cv, &hat_list_lock);
658 hat->hat_flags |= HAT_FREEING;
659 mutex_exit(&hat_list_lock);
660 }
661
662 /*
663 * An address space is being destroyed, so we destroy the associated hat.
664 */
665 void
hat_free_end(hat_t *hat)
667 {
668 kmem_cache_t *cache;
669
670 ASSERT(hat->hat_flags & HAT_FREEING);
671
672 /*
673 * must not be running on the given hat
674 */
675 ASSERT(CPU->cpu_current_hat != hat);
676
677 /*
678 * Remove it from the list of HATs
679 */
680 mutex_enter(&hat_list_lock);
681 if (hat->hat_prev)
682 hat->hat_prev->hat_next = hat->hat_next;
683 else
684 kas.a_hat->hat_next = hat->hat_next;
685 if (hat->hat_next)
686 hat->hat_next->hat_prev = hat->hat_prev;
687 else
688 kas.a_hat->hat_prev = hat->hat_prev;
689 mutex_exit(&hat_list_lock);
690 hat->hat_next = hat->hat_prev = NULL;
691
692 #if defined(__xpv)
693 /*
694 * On the hypervisor, unpin top level page table(s)
695 */
696 VERIFY3U(hat->hat_flags & HAT_PCP, ==, 0);
697 xen_unpin(hat->hat_htable->ht_pfn);
698 xen_unpin(hat->hat_user_ptable);
699 #endif
700
701 /*
702 * Make a pass through the htables freeing them all up.
703 */
704 htable_purge_hat(hat);
705
706 /*
707 * Decide which kmem cache the hash table came from, then free it.
708 */
709 if (hat->hat_flags & HAT_COPIED) {
710 if (hat->hat_flags & HAT_COPIED_32) {
711 cache = hat32_hash_cache;
712 } else {
713 cache = hat_hash_cache;
714 }
715 } else {
716 cache = hat_hash_cache;
717 }
718 kmem_cache_free(cache, hat->hat_ht_hash);
719 hat->hat_ht_hash = NULL;
720
721 hat->hat_flags = 0;
722 hat->hat_max_level = 0;
723 hat->hat_num_copied = 0;
724 kmem_cache_free(hat_cache, hat);
725 }
726
727 /*
728 * round kernelbase down to a supported value to use for _userlimit
729 *
730 * userlimit must be aligned down to an entry in the top level htable.
731 * The one exception is for 32 bit HAT's running PAE.
732 */
733 uintptr_t
hat_kernelbase(uintptr_t va)
735 {
736 if (IN_VA_HOLE(va))
737 panic("_userlimit %p will fall in VA hole\n", (void *)va);
738 return (va);
739 }
740
/*
 * Determine the largest page size (mmu.max_page_level) that the hat will use,
 * based on boot-time large page support, 1g page support and known errata,
 * and the largest size exported to userland (mmu.umax_page_level).
 */
744 static void
set_max_page_level()
746 {
747 level_t lvl;
748
749 if (!kbm_largepage_support) {
750 lvl = 0;
751 } else {
752 if (is_x86_feature(x86_featureset, X86FSET_1GPG)) {
753 lvl = 2;
754 if (chk_optimal_1gtlb &&
755 cpuid_opteron_erratum(CPU, 6671130)) {
756 lvl = 1;
757 }
758 if (plat_mnode_xcheck(LEVEL_SIZE(2) >>
759 LEVEL_SHIFT(0))) {
760 lvl = 1;
761 }
762 } else {
763 lvl = 1;
764 }
765 }
766 mmu.max_page_level = lvl;
767
768 if ((lvl == 2) && (enable_1gpg == 0))
769 mmu.umax_page_level = 1;
770 else
771 mmu.umax_page_level = lvl;
772 }
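
/*
 * As an informal note on the level numbering used above: page level 0
 * corresponds to 4K pages, level 1 to 2M pages, and level 2 to 1G pages,
 * matching the level shifts of 12, 21, and 30 set up in mmu_init().
 */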
773
774 /*
 * Determine the number of slots that are in use in the top-most level page
776 * table for user memory. This is based on _userlimit. In effect this is similar
777 * to htable_va2entry, but without the convenience of having an htable.
778 */
779 void
mmu_calc_user_slots(void)
781 {
782 uint_t ent, nptes;
783 uintptr_t shift;
784
785 nptes = mmu.top_level_count;
786 shift = _userlimit >> mmu.level_shift[mmu.max_level];
787 ent = shift & (nptes - 1);
788
789 /*
790 * Ent tells us the slot that the page for _userlimit would fit in. We
791 * need to add one to this to cover the total number of entries.
792 */
793 mmu.top_level_uslots = ent + 1;
794
795 /*
 * When running 32-bit compatibility processes on a 64-bit kernel, we
797 * will only need to use one slot.
798 */
799 mmu.top_level_uslots32 = 1;
800
801 /*
802 * Record the number of PCP page table entries that we'll need to copy
803 * around. For 64-bit processes this is the number of user slots. For
 * 32-bit processes, this is four 1 GiB pages.
805 */
806 mmu.num_copied_ents = mmu.top_level_uslots;
807 mmu.num_copied_ents32 = 4;
808 }
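
/*
 * A purely hypothetical worked example: if _userlimit were 0x00007fffffffffff
 * and level_shift[3] were 39, then (0x00007fffffffffff >> 39) & 511 == 255,
 * so top_level_uslots would be 256.
 */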
809
810 /*
811 * Initialize hat data structures based on processor MMU information.
812 */
813 void
mmu_init(void)
815 {
816 uint_t max_htables;
817 uint_t pa_bits;
818 uint_t va_bits;
819 int i;
820
821 /*
 * If the CPU enabled the page table global bit, use it for the kernel.
823 * This is bit 7 in CR4 (PGE - Page Global Enable).
824 */
825 if (is_x86_feature(x86_featureset, X86FSET_PGE) &&
826 (getcr4() & CR4_PGE) != 0)
827 mmu.pt_global = PT_GLOBAL;
828
829 #if !defined(__xpv)
830 /*
831 * The 64-bit x86 kernel has split user/kernel page tables. As such we
832 * cannot have the global bit set. The simplest way for us to deal with
833 * this is to just say that pt_global is zero, so the global bit isn't
834 * present.
835 */
836 if (kpti_enable == 1)
837 mmu.pt_global = 0;
838 #endif
839
840 /*
841 * Detect NX and PAE usage.
842 */
843 mmu.pae_hat = kbm_pae_support;
844 if (kbm_nx_support)
845 mmu.pt_nx = PT_NX;
846 else
847 mmu.pt_nx = 0;
848
849 /*
850 * Use CPU info to set various MMU parameters
851 */
852 cpuid_get_addrsize(CPU, &pa_bits, &va_bits);
853
854 /*
 * Check if 5 level paging is on; we don't support that (yet).
856 * X86_64 processors that support 5 level paging report
857 * the number of va bits for 5 level paging even if
858 * not in 5 level paging mode. So we need
859 * to adjust va_bits to max for 4 level paging if not in 5 level mode.
860 */
861 if ((getcr4() & CR4_LA57) != 0)
862 panic("5 Level paging enabled but not yet supported");
863 else if (va_bits > MMU_MAX4LEVELVABITS)
864 va_bits = MMU_MAX4LEVELVABITS;
865
866 if (va_bits < sizeof (void *) * NBBY) {
867 mmu.hole_start = (1ul << (va_bits - 1));
868 mmu.hole_end = 0ul - mmu.hole_start - 1;
869 } else {
870 mmu.hole_end = 0;
871 mmu.hole_start = mmu.hole_end - 1;
872 }
873 #if defined(OPTERON_ERRATUM_121)
874 /*
875 * If erratum 121 has already been detected at this time, hole_start
876 * contains the value to be subtracted from mmu.hole_start.
877 */
878 ASSERT(hole_start == 0 || opteron_erratum_121 != 0);
879 hole_start = mmu.hole_start - hole_start;
880 #else
881 hole_start = mmu.hole_start;
882 #endif
883 hole_end = mmu.hole_end;
884
885 mmu.highest_pfn = mmu_btop((1ull << pa_bits) - 1);
886 if (mmu.pae_hat == 0 && pa_bits > 32)
887 mmu.highest_pfn = PFN_4G - 1;
888
889 if (mmu.pae_hat) {
890 mmu.pte_size = 8; /* 8 byte PTEs */
891 mmu.pte_size_shift = 3;
892 } else {
893 mmu.pte_size = 4; /* 4 byte PTEs */
894 mmu.pte_size_shift = 2;
895 }
896
897 if (mmu.pae_hat && !is_x86_feature(x86_featureset, X86FSET_PAE))
898 panic("Processor does not support PAE");
899
900 if (!is_x86_feature(x86_featureset, X86FSET_CX8))
901 panic("Processor does not support cmpxchg8b instruction");
902
903
904 mmu.num_level = 4;
905 mmu.max_level = 3;
906 mmu.ptes_per_table = 512;
907 mmu.top_level_count = 512;
908
909 /*
910 * 32-bit processes only use 1 GB ptes.
911 */
912 mmu.max_level32 = 2;
913
914 mmu.level_shift[0] = 12;
915 mmu.level_shift[1] = 21;
916 mmu.level_shift[2] = 30;
917 mmu.level_shift[3] = 39;
918
919
920 for (i = 0; i < mmu.num_level; ++i) {
921 mmu.level_size[i] = 1UL << mmu.level_shift[i];
922 mmu.level_offset[i] = mmu.level_size[i] - 1;
923 mmu.level_mask[i] = ~mmu.level_offset[i];
924 }
925
926 set_max_page_level();
927 mmu_calc_user_slots();
928
929 mmu_page_sizes = mmu.max_page_level + 1;
930 mmu_exported_page_sizes = mmu.umax_page_level + 1;
931
932 /* restrict legacy applications from using pagesizes 1g and above */
933 mmu_legacy_page_sizes =
934 (mmu_exported_page_sizes > 2) ? 2 : mmu_exported_page_sizes;
935
936
937 for (i = 0; i <= mmu.max_page_level; ++i) {
938 mmu.pte_bits[i] = PT_VALID | pt_kern;
939 if (i > 0)
940 mmu.pte_bits[i] |= PT_PAGESIZE;
941 }
942
943 /*
944 * NOTE Legacy 32 bit PAE mode only has the P_VALID bit at top level.
945 */
946 for (i = 1; i < mmu.num_level; ++i)
947 mmu.ptp_bits[i] = PT_PTPBITS;
948
949 /*
950 * Compute how many hash table entries to have per process for htables.
951 * We start with 1 page's worth of entries.
952 *
 * If physical memory is small, reduce the amount needed to cover it.
954 */
955 max_htables = physmax / mmu.ptes_per_table;
956 mmu.hash_cnt = MMU_PAGESIZE / sizeof (htable_t *);
957 while (mmu.hash_cnt > 16 && mmu.hash_cnt >= max_htables)
958 mmu.hash_cnt >>= 1;
959 mmu.hat32_hash_cnt = mmu.hash_cnt;
960
961 /*
962 * If running in 64 bits and physical memory is large,
963 * increase the size of the cache to cover all of memory for
964 * a 64 bit process.
965 */
966 #define HASH_MAX_LENGTH 4
967 while (mmu.hash_cnt * HASH_MAX_LENGTH < max_htables)
968 mmu.hash_cnt <<= 1;
969 }
970
971
972 /*
973 * initialize hat data structures
974 */
975 void
hat_init()
977 {
978 cv_init(&hat_list_cv, NULL, CV_DEFAULT, NULL);
979
980 /*
981 * initialize kmem caches
982 */
983 htable_init();
984 hment_init();
985
986 hat_cache = kmem_cache_create("hat_t",
987 sizeof (hat_t), 0, hati_constructor, NULL, NULL,
988 NULL, 0, 0);
989
990 hat_hash_cache = kmem_cache_create("HatHash",
991 mmu.hash_cnt * sizeof (htable_t *), 0, NULL, NULL, NULL,
992 NULL, 0, 0);
993
994 /*
995 * 32-bit PCP hats can use a smaller hash table size on large memory
996 * machines
997 */
998 if (mmu.hash_cnt == mmu.hat32_hash_cnt) {
999 hat32_hash_cache = hat_hash_cache;
1000 } else {
1001 hat32_hash_cache = kmem_cache_create("Hat32Hash",
1002 mmu.hat32_hash_cnt * sizeof (htable_t *), 0, NULL, NULL,
1003 NULL, NULL, 0, 0);
1004 }
1005
1006 /*
1007 * Set up the kernel's hat
1008 */
1009 AS_LOCK_ENTER(&kas, RW_WRITER);
1010 kas.a_hat = kmem_cache_alloc(hat_cache, KM_NOSLEEP);
1011 mutex_init(&kas.a_hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
1012 kas.a_hat->hat_as = &kas;
1013 kas.a_hat->hat_flags = 0;
1014 AS_LOCK_EXIT(&kas);
1015
1016 CPUSET_ZERO(khat_cpuset);
1017 CPUSET_ADD(khat_cpuset, CPU->cpu_id);
1018
1019 /*
 * The kernel HAT doesn't use PCP, regardless of architecture.
1021 */
1022 ASSERT3U(mmu.max_level, >, 0);
1023 kas.a_hat->hat_max_level = mmu.max_level;
1024 kas.a_hat->hat_num_copied = 0;
1025
1026 /*
 * The kernel hat's next pointer serves as the head of the hat list.
1028 * The kernel hat's prev pointer tracks the last hat on the list for
1029 * htable_steal() to use.
1030 */
1031 kas.a_hat->hat_next = NULL;
1032 kas.a_hat->hat_prev = NULL;
1033
1034 /*
1035 * Allocate an htable hash bucket for the kernel
1036 * XX64 - tune for 64 bit procs
1037 */
1038 kas.a_hat->hat_num_hash = mmu.hash_cnt;
1039 kas.a_hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_NOSLEEP);
1040 bzero(kas.a_hat->hat_ht_hash, mmu.hash_cnt * sizeof (htable_t *));
1041
1042 /*
1043 * zero out the top level and cached htable pointers
1044 */
1045 kas.a_hat->hat_ht_cached = NULL;
1046 kas.a_hat->hat_htable = NULL;
1047
1048 /*
1049 * Pre-allocate hrm_hashtab before enabling the collection of
 * refmod statistics. Allocating it on the fly would risk recursive
 * mutex enters or deadlocks.
1053 */
1054 hrm_hashtab = kmem_zalloc(HRM_HASHSIZE * sizeof (struct hrmstat *),
1055 KM_SLEEP);
1056 }
1057
1058
1059 extern void kpti_tramp_start();
1060 extern void kpti_tramp_end();
1061
1062 extern void kdi_isr_start();
1063 extern void kdi_isr_end();
1064
1065 extern gate_desc_t kdi_idt[NIDT];
1066
1067 /*
1068 * Prepare per-CPU pagetables for all processes on the 64 bit kernel.
1069 *
1070 * Each CPU has a set of 2 pagetables that are reused for any 32 bit
1071 * process it runs. They are the top level pagetable, hci_pcp_l3ptes, and
1072 * the next to top level table for the bottom 512 Gig, hci_pcp_l2ptes.
1073 */
1074 /*ARGSUSED*/
1075 static void
hat_pcp_setup(struct cpu *cpu)
1077 {
1078 #if !defined(__xpv)
1079 struct hat_cpu_info *hci = cpu->cpu_hat_info;
1080 uintptr_t va;
1081 size_t len;
1082
1083 /*
1084 * allocate the level==2 page table for the bottom most
1085 * 512Gig of address space (this is where 32 bit apps live)
1086 */
1087 ASSERT(hci != NULL);
1088 hci->hci_pcp_l2ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
1089
1090 /*
1091 * Allocate a top level pagetable and copy the kernel's
1092 * entries into it. Then link in hci_pcp_l2ptes in the 1st entry.
1093 */
1094 hci->hci_pcp_l3ptes = kmem_zalloc(MMU_PAGESIZE, KM_SLEEP);
1095 hci->hci_pcp_l3pfn =
1096 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_pcp_l3ptes);
1097 ASSERT3U(hci->hci_pcp_l3pfn, !=, PFN_INVALID);
1098 bcopy(pcp_page, hci->hci_pcp_l3ptes, MMU_PAGESIZE);
1099
1100 hci->hci_pcp_l2pfn =
1101 hat_getpfnum(kas.a_hat, (caddr_t)hci->hci_pcp_l2ptes);
1102 ASSERT3U(hci->hci_pcp_l2pfn, !=, PFN_INVALID);
1103
1104 /*
1105 * Now go through and allocate the user version of these structures.
1106 * Unlike with the kernel version, we allocate a hat to represent the
1107 * top-level page table as that will make it much simpler when we need
1108 * to patch through user entries.
1109 */
1110 hci->hci_user_hat = hat_cpu_alloc(cpu);
1111 hci->hci_user_l3pfn = hci->hci_user_hat->hat_htable->ht_pfn;
1112 ASSERT3U(hci->hci_user_l3pfn, !=, PFN_INVALID);
1113 hci->hci_user_l3ptes =
1114 (x86pte_t *)hat_kpm_mapin_pfn(hci->hci_user_l3pfn);
1115
1116 /* Skip the rest of this if KPTI is switched off at boot. */
1117 if (kpti_enable != 1)
1118 return;
1119
1120 /*
1121 * OK, now that we have this we need to go through and punch the normal
1122 * holes in the CPU's hat for this. At this point we'll punch in the
1123 * following:
1124 *
1125 * o GDT
1126 * o IDT
1127 * o LDT
1128 * o Trampoline Code
1129 * o machcpu KPTI page
1130 * o kmdb ISR code page (just trampolines)
1131 *
1132 * If this is cpu0, then we also can initialize the following because
1133 * they'll have already been allocated.
1134 *
1135 * o TSS for CPU 0
1136 * o Double Fault for CPU 0
1137 *
1138 * The following items have yet to be allocated and have not been
1139 * punched in yet. They will be punched in later:
1140 *
1141 * o TSS (mach_cpucontext_alloc_tables())
1142 * o Double Fault Stack (mach_cpucontext_alloc_tables())
1143 */
1144 hati_cpu_punchin(cpu, (uintptr_t)cpu->cpu_gdt, PROT_READ);
1145 hati_cpu_punchin(cpu, (uintptr_t)cpu->cpu_idt, PROT_READ);
1146
1147 /*
1148 * As the KDI IDT is only active during kmdb sessions (including single
1149 * stepping), typically we don't actually need this punched in (we
1150 * consider the routines that switch to the user cr3 to be toxic). But
1151 * if we ever accidentally end up on the user cr3 while on this IDT,
1152 * we'd prefer not to triple fault.
1153 */
1154 hati_cpu_punchin(cpu, (uintptr_t)&kdi_idt, PROT_READ);
1155
1156 VERIFY0((uintptr_t)&kpti_tramp_start % MMU_PAGESIZE);
1157 VERIFY0((uintptr_t)&kpti_tramp_end % MMU_PAGESIZE);
1158 for (va = (uintptr_t)&kpti_tramp_start;
1159 va < (uintptr_t)&kpti_tramp_end; va += MMU_PAGESIZE) {
1160 hati_cpu_punchin(cpu, va, PROT_READ | PROT_EXEC);
1161 }
1162
1163 VERIFY3U(((uintptr_t)cpu->cpu_m.mcpu_ldt) % MMU_PAGESIZE, ==, 0);
1164 for (va = (uintptr_t)cpu->cpu_m.mcpu_ldt, len = LDT_CPU_SIZE;
1165 len >= MMU_PAGESIZE; va += MMU_PAGESIZE, len -= MMU_PAGESIZE) {
1166 hati_cpu_punchin(cpu, va, PROT_READ);
1167 }
1168
1169 /* mcpu_pad2 is the start of the page containing the kpti_frames. */
1170 hati_cpu_punchin(cpu, (uintptr_t)&cpu->cpu_m.mcpu_pad2[0],
1171 PROT_READ | PROT_WRITE);
1172
1173 if (cpu == &cpus[0]) {
1174 /*
1175 * CPU0 uses a global for its double fault stack to deal with
1176 * the chicken and egg problem. We need to punch it into its
1177 * user HAT.
1178 */
1179 extern char dblfault_stack0[];
1180
1181 hati_cpu_punchin(cpu, (uintptr_t)cpu->cpu_m.mcpu_tss,
1182 PROT_READ);
1183
1184 for (va = (uintptr_t)dblfault_stack0,
1185 len = DEFAULTSTKSZ; len >= MMU_PAGESIZE;
1186 va += MMU_PAGESIZE, len -= MMU_PAGESIZE) {
1187 hati_cpu_punchin(cpu, va, PROT_READ | PROT_WRITE);
1188 }
1189 }
1190
1191 VERIFY0((uintptr_t)&kdi_isr_start % MMU_PAGESIZE);
1192 VERIFY0((uintptr_t)&kdi_isr_end % MMU_PAGESIZE);
1193 for (va = (uintptr_t)&kdi_isr_start;
1194 va < (uintptr_t)&kdi_isr_end; va += MMU_PAGESIZE) {
1195 hati_cpu_punchin(cpu, va, PROT_READ | PROT_EXEC);
1196 }
1197 #endif /* !__xpv */
1198 }
1199
1200 /*ARGSUSED*/
1201 static void
hat_pcp_teardown(cpu_t *cpu)
1203 {
1204 #if !defined(__xpv)
1205 struct hat_cpu_info *hci;
1206
1207 if ((hci = cpu->cpu_hat_info) == NULL)
1208 return;
1209 if (hci->hci_pcp_l2ptes != NULL)
1210 kmem_free(hci->hci_pcp_l2ptes, MMU_PAGESIZE);
1211 if (hci->hci_pcp_l3ptes != NULL)
1212 kmem_free(hci->hci_pcp_l3ptes, MMU_PAGESIZE);
1213 if (hci->hci_user_hat != NULL) {
1214 hat_free_start(hci->hci_user_hat);
1215 hat_free_end(hci->hci_user_hat);
1216 }
1217 #endif
1218 }
1219
1220 #define NEXT_HKR(r, l, s, e) { \
1221 kernel_ranges[r].hkr_level = l; \
1222 kernel_ranges[r].hkr_start_va = s; \
1223 kernel_ranges[r].hkr_end_va = e; \
1224 ++r; \
1225 }
1226
1227 /*
1228 * Finish filling in the kernel hat.
1229 * Pre fill in all top level kernel page table entries for the kernel's
1230 * part of the address range. From this point on we can't use any new
1231 * kernel large pages if they need PTE's at max_level
1232 *
1233 * create the kmap mappings.
1234 */
1235 void
hat_init_finish(void)
1237 {
1238 size_t size;
1239 uint_t r = 0;
1240 uintptr_t va;
1241 hat_kernel_range_t *rp;
1242
1243
1244 /*
1245 * We are now effectively running on the kernel hat.
1246 * Clearing use_boot_reserve shuts off using the pre-allocated boot
1247 * reserve for all HAT allocations. From here on, the reserves are
1248 * only used when avoiding recursion in kmem_alloc().
1249 */
1250 use_boot_reserve = 0;
1251 htable_adjust_reserve();
1252
1253 /*
1254 * User HATs are initialized with copies of all kernel mappings in
1255 * higher level page tables. Ensure that those entries exist.
1256 */
1257
1258 NEXT_HKR(r, 3, kernelbase, 0);
1259 #if defined(__xpv)
1260 NEXT_HKR(r, 3, HYPERVISOR_VIRT_START, HYPERVISOR_VIRT_END);
1261 #endif
1262
1263 num_kernel_ranges = r;
1264
1265 /*
1266 * Create all the kernel pagetables that will have entries
1267 * shared to user HATs.
1268 */
1269 for (r = 0; r < num_kernel_ranges; ++r) {
1270 rp = &kernel_ranges[r];
1271 for (va = rp->hkr_start_va; va != rp->hkr_end_va;
1272 va += LEVEL_SIZE(rp->hkr_level)) {
1273 htable_t *ht;
1274
1275 if (IN_HYPERVISOR_VA(va))
1276 continue;
1277
1278 /* can/must skip if a page mapping already exists */
1279 if (rp->hkr_level <= mmu.max_page_level &&
1280 (ht = htable_getpage(kas.a_hat, va, NULL)) !=
1281 NULL) {
1282 htable_release(ht);
1283 continue;
1284 }
1285
1286 (void) htable_create(kas.a_hat, va, rp->hkr_level - 1,
1287 NULL);
1288 }
1289 }
1290
1291 /*
1292 * 32 bit PAE metal kernels use only 4 of the 512 entries in the
1293 * page holding the top level pagetable. We use the remainder for
1294 * the "per CPU" page tables for PCP processes.
1295 * Map the top level kernel pagetable into the kernel to make
 * it easy to use bcopy to access these tables.
1297 *
1298 * PAE is required for the 64-bit kernel which uses this as well to
1299 * perform the per-CPU pagetables. See the big theory statement.
1300 */
1301 if (mmu.pae_hat) {
1302 pcp_page = vmem_alloc(heap_arena, MMU_PAGESIZE, VM_SLEEP);
1303 hat_devload(kas.a_hat, (caddr_t)pcp_page, MMU_PAGESIZE,
1304 kas.a_hat->hat_htable->ht_pfn,
1305 #if !defined(__xpv)
1306 PROT_WRITE |
1307 #endif
1308 PROT_READ | HAT_NOSYNC | HAT_UNORDERED_OK,
1309 HAT_LOAD | HAT_LOAD_NOCONSIST);
1310 }
1311 hat_pcp_setup(CPU);
1312
1313 /*
1314 * Create kmap (cached mappings of kernel PTEs)
1315 * for 32 bit we map from segmap_start .. ekernelheap
1316 * for 64 bit we map from segmap_start .. segmap_start + segmapsize;
1317 */
1318 size = segmapsize;
1319 hat_kmap_init((uintptr_t)segmap_start, size);
1320
1321 #if !defined(__xpv)
1322 ASSERT3U(kas.a_hat->hat_htable->ht_pfn, !=, PFN_INVALID);
1323 ASSERT3U(kpti_safe_cr3, ==,
1324 MAKECR3(kas.a_hat->hat_htable->ht_pfn, PCID_KERNEL));
1325 #endif
1326 }
1327
1328 /*
 * Update the PCP data on CPU 'cpu' to match that of the hat. If this is a 32-bit
1330 * process, then we must update the L2 pages and then the L3. If this is a
1331 * 64-bit process then we must update the L3 entries.
1332 */
1333 static void
hat_pcp_update(cpu_t *cpu, const hat_t *hat)
1335 {
1336 ASSERT3U(hat->hat_flags & HAT_COPIED, !=, 0);
1337
1338 if ((hat->hat_flags & HAT_COPIED_32) != 0) {
1339 const x86pte_t *l2src;
1340 x86pte_t *l2dst, *l3ptes, *l3uptes;
1341 /*
1342 * This is a 32-bit process. To set this up, we need to do the
1343 * following:
1344 *
1345 * - Copy the 4 L2 PTEs into the dedicated L2 table
1346 * - Zero the user L3 PTEs in the user and kernel page table
1347 * - Set the first L3 PTE to point to the CPU L2 table
1348 */
1349 l2src = hat->hat_copied_ptes;
1350 l2dst = cpu->cpu_hat_info->hci_pcp_l2ptes;
1351 l3ptes = cpu->cpu_hat_info->hci_pcp_l3ptes;
1352 l3uptes = cpu->cpu_hat_info->hci_user_l3ptes;
1353
1354 l2dst[0] = l2src[0];
1355 l2dst[1] = l2src[1];
1356 l2dst[2] = l2src[2];
1357 l2dst[3] = l2src[3];
1358
1359 /*
1360 * Make sure to use the mmu to get the number of slots. The
 * number of PCP entries that this has will always be less as
1362 * it's a 32-bit process.
1363 */
1364 bzero(l3ptes, sizeof (x86pte_t) * mmu.top_level_uslots);
1365 l3ptes[0] = MAKEPTP(cpu->cpu_hat_info->hci_pcp_l2pfn, 2);
1366 bzero(l3uptes, sizeof (x86pte_t) * mmu.top_level_uslots);
1367 l3uptes[0] = MAKEPTP(cpu->cpu_hat_info->hci_pcp_l2pfn, 2);
1368 } else {
1369 /*
1370 * This is a 64-bit process. To set this up, we need to do the
1371 * following:
1372 *
1373 * - Zero the 4 L2 PTEs in the CPU structure for safety
1374 * - Copy over the new user L3 PTEs into the kernel page table
1375 * - Copy over the new user L3 PTEs into the user page table
1376 */
1377 ASSERT3S(kpti_enable, ==, 1);
1378 bzero(cpu->cpu_hat_info->hci_pcp_l2ptes, sizeof (x86pte_t) * 4);
1379 bcopy(hat->hat_copied_ptes, cpu->cpu_hat_info->hci_pcp_l3ptes,
1380 sizeof (x86pte_t) * mmu.top_level_uslots);
1381 bcopy(hat->hat_copied_ptes, cpu->cpu_hat_info->hci_user_l3ptes,
1382 sizeof (x86pte_t) * mmu.top_level_uslots);
1383 }
1384 }
1385
1386 static void
reset_kpti(struct kpti_frame *fr, uint64_t kcr3, uint64_t ucr3)
1388 {
1389 ASSERT3U(fr->kf_tr_flag, ==, 0);
1390 #if DEBUG
1391 if (fr->kf_kernel_cr3 != 0) {
1392 ASSERT3U(fr->kf_lower_redzone, ==, 0xdeadbeefdeadbeef);
1393 ASSERT3U(fr->kf_middle_redzone, ==, 0xdeadbeefdeadbeef);
1394 ASSERT3U(fr->kf_upper_redzone, ==, 0xdeadbeefdeadbeef);
1395 }
1396 #endif
1397
1398 bzero(fr, offsetof(struct kpti_frame, kf_kernel_cr3));
1399 bzero(&fr->kf_unused, sizeof (struct kpti_frame) -
1400 offsetof(struct kpti_frame, kf_unused));
1401
1402 fr->kf_kernel_cr3 = kcr3;
1403 fr->kf_user_cr3 = ucr3;
1404 fr->kf_tr_ret_rsp = (uintptr_t)&fr->kf_tr_rsp;
1405
1406 fr->kf_lower_redzone = 0xdeadbeefdeadbeef;
1407 fr->kf_middle_redzone = 0xdeadbeefdeadbeef;
1408 fr->kf_upper_redzone = 0xdeadbeefdeadbeef;
1409 }
1410
1411 #ifdef __xpv
1412 static void
hat_switch_xen(hat_t *hat)
1414 {
1415 struct mmuext_op t[2];
1416 uint_t retcnt;
1417 uint_t opcnt = 1;
1418 uint64_t newcr3;
1419
1420 ASSERT(!(hat->hat_flags & HAT_COPIED));
1421 ASSERT(!(getcr4() & CR4_PCIDE));
1422
1423 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn, PCID_NONE);
1424
1425 t[0].cmd = MMUEXT_NEW_BASEPTR;
1426 t[0].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1427
1428 /*
1429 * There's an interesting problem here, as to what to actually specify
1430 * when switching to the kernel hat. For now we'll reuse the kernel hat
1431 * again.
1432 */
1433 t[1].cmd = MMUEXT_NEW_USER_BASEPTR;
1434 if (hat == kas.a_hat)
1435 t[1].arg1.mfn = mmu_btop(pa_to_ma(newcr3));
1436 else
1437 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
1438 ++opcnt;
1439
1440 if (HYPERVISOR_mmuext_op(t, opcnt, &retcnt, DOMID_SELF) < 0)
1441 panic("HYPERVISOR_mmu_update() failed");
1442 ASSERT(retcnt == opcnt);
1443 }
1444 #endif /* __xpv */
1445
1446 /*
1447 * Switch to a new active hat, maintaining bit masks to track active CPUs.
1448 *
1449 * With KPTI, all our HATs except kas should be using PCP. Thus, to switch
1450 * HATs, we need to copy over the new user PTEs, then set our trampoline context
1451 * as appropriate.
1452 *
1453 * If lacking PCID, we then load our new cr3, which will flush the TLB: we may
1454 * have established userspace TLB entries via kernel accesses, and these are no
1455 * longer valid. We have to do this eagerly, as we just deleted this CPU from
1456 * ->hat_cpus, so would no longer see any TLB shootdowns.
1457 *
1458 * With PCID enabled, things get a little more complicated. We would like to
1459 * keep TLB context around when entering and exiting the kernel, and to do this,
1460 * we partition the TLB into two different spaces:
1461 *
1462 * PCID_KERNEL is defined as zero, and used both by kas and all other address
1463 * spaces while in the kernel (post-trampoline).
1464 *
1465 * PCID_USER is used while in userspace. Therefore, userspace cannot use any
1466 * lingering PCID_KERNEL entries to kernel addresses it should not be able to
1467 * read.
1468 *
1469 * The trampoline cr3s are set not to invalidate on a mov to %cr3. This means if
1470 * we take a journey through the kernel without switching HATs, we have some
1471 * hope of keeping our TLB state around.
1472 *
1473 * On a hat switch, rather than deal with any necessary flushes on the way out
1474 * of the trampolines, we do them upfront here. If we're switching from kas, we
1475 * shouldn't need any invalidation.
1476 *
1477 * Otherwise, we can have stale userspace entries for both PCID_USER (what
1478 * happened before we move onto the kcr3) and PCID_KERNEL (any subsequent
1479 * userspace accesses such as ddi_copyin()). Since setcr3() won't do these
1480 * flushes on its own in PCIDE, we'll do a non-flushing load and then
1481 * invalidate everything.
1482 */
1483 void
hat_switch(hat_t *hat)
1485 {
1486 cpu_t *cpu = CPU;
1487 hat_t *old = cpu->cpu_current_hat;
1488
1489 /*
1490 * set up this information first, so we don't miss any cross calls
1491 */
1492 if (old != NULL) {
1493 if (old == hat)
1494 return;
1495 if (old != kas.a_hat)
1496 CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
1497 }
1498
1499 /*
1500 * Add this CPU to the active set for this HAT.
1501 */
1502 if (hat != kas.a_hat) {
1503 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1504 }
1505 cpu->cpu_current_hat = hat;
1506
1507 #if defined(__xpv)
1508 hat_switch_xen(hat);
1509 #else
1510 struct hat_cpu_info *info = cpu->cpu_m.mcpu_hat_info;
1511 uint64_t pcide = getcr4() & CR4_PCIDE;
1512 uint64_t kcr3, ucr3;
1513 pfn_t tl_kpfn;
1514 ulong_t flag;
1515
1516 EQUIV(kpti_enable, !mmu.pt_global);
1517
1518 if (hat->hat_flags & HAT_COPIED) {
1519 hat_pcp_update(cpu, hat);
1520 tl_kpfn = info->hci_pcp_l3pfn;
1521 } else {
1522 IMPLY(kpti_enable, hat == kas.a_hat);
1523 tl_kpfn = hat->hat_htable->ht_pfn;
1524 }
1525
1526 if (pcide) {
1527 ASSERT(kpti_enable);
1528
1529 kcr3 = MAKECR3(tl_kpfn, PCID_KERNEL) | CR3_NOINVL_BIT;
1530 ucr3 = MAKECR3(info->hci_user_l3pfn, PCID_USER) |
1531 CR3_NOINVL_BIT;
1532
1533 setcr3(kcr3);
1534 if (old != kas.a_hat)
1535 mmu_flush_tlb(FLUSH_TLB_ALL, NULL);
1536 } else {
1537 kcr3 = MAKECR3(tl_kpfn, PCID_NONE);
1538 ucr3 = kpti_enable ?
1539 MAKECR3(info->hci_user_l3pfn, PCID_NONE) :
1540 0;
1541
1542 setcr3(kcr3);
1543 }
1544
1545 /*
1546 * We will already be taking shootdowns for our new HAT, and as KPTI
1547 * invpcid emulation needs to use kf_user_cr3, make sure we don't get
1548 * any cross calls while we're inconsistent. Note that it's harmless to
1549 * have a *stale* kf_user_cr3 (we just did a FLUSH_TLB_ALL), but a
1550 * *zero* kf_user_cr3 is not going to go very well.
1551 */
1552 if (pcide)
1553 flag = intr_clear();
1554
1555 reset_kpti(&cpu->cpu_m.mcpu_kpti, kcr3, ucr3);
1556 reset_kpti(&cpu->cpu_m.mcpu_kpti_flt, kcr3, ucr3);
1557 reset_kpti(&cpu->cpu_m.mcpu_kpti_dbg, kcr3, ucr3);
1558
1559 if (pcide)
1560 intr_restore(flag);
1561
1562 #endif /* !__xpv */
1563
1564 ASSERT(cpu == CPU);
1565 }
1566
1567 /*
1568 * Utility to return a valid x86pte_t from protections, pfn, and level number
1569 */
1570 static x86pte_t
hati_mkpte(pfn_t pfn, uint_t attr, level_t level, uint_t flags)
1572 {
1573 x86pte_t pte;
1574 uint_t cache_attr = attr & HAT_ORDER_MASK;
1575
1576 pte = MAKEPTE(pfn, level);
1577
1578 if (attr & PROT_WRITE)
1579 PTE_SET(pte, PT_WRITABLE);
1580
1581 if (attr & PROT_USER)
1582 PTE_SET(pte, PT_USER);
1583
1584 if (!(attr & PROT_EXEC))
1585 PTE_SET(pte, mmu.pt_nx);
1586
1587 /*
 * Set the software bits used to track ref/mod sync's and hments.
1589 * If not using REF/MOD, set them to avoid h/w rewriting PTEs.
1590 */
1591 if (flags & HAT_LOAD_NOCONSIST)
1592 PTE_SET(pte, PT_NOCONSIST | PT_REF | PT_MOD);
1593 else if (attr & HAT_NOSYNC)
1594 PTE_SET(pte, PT_NOSYNC | PT_REF | PT_MOD);
1595
1596 /*
1597 * Set the caching attributes in the PTE. The combination
 * of attributes is poorly defined, so we pay attention
1599 * to them in the given order.
1600 *
1601 * The test for HAT_STRICTORDER is different because it's defined
1602 * as "0" - which was a stupid thing to do, but is too late to change!
1603 */
1604 if (cache_attr == HAT_STRICTORDER) {
1605 PTE_SET(pte, PT_NOCACHE);
1606 /*LINTED [Lint hates empty ifs, but it's the obvious way to do this] */
1607 } else if (cache_attr & (HAT_UNORDERED_OK | HAT_STORECACHING_OK)) {
1608 /* nothing to set */;
1609 } else if (cache_attr & (HAT_MERGING_OK | HAT_LOADCACHING_OK)) {
1610 PTE_SET(pte, PT_NOCACHE);
1611 if (is_x86_feature(x86_featureset, X86FSET_PAT))
1612 PTE_SET(pte, (level == 0) ? PT_PAT_4K : PT_PAT_LARGE);
1613 else
1614 PTE_SET(pte, PT_WRITETHRU);
1615 } else {
1616 panic("hati_mkpte(): bad caching attributes: %x\n", cache_attr);
1617 }
1618
1619 return (pte);
1620 }
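
/*
 * For illustration (a sketch, not an exhaustive description of callers):
 * hati_mkpte(pfn, PROT_READ | PROT_WRITE | HAT_UNORDERED_OK, 0, 0) yields a
 * valid, writable, cacheable, no-execute (when NX is supported) 4K PTE for
 * the given pfn, with the NOSYNC/NOCONSIST software bits left clear so that
 * ref/mod state will be synced back to the page_t.
 */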
1621
1622 /*
1623 * Duplicate address translations of the parent to the child.
1624 * This function really isn't used anymore.
1625 */
1626 /*ARGSUSED*/
1627 int
hat_dup(hat_t *old, hat_t *new, caddr_t addr, size_t len, uint_t flag)
1629 {
1630 ASSERT((uintptr_t)addr < kernelbase);
1631 ASSERT(new != kas.a_hat);
1632 ASSERT(old != kas.a_hat);
1633 return (0);
1634 }
1635
1636 /*
1637 * Allocate any hat resources required for a process being swapped in.
1638 */
1639 /*ARGSUSED*/
1640 void
hat_swapin(hat_t *hat)
1642 {
1643 /* do nothing - we let everything fault back in */
1644 }
1645
1646 /*
1647 * Unload all translations associated with an address space of a process
1648 * that is being swapped out.
1649 */
1650 void
hat_swapout(hat_t *hat)
1652 {
1653 uintptr_t vaddr = (uintptr_t)0;
1654 uintptr_t eaddr = _userlimit;
1655 htable_t *ht = NULL;
1656 level_t l;
1657
1658 XPV_DISALLOW_MIGRATE();
1659 /*
1660 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
1661 * seg_spt and shared pagetables can't be swapped out.
1662 * Take a look at segspt_shmswapout() - it's a big no-op.
1663 *
1664 * Instead we'll walk through all the address space and unload
 * any mappings which we are sure are not shared and not locked.
1666 */
1667 ASSERT(IS_PAGEALIGNED(vaddr));
1668 ASSERT(IS_PAGEALIGNED(eaddr));
1669 ASSERT(AS_LOCK_HELD(hat->hat_as));
1670 if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1671 eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1672
1673 while (vaddr < eaddr) {
1674 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1675 if (ht == NULL)
1676 break;
1677
1678 ASSERT(!IN_VA_HOLE(vaddr));
1679
1680 /*
1681 * If the page table is shared skip its entire range.
1682 */
1683 l = ht->ht_level;
1684 if (ht->ht_flags & HTABLE_SHARED_PFN) {
1685 vaddr = ht->ht_vaddr + LEVEL_SIZE(l + 1);
1686 htable_release(ht);
1687 ht = NULL;
1688 continue;
1689 }
1690
1691 /*
1692 * If the page table has no locked entries, unload this one.
1693 */
1694 if (ht->ht_lock_cnt == 0)
1695 hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1696 HAT_UNLOAD_UNMAP);
1697
1698 /*
1699 * If we have a level 0 page table with locked entries,
1700 * skip the entire page table, otherwise skip just one entry.
1701 */
1702 if (ht->ht_lock_cnt > 0 && l == 0)
1703 vaddr = ht->ht_vaddr + LEVEL_SIZE(1);
1704 else
1705 vaddr += LEVEL_SIZE(l);
1706 }
1707 if (ht)
1708 htable_release(ht);
1709
1710 /*
1711 * We're in swapout because the system is low on memory, so
1712 * go back and flush all the htables off the cached list.
1713 */
1714 htable_purge_hat(hat);
1715 XPV_ALLOW_MIGRATE();
1716 }
1717
1718 /*
1719 * returns number of bytes that have valid mappings in hat.
1720 */
1721 size_t
1722 hat_get_mapped_size(hat_t *hat)
1723 {
1724 size_t total = 0;
1725 int l;
1726
1727 for (l = 0; l <= mmu.max_page_level; l++)
1728 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1729 total += hat->hat_ism_pgcnt;
1730
1731 return (total);
1732 }
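/*
 * Worked example (a sketch, assuming the usual amd64 sizes of 4K pages at
 * level 0 and 2M pages at level 1): with hat_pages_mapped[0] == 100 and
 * hat_pages_mapped[1] == 3, the loop above contributes
 *
 *	100 << LEVEL_SHIFT(0) = 100 * 4K =   409,600 bytes
 *	  3 << LEVEL_SHIFT(1) =   3 * 2M = 6,291,456 bytes
 *
 * for a subtotal of 6,701,056 bytes before the ISM contribution is added.
 */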
1733
1734 /*
1735 * enable/disable collection of stats for hat.
1736 */
1737 int
1738 hat_stats_enable(hat_t *hat)
1739 {
1740 atomic_inc_32(&hat->hat_stats);
1741 return (1);
1742 }
1743
1744 void
1745 hat_stats_disable(hat_t *hat)
1746 {
1747 atomic_dec_32(&hat->hat_stats);
1748 }
1749
1750 /*
1751 * Utility to sync the ref/mod bits from a page table entry to the page_t
1752 * We must be holding the mapping list lock when this is called.
1753 */
1754 static void
1755 hati_sync_pte_to_page(page_t *pp, x86pte_t pte, level_t level)
1756 {
1757 uint_t rm = 0;
1758 pgcnt_t pgcnt;
1759
1760 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
1761 return;
1762
1763 if (PTE_GET(pte, PT_REF))
1764 rm |= P_REF;
1765
1766 if (PTE_GET(pte, PT_MOD))
1767 rm |= P_MOD;
1768
1769 if (rm == 0)
1770 return;
1771
1772 /*
1773 * sync to all constituent pages of a large page
1774 */
1775 ASSERT(x86_hm_held(pp));
1776 pgcnt = page_get_pagecnt(level);
1777 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
1778 for (; pgcnt > 0; --pgcnt) {
1779 /*
1780 * hat_page_demote() can't decrease
1781 * pszc below this mapping size
1782 * since this large mapping existed after we
1783 * took mlist lock.
1784 */
1785 ASSERT(pp->p_szc >= level);
1786 hat_page_setattr(pp, rm);
1787 ++pp;
1788 }
1789 }
1790
1791 /*
1792 * This is the set of PTE bits for PFN, permissions and caching
1793 * that are allowed to change on a HAT_LOAD_REMAP
1794 */
1795 #define PT_REMAP_BITS \
1796 (PT_PADDR | PT_NX | PT_WRITABLE | PT_WRITETHRU | \
1797 PT_NOCACHE | PT_PAT_4K | PT_PAT_LARGE | PT_IGNORE | PT_REF | PT_MOD)
1798
1799 #define REMAPASSERT(EX) if (!(EX)) panic("hati_pte_map: " #EX)
1800 /*
1801 * Do the low-level work to get a mapping entered into a HAT's pagetables
1802 * and in the mapping list of the associated page_t.
1803 */
1804 static int
1805 hati_pte_map(
1806 htable_t *ht,
1807 uint_t entry,
1808 page_t *pp,
1809 x86pte_t pte,
1810 int flags,
1811 void *pte_ptr)
1812 {
1813 hat_t *hat = ht->ht_hat;
1814 x86pte_t old_pte;
1815 level_t l = ht->ht_level;
1816 hment_t *hm;
1817 uint_t is_consist;
1818 uint_t is_locked;
1819 int rv = 0;
1820
1821 /*
1822 * Is this a consistent (ie. need mapping list lock) mapping?
1823 */
1824 is_consist = (pp != NULL && (flags & HAT_LOAD_NOCONSIST) == 0);
1825
1826 /*
1827 * Track locked mapping count in the htable. Do this first,
1828 * as we track locking even if there already is a mapping present.
1829 */
1830 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1831 if (is_locked)
1832 HTABLE_LOCK_INC(ht);
1833
1834 /*
1835 * Acquire the page's mapping list lock and get an hment to use.
1836 * Note that hment_prepare() might return NULL.
1837 */
1838 if (is_consist) {
1839 x86_hm_enter(pp);
1840 hm = hment_prepare(ht, entry, pp);
1841 }
1842
1843 /*
1844 * Set the new pte, retrieving the old one at the same time.
1845 */
1846 old_pte = x86pte_set(ht, entry, pte, pte_ptr);
1847
1848 /*
1849 * Did we get a large page / page table collision?
1850 */
1851 if (old_pte == LPAGE_ERROR) {
1852 if (is_locked)
1853 HTABLE_LOCK_DEC(ht);
1854 rv = -1;
1855 goto done;
1856 }
1857
1858 /*
1859 * If the mapping didn't change there is nothing more to do.
1860 */
1861 if (PTE_EQUIV(pte, old_pte))
1862 goto done;
1863
1864 /*
1865 * Install a new mapping in the page's mapping list
1866 */
1867 if (!PTE_ISVALID(old_pte)) {
1868 if (is_consist) {
1869 hment_assign(ht, entry, pp, hm);
1870 x86_hm_exit(pp);
1871 } else {
1872 ASSERT(flags & HAT_LOAD_NOCONSIST);
1873 }
1874 if (ht->ht_flags & HTABLE_COPIED) {
1875 cpu_t *cpu = CPU;
1876 hat_pcp_update(cpu, hat);
1877 }
1878 HTABLE_INC(ht->ht_valid_cnt);
1879 PGCNT_INC(hat, l);
1880 return (rv);
1881 }
1882
1883 /*
1884 * Remap's are more complicated:
1885 * - HAT_LOAD_REMAP must be specified if changing the pfn.
1886 * We also require that NOCONSIST be specified.
1887 * - Otherwise only permission or caching bits may change.
1888 */
1889 if (!PTE_ISPAGE(old_pte, l))
1890 panic("non-null/page mapping pte=" FMT_PTE, old_pte);
1891
1892 if (PTE2PFN(old_pte, l) != PTE2PFN(pte, l)) {
1893 REMAPASSERT(flags & HAT_LOAD_REMAP);
1894 REMAPASSERT(flags & HAT_LOAD_NOCONSIST);
1895 REMAPASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
1896 REMAPASSERT(pf_is_memory(PTE2PFN(old_pte, l)) ==
1897 pf_is_memory(PTE2PFN(pte, l)));
1898 REMAPASSERT(!is_consist);
1899 }
1900
1901 /*
1902 * We only let remaps change certain bits in the PTE.
1903 */
1904 if (PTE_GET(old_pte, ~PT_REMAP_BITS) != PTE_GET(pte, ~PT_REMAP_BITS))
1905 panic("remap bits changed: old_pte="FMT_PTE", pte="FMT_PTE"\n",
1906 old_pte, pte);
1907
1908 /*
1909 * We don't create any mapping list entries on a remap, so release
1910 * any allocated hment after we drop the mapping list lock.
1911 */
1912 done:
1913 if (is_consist) {
1914 x86_hm_exit(pp);
1915 if (hm != NULL)
1916 hment_free(hm);
1917 }
1918 return (rv);
1919 }
1920
1921 /*
1922 * Internal routine to load a single page table entry. This only fails if
1923 * we attempt to overwrite a page table link with a large page.
1924 */
1925 static int
1926 hati_load_common(
1927 hat_t *hat,
1928 uintptr_t va,
1929 page_t *pp,
1930 uint_t attr,
1931 uint_t flags,
1932 level_t level,
1933 pfn_t pfn)
1934 {
1935 htable_t *ht;
1936 uint_t entry;
1937 x86pte_t pte;
1938 int rv = 0;
1939
1940 /*
1941 * The number 16 is arbitrary; it's here to catch a recursion problem
1942 * early, before we blow out the kernel stack.
1943 */
1944 ++curthread->t_hatdepth;
1945 ASSERT(curthread->t_hatdepth < 16);
1946
1947 ASSERT(hat == kas.a_hat || (hat->hat_flags & HAT_PCP) != 0 ||
1948 AS_LOCK_HELD(hat->hat_as));
1949
1950 if (flags & HAT_LOAD_SHARE)
1951 hat->hat_flags |= HAT_SHARED;
1952
1953 /*
1954 * Find the page table that maps this page if it already exists.
1955 */
1956 ht = htable_lookup(hat, va, level);
1957
1958 /*
1959 * We must have HAT_LOAD_NOCONSIST if page_t is NULL.
1960 */
1961 if (pp == NULL)
1962 flags |= HAT_LOAD_NOCONSIST;
1963
1964 if (ht == NULL) {
1965 ht = htable_create(hat, va, level, NULL);
1966 ASSERT(ht != NULL);
1967 }
1968 /*
1969 * htable_va2entry checks this condition as well, but it won't include
1970 * much useful info in the panic. So we do it in advance here to include
1971 * all the context.
1972 */
1973 if (ht->ht_vaddr > va || va > HTABLE_LAST_PAGE(ht)) {
1974 panic("hati_load_common: bad htable: va=%p, last page=%p, "
1975 "ht->ht_vaddr=%p, ht->ht_level=%d", (void *)va,
1976 (void *)HTABLE_LAST_PAGE(ht), (void *)ht->ht_vaddr,
1977 (int)ht->ht_level);
1978 }
1979 entry = htable_va2entry(va, ht);
1980
1981 /*
1982 * a bunch of paranoid error checking
1983 */
1984 ASSERT(ht->ht_busy > 0);
1985 ASSERT(ht->ht_level == level);
1986
1987 /*
1988 * construct the new PTE
1989 */
1990 if (hat == kas.a_hat)
1991 attr &= ~PROT_USER;
1992 pte = hati_mkpte(pfn, attr, level, flags);
1993 if (hat == kas.a_hat && va >= kernelbase)
1994 PTE_SET(pte, mmu.pt_global);
1995
1996 /*
1997 * establish the mapping
1998 */
1999 rv = hati_pte_map(ht, entry, pp, pte, flags, NULL);
2000
2001 /*
2002 * release the htable and any reserves
2003 */
2004 htable_release(ht);
2005 --curthread->t_hatdepth;
2006 return (rv);
2007 }
2008
2009 /*
2010 * special case of hat_memload to deal with some kernel addrs for performance
2011 */
2012 static void
2013 hat_kmap_load(
2014 caddr_t addr,
2015 page_t *pp,
2016 uint_t attr,
2017 uint_t flags)
2018 {
2019 uintptr_t va = (uintptr_t)addr;
2020 x86pte_t pte;
2021 pfn_t pfn = page_pptonum(pp);
2022 pgcnt_t pg_off = mmu_btop(va - mmu.kmap_addr);
2023 htable_t *ht;
2024 uint_t entry;
2025 void *pte_ptr;
2026
2027 /*
2028 * construct the requested PTE
2029 */
2030 attr &= ~PROT_USER;
2031 attr |= HAT_STORECACHING_OK;
2032 pte = hati_mkpte(pfn, attr, 0, flags);
2033 PTE_SET(pte, mmu.pt_global);
2034
2035 /*
2036 * Figure out the pte_ptr and htable and use common code to finish up
2037 */
2038 if (mmu.pae_hat)
2039 pte_ptr = mmu.kmap_ptes + pg_off;
2040 else
2041 pte_ptr = (x86pte32_t *)mmu.kmap_ptes + pg_off;
2042 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr) >>
2043 LEVEL_SHIFT(1)];
2044 entry = htable_va2entry(va, ht);
2045 ++curthread->t_hatdepth;
2046 ASSERT(curthread->t_hatdepth < 16);
2047 (void) hati_pte_map(ht, entry, pp, pte, flags, pte_ptr);
2048 --curthread->t_hatdepth;
2049 }
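/*
 * A quick sketch of the index arithmetic above, assuming mmu.kmap_addr and
 * the first entry of mmu.kmap_htables cover the same base address: for a va
 * that lies 5MB past that base, pg_off is mmu_btop(5MB) == 1280 and the
 * htable used is kmap_htables[5MB >> LEVEL_SHIFT(1)] == kmap_htables[2],
 * since each level 1 table spans 2MB of virtual address space.
 */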
2050
2051 /*
2052 * hat_memload() - load a translation to the given page struct
2053 *
2054 * Flags for hat_memload/hat_devload/hat_*attr.
2055 *
2056 * HAT_LOAD Default flags to load a translation to the page.
2057 *
2058 * HAT_LOAD_LOCK Lock down mapping resources; hat_map(), hat_memload(),
2059 * and hat_devload().
2060 *
2061 * HAT_LOAD_NOCONSIST Do not add mapping to page_t mapping list.
2062 * sets PT_NOCONSIST
2063 *
2064 * HAT_LOAD_SHARE A flag to hat_memload() to indicate that the h/w page
2065 * tables that map some user pages (not kas) are shared by more
2066 * than one process (e.g. ISM).
2067 *
2068 * HAT_LOAD_REMAP Reload a valid pte with a different page frame.
2069 *
2070 * HAT_NO_KALLOC Do not kmem_alloc while creating the mapping; at this
2071 * point, it's setting up mapping to allocate internal
2072 * hat layer data structures. This flag forces hat layer
2073 * to tap its reserves in order to prevent infinite
2074 * recursion.
2075 *
2076 * The following is a protection attribute (like PROT_READ, etc.)
2077 *
2078 * HAT_NOSYNC set PT_NOSYNC - this mapping's ref/mod bits
2079 * are never cleared.
2080 *
2081 * Installing new valid PTE's and creation of the mapping list
2082 * entry are controlled under the same lock. It's derived from the
2083 * page_t being mapped.
2084 */
2085 static uint_t supported_memload_flags =
2086 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_ADV | HAT_LOAD_NOCONSIST |
2087 HAT_LOAD_SHARE | HAT_NO_KALLOC | HAT_LOAD_REMAP | HAT_LOAD_TEXT;
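/*
 * A minimal usage sketch (the caller and variable names are hypothetical):
 * a segment driver resolving a fault on a user address typically loads a
 * single page with something like
 *
 *	hat_memload(as->a_hat, (caddr_t)va, pp,
 *	    PROT_READ | PROT_WRITE | PROT_USER, HAT_LOAD);
 *
 * while holding the address space lock, as the ASSERTs in hat_memload()
 * below require.
 */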
2088
2089 void
2090 hat_memload(
2091 hat_t *hat,
2092 caddr_t addr,
2093 page_t *pp,
2094 uint_t attr,
2095 uint_t flags)
2096 {
2097 uintptr_t va = (uintptr_t)addr;
2098 level_t level = 0;
2099 pfn_t pfn = page_pptonum(pp);
2100
2101 XPV_DISALLOW_MIGRATE();
2102 ASSERT(IS_PAGEALIGNED(va));
2103 ASSERT(hat == kas.a_hat || va < _userlimit);
2104 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2105 ASSERT((flags & supported_memload_flags) == flags);
2106
2107 ASSERT(!IN_VA_HOLE(va));
2108 ASSERT(!PP_ISFREE(pp));
2109
2110 /*
2111 * kernel address special case for performance.
2112 */
2113 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2114 ASSERT(hat == kas.a_hat);
2115 hat_kmap_load(addr, pp, attr, flags);
2116 XPV_ALLOW_MIGRATE();
2117 return;
2118 }
2119
2120 /*
2121 * This is used for memory with normal caching enabled, so
2122 * always set HAT_STORECACHING_OK.
2123 */
2124 attr |= HAT_STORECACHING_OK;
2125 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
2126 panic("unexpected hati_load_common() failure");
2127 XPV_ALLOW_MIGRATE();
2128 }
2129
2130 /* ARGSUSED */
2131 void
2132 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
2133 uint_t attr, uint_t flags, hat_region_cookie_t rcookie)
2134 {
2135 hat_memload(hat, addr, pp, attr, flags);
2136 }
2137
2138 /*
2139 * Load the given array of page structs using large pages when possible
2140 */
2141 void
2142 hat_memload_array(
2143 hat_t *hat,
2144 caddr_t addr,
2145 size_t len,
2146 page_t **pages,
2147 uint_t attr,
2148 uint_t flags)
2149 {
2150 uintptr_t va = (uintptr_t)addr;
2151 uintptr_t eaddr = va + len;
2152 level_t level;
2153 size_t pgsize;
2154 pgcnt_t pgindx = 0;
2155 pfn_t pfn;
2156 pgcnt_t i;
2157
2158 XPV_DISALLOW_MIGRATE();
2159 ASSERT(IS_PAGEALIGNED(va));
2160 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2161 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2162 ASSERT((flags & supported_memload_flags) == flags);
2163
2164 /*
2165 * memload is used for memory with full caching enabled, so
2166 * set HAT_STORECACHING_OK.
2167 */
2168 attr |= HAT_STORECACHING_OK;
2169
2170 /*
2171 * handle all pages using largest possible pagesize
2172 */
2173 while (va < eaddr) {
2174 /*
2175 * decide what level mapping to use (ie. pagesize)
2176 */
2177 pfn = page_pptonum(pages[pgindx]);
2178 for (level = mmu.max_page_level; ; --level) {
2179 pgsize = LEVEL_SIZE(level);
2180 if (level == 0)
2181 break;
2182
2183 if (!IS_P2ALIGNED(va, pgsize) ||
2184 (eaddr - va) < pgsize ||
2185 !IS_P2ALIGNED(pfn_to_pa(pfn), pgsize))
2186 continue;
2187
2188 /*
2189 * To use a large mapping of this size, all the
2190 * pages we are passed must be sequential subpages
2191 * of the large page.
2192 * hat_page_demote() can't change p_szc because
2193 * all pages are locked.
2194 */
2195 if (pages[pgindx]->p_szc >= level) {
2196 for (i = 0; i < mmu_btop(pgsize); ++i) {
2197 if (pfn + i !=
2198 page_pptonum(pages[pgindx + i]))
2199 break;
2200 ASSERT(pages[pgindx + i]->p_szc >=
2201 level);
2202 ASSERT(pages[pgindx] + i ==
2203 pages[pgindx + i]);
2204 }
2205 if (i == mmu_btop(pgsize)) {
2206 #ifdef DEBUG
2207 if (level == 2)
2208 map1gcnt++;
2209 #endif
2210 break;
2211 }
2212 }
2213 }
2214
2215 /*
2216 * Load this page mapping. If the load fails, try a smaller
2217 * pagesize.
2218 */
2219 ASSERT(!IN_VA_HOLE(va));
2220 while (hati_load_common(hat, va, pages[pgindx], attr,
2221 flags, level, pfn) != 0) {
2222 if (level == 0)
2223 panic("unexpected hati_load_common() failure");
2224 --level;
2225 pgsize = LEVEL_SIZE(level);
2226 }
2227
2228 /*
2229 * move to next page
2230 */
2231 va += pgsize;
2232 pgindx += mmu_btop(pgsize);
2233 }
2234 XPV_ALLOW_MIGRATE();
2235 }
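/*
 * Example of the level selection above (assuming 4K/2M sizes for levels
 * 0/1): a call covering 4MB where both va and the physical address are 2MB
 * aligned, and where each 512-entry run of pages[] is physically contiguous
 * with p_szc >= 1, results in two level 1 (2MB) loads. If any of those
 * conditions fails, that stretch of the range falls back to level 0 (4K)
 * mappings.
 */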
2236
2237 /* ARGSUSED */
2238 void
2239 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
2240 struct page **pps, uint_t attr, uint_t flags,
2241 hat_region_cookie_t rcookie)
2242 {
2243 hat_memload_array(hat, addr, len, pps, attr, flags);
2244 }
2245
2246 /*
2247 * void hat_devload(hat, addr, len, pf, attr, flags)
2248 * load/lock the given page frame number
2249 *
2250 * Advisory ordering attributes. Apply only to device mappings.
2251 *
2252 * HAT_STRICTORDER: the CPU must issue the references in order, as the
2253 * programmer specified. This is the default.
2254 * HAT_UNORDERED_OK: the CPU may reorder the references (this is all kinds
2255 * of reordering; store or load with store or load).
2256 * HAT_MERGING_OK: merging and batching: the CPU may merge individual stores
2257 * to consecutive locations (for example, turn two consecutive byte
2258 * stores into one halfword store), and it may batch individual loads
2259 * (for example, turn two consecutive byte loads into one halfword load).
2260 * This also implies re-ordering.
2261 * HAT_LOADCACHING_OK: the CPU may cache the data it fetches and reuse it
2262 * until another store occurs. The default is to fetch new data
2263 * on every load. This also implies merging.
2264 * HAT_STORECACHING_OK: the CPU may keep the data in the cache and push it to
2265 * the device (perhaps with other data) at a later time. The default is
2266 * to push the data right away. This also implies load caching.
2267 *
2268 * Equivalent of hat_memload(), but can be used for device memory where
2269 * there are no page_t's and we support additional flags (write merging, etc).
2270 * Note that we can have large page mappings with this interface.
2271 */
2272 int supported_devload_flags = HAT_LOAD | HAT_LOAD_LOCK |
2273 HAT_LOAD_NOCONSIST | HAT_STRICTORDER | HAT_UNORDERED_OK |
2274 HAT_MERGING_OK | HAT_LOADCACHING_OK | HAT_STORECACHING_OK;
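/*
 * A usage sketch (the device, kaddr and pfn are hypothetical): a driver
 * mapping a page of memory-mapped device registers into the kernel might do
 *
 *	hat_devload(kas.a_hat, kaddr, MMU_PAGESIZE, pfn,
 *	    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
 *	    HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
 *
 * HAT_STRICTORDER (the default) keeps the accesses uncached and strictly
 * ordered, and HAT_LOAD_NOCONSIST skips the page_t mapping list, which is
 * what hat_devload() forces anyway for frames that are not memory.
 */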
2275
2276 void
2277 hat_devload(
2278 hat_t *hat,
2279 caddr_t addr,
2280 size_t len,
2281 pfn_t pfn,
2282 uint_t attr,
2283 int flags)
2284 {
2285 uintptr_t va = ALIGN2PAGE(addr);
2286 uintptr_t eva = va + len;
2287 level_t level;
2288 size_t pgsize;
2289 page_t *pp;
2290 int f; /* per PTE copy of flags - maybe modified */
2291 uint_t a; /* per PTE copy of attr */
2292
2293 XPV_DISALLOW_MIGRATE();
2294 ASSERT(IS_PAGEALIGNED(va));
2295 ASSERT(hat == kas.a_hat || eva <= _userlimit);
2296 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2297 ASSERT((flags & supported_devload_flags) == flags);
2298
2299 /*
2300 * handle all pages
2301 */
2302 while (va < eva) {
2303
2304 /*
2305 * decide what level mapping to use (ie. pagesize)
2306 */
2307 for (level = mmu.max_page_level; ; --level) {
2308 pgsize = LEVEL_SIZE(level);
2309 if (level == 0)
2310 break;
2311 if (IS_P2ALIGNED(va, pgsize) &&
2312 (eva - va) >= pgsize &&
2313 IS_P2ALIGNED(pfn, mmu_btop(pgsize))) {
2314 #ifdef DEBUG
2315 if (level == 2)
2316 map1gcnt++;
2317 #endif
2318 break;
2319 }
2320 }
2321
2322 /*
2323 * If this is just memory then allow caching (this happens
2324 * for the nucleus pages) - though HAT_PLAT_NOCACHE can be used
2325 * to override that. If we don't have a page_t then make sure
2326 * NOCONSIST is set.
2327 */
2328 a = attr;
2329 f = flags;
2330 if (!pf_is_memory(pfn))
2331 f |= HAT_LOAD_NOCONSIST;
2332 else if (!(a & HAT_PLAT_NOCACHE))
2333 a |= HAT_STORECACHING_OK;
2334
2335 if (f & HAT_LOAD_NOCONSIST)
2336 pp = NULL;
2337 else
2338 pp = page_numtopp_nolock(pfn);
2339
2340 /*
2341 * Check to make sure we are really trying to map a valid
2342 * memory page. A caller wishing to intentionally map
2343 * free memory pages will have passed the HAT_LOAD_NOCONSIST
2344 * flag, in which case pp will be NULL.
2345 */
2346 if (pp != NULL) {
2347 if (PP_ISFREE(pp)) {
2348 panic("hat_devload: loading "
2349 "a mapping to free page %p", (void *)pp);
2350 }
2351
2352 if (!PAGE_LOCKED(pp) && !PP_ISNORELOC(pp)) {
2353 panic("hat_devload: loading a mapping "
2354 "to an unlocked page %p",
2355 (void *)pp);
2356 }
2357 }
2358
2359 /*
2360 * load this page mapping
2361 */
2362 ASSERT(!IN_VA_HOLE(va));
2363 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
2364 if (level == 0)
2365 panic("unexpected hati_load_common() failure");
2366 --level;
2367 pgsize = LEVEL_SIZE(level);
2368 }
2369
2370 /*
2371 * move to next page
2372 */
2373 va += pgsize;
2374 pfn += mmu_btop(pgsize);
2375 }
2376 XPV_ALLOW_MIGRATE();
2377 }
2378
2379 /*
2380 * void hat_unlock(hat, addr, len)
2381 * unlock the mappings to a given range of addresses
2382 *
2383 * Locks are tracked by ht_lock_cnt in the htable.
2384 */
2385 void
2386 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
2387 {
2388 uintptr_t vaddr = (uintptr_t)addr;
2389 uintptr_t eaddr = vaddr + len;
2390 htable_t *ht = NULL;
2391
2392 /*
2393 * kernel entries are always locked, we don't track lock counts
2394 */
2395 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2396 ASSERT(IS_PAGEALIGNED(vaddr));
2397 ASSERT(IS_PAGEALIGNED(eaddr));
2398 if (hat == kas.a_hat)
2399 return;
2400 if (eaddr > _userlimit)
2401 panic("hat_unlock() address out of range - above _userlimit");
2402
2403 XPV_DISALLOW_MIGRATE();
2404 ASSERT(AS_LOCK_HELD(hat->hat_as));
2405 while (vaddr < eaddr) {
2406 (void) htable_walk(hat, &ht, &vaddr, eaddr);
2407 if (ht == NULL)
2408 break;
2409
2410 ASSERT(!IN_VA_HOLE(vaddr));
2411
2412 if (ht->ht_lock_cnt < 1)
2413 panic("hat_unlock(): lock_cnt < 1, "
2414 "htable=%p, vaddr=%p\n", (void *)ht, (void *)vaddr);
2415 HTABLE_LOCK_DEC(ht);
2416
2417 vaddr += LEVEL_SIZE(ht->ht_level);
2418 }
2419 if (ht)
2420 htable_release(ht);
2421 XPV_ALLOW_MIGRATE();
2422 }
2423
2424 /* ARGSUSED */
2425 void
2426 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
2427 hat_region_cookie_t rcookie)
2428 {
2429 panic("No shared region support on x86");
2430 }
2431
2432 #if !defined(__xpv)
2433 /*
2434 * Cross call service routine to demap a range of virtual
2435 * pages on the current CPU or flush all mappings in TLB.
2436 */
2437 static int
2438 hati_demap_func(xc_arg_t a1, xc_arg_t a2, xc_arg_t a3)
2439 {
2440 _NOTE(ARGUNUSED(a3));
2441 hat_t *hat = (hat_t *)a1;
2442 tlb_range_t *range = (tlb_range_t *)a2;
2443
2444 /*
2445 * If the target hat isn't the kernel and this CPU isn't operating
2446 * in the target hat, we can ignore the cross call.
2447 */
2448 if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
2449 return (0);
2450
2451 if (range->tr_va != DEMAP_ALL_ADDR) {
2452 mmu_flush_tlb(FLUSH_TLB_RANGE, range);
2453 return (0);
2454 }
2455
2456 /*
2457 * We are flushing all of userspace.
2458 *
2459 * When using PCP, we first need to update this CPU's idea of the PCP
2460 * PTEs.
2461 */
2462 if (hat->hat_flags & HAT_COPIED) {
2463 hat_pcp_update(CPU, hat);
2464 }
2465
2466 mmu_flush_tlb(FLUSH_TLB_NONGLOBAL, NULL);
2467 return (0);
2468 }
2469
2470 #define TLBIDLE_CPU_HALTED (0x1UL)
2471 #define TLBIDLE_INVAL_ALL (0x2UL)
2472 #define CAS_TLB_INFO(cpu, old, new) \
2473 atomic_cas_ulong((ulong_t *)&(cpu)->cpu_m.mcpu_tlb_info, (old), (new))
2474
2475 /*
2476 * Record that a CPU is going idle
2477 */
2478 void
2479 tlb_going_idle(void)
2480 {
2481 atomic_or_ulong((ulong_t *)&CPU->cpu_m.mcpu_tlb_info,
2482 TLBIDLE_CPU_HALTED);
2483 }
2484
2485 /*
2486 * Service a delayed TLB flush when coming out of being idle.
2487 * It is called from the cpu idle notification with interrupts disabled.
2488 */
2489 void
2490 tlb_service(void)
2491 {
2492 ulong_t tlb_info;
2493 ulong_t found;
2494
2495 /*
2496 * We only have to do something if coming out of being idle.
2497 */
2498 tlb_info = CPU->cpu_m.mcpu_tlb_info;
2499 if (tlb_info & TLBIDLE_CPU_HALTED) {
2500 ASSERT(CPU->cpu_current_hat == kas.a_hat);
2501
2502 /*
2503 * Atomic clear and fetch of old state.
2504 */
2505 while ((found = CAS_TLB_INFO(CPU, tlb_info, 0)) != tlb_info) {
2506 ASSERT(found & TLBIDLE_CPU_HALTED);
2507 tlb_info = found;
2508 SMT_PAUSE();
2509 }
2510 if (tlb_info & TLBIDLE_INVAL_ALL)
2511 mmu_flush_tlb(FLUSH_TLB_ALL, NULL);
2512 }
2513 }
2514 #endif /* !__xpv */
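/*
 * Sketch of the idle-CPU TLB flush handshake implemented above and used by
 * hat_tlb_inval_range() below:
 *
 *	idle CPU:	tlb_going_idle() sets mcpu_tlb_info to
 *			TLBIDLE_CPU_HALTED
 *	other CPU:	instead of a cross call, CAS's in
 *			TLBIDLE_CPU_HALTED | TLBIDLE_INVAL_ALL
 *	idle CPU:	tlb_service() on wakeup atomically clears the word
 *			and, if TLBIDLE_INVAL_ALL was set, flushes its TLB
 */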
2515
2516 /*
2517 * Internal routine to do cross calls to invalidate a range of pages on
2518 * all CPUs using a given hat.
2519 */
2520 void
2521 hat_tlb_inval_range(hat_t *hat, tlb_range_t *in_range)
2522 {
2523 extern int flushes_require_xcalls; /* from mp_startup.c */
2524 cpuset_t justme;
2525 cpuset_t cpus_to_shootdown;
2526 tlb_range_t range = *in_range;
2527 #ifndef __xpv
2528 cpuset_t check_cpus;
2529 cpu_t *cpup;
2530 int c;
2531 #endif
2532
2533 /*
2534 * If the hat is being destroyed, there are no more users, so
2535 * demap need not do anything.
2536 */
2537 if (hat->hat_flags & HAT_FREEING)
2538 return;
2539
2540 /*
2541 * If demapping from a shared pagetable, we best demap the
2542 * entire set of user TLBs, since we don't know what addresses
2543 * these were shared at.
2544 */
2545 if (hat->hat_flags & HAT_SHARED) {
2546 hat = kas.a_hat;
2547 range.tr_va = DEMAP_ALL_ADDR;
2548 }
2549
2550 /*
2551 * if not running with multiple CPUs, don't use cross calls
2552 */
2553 if (panicstr || !flushes_require_xcalls) {
2554 #ifdef __xpv
2555 if (range.tr_va == DEMAP_ALL_ADDR) {
2556 xen_flush_tlb();
2557 } else {
2558 for (size_t i = 0; i < TLB_RANGE_LEN(&range);
2559 i += MMU_PAGESIZE) {
2560 xen_flush_va((caddr_t)(range.tr_va + i));
2561 }
2562 }
2563 #else
2564 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)&range, 0);
2565 #endif
2566 return;
2567 }
2568
2569
2570 /*
2571 * Determine CPUs to shootdown. Kernel changes always do all CPUs.
2572 * Otherwise it's just CPUs currently executing in this hat.
2573 */
2574 kpreempt_disable();
2575 CPUSET_ONLY(justme, CPU->cpu_id);
2576 if (hat == kas.a_hat)
2577 cpus_to_shootdown = khat_cpuset;
2578 else
2579 cpus_to_shootdown = hat->hat_cpus;
2580
2581 #ifndef __xpv
2582 /*
2583 * If any CPUs in the set are idle, just request a delayed flush
2584 * and avoid waking them up.
2585 */
2586 check_cpus = cpus_to_shootdown;
2587 for (c = 0; c < NCPU && !CPUSET_ISNULL(check_cpus); ++c) {
2588 ulong_t tlb_info;
2589
2590 if (!CPU_IN_SET(check_cpus, c))
2591 continue;
2592 CPUSET_DEL(check_cpus, c);
2593 cpup = cpu[c];
2594 if (cpup == NULL)
2595 continue;
2596
2597 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2598 while (tlb_info == TLBIDLE_CPU_HALTED) {
2599 (void) CAS_TLB_INFO(cpup, TLBIDLE_CPU_HALTED,
2600 TLBIDLE_CPU_HALTED | TLBIDLE_INVAL_ALL);
2601 SMT_PAUSE();
2602 tlb_info = cpup->cpu_m.mcpu_tlb_info;
2603 }
2604 if (tlb_info == (TLBIDLE_CPU_HALTED | TLBIDLE_INVAL_ALL)) {
2605 HATSTAT_INC(hs_tlb_inval_delayed);
2606 CPUSET_DEL(cpus_to_shootdown, c);
2607 }
2608 }
2609 #endif
2610
2611 if (CPUSET_ISNULL(cpus_to_shootdown) ||
2612 CPUSET_ISEQUAL(cpus_to_shootdown, justme)) {
2613
2614 #ifdef __xpv
2615 if (range.tr_va == DEMAP_ALL_ADDR) {
2616 xen_flush_tlb();
2617 } else {
2618 for (size_t i = 0; i < TLB_RANGE_LEN(&range);
2619 i += MMU_PAGESIZE) {
2620 xen_flush_va((caddr_t)(range.tr_va + i));
2621 }
2622 }
2623 #else
2624 (void) hati_demap_func((xc_arg_t)hat, (xc_arg_t)&range, 0);
2625 #endif
2626
2627 } else {
2628
2629 CPUSET_ADD(cpus_to_shootdown, CPU->cpu_id);
2630 #ifdef __xpv
2631 if (range.tr_va == DEMAP_ALL_ADDR) {
2632 xen_gflush_tlb(cpus_to_shootdown);
2633 } else {
2634 for (size_t i = 0; i < TLB_RANGE_LEN(&range);
2635 i += MMU_PAGESIZE) {
2636 xen_gflush_va((caddr_t)(range.tr_va + i),
2637 cpus_to_shootdown);
2638 }
2639 }
2640 #else
2641 xc_call((xc_arg_t)hat, (xc_arg_t)&range, 0,
2642 CPUSET2BV(cpus_to_shootdown), hati_demap_func);
2643 #endif
2644
2645 }
2646 kpreempt_enable();
2647 }
2648
2649 void
2650 hat_tlb_inval(hat_t *hat, uintptr_t va)
2651 {
2652 /*
2653 * Create range for a single page.
2654 */
2655 tlb_range_t range;
2656 range.tr_va = va;
2657 range.tr_cnt = 1; /* one page */
2658 range.tr_level = MIN_PAGE_LEVEL; /* pages are MMU_PAGESIZE */
2659
2660 hat_tlb_inval_range(hat, &range);
2661 }
2662
2663 /*
2664 * Interior routine for HAT_UNLOADs from hat_unload_callback(),
2665 * hat_kmap_unload() OR from hat_steal() code. This routine doesn't
2666 * handle releasing of the htables.
2667 */
2668 void
2669 hat_pte_unmap(
2670 htable_t *ht,
2671 uint_t entry,
2672 uint_t flags,
2673 x86pte_t old_pte,
2674 void *pte_ptr,
2675 boolean_t tlb)
2676 {
2677 hat_t *hat = ht->ht_hat;
2678 hment_t *hm = NULL;
2679 page_t *pp = NULL;
2680 level_t l = ht->ht_level;
2681 pfn_t pfn;
2682
2683 /*
2684 * We always track the locking counts, even if nothing is unmapped
2685 */
2686 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2687 ASSERT(ht->ht_lock_cnt > 0);
2688 HTABLE_LOCK_DEC(ht);
2689 }
2690
2691 /*
2692 * Figure out which page's mapping list lock to acquire using the PFN
2693 * passed in "old" PTE. We then attempt to invalidate the PTE.
2694 * If another thread, probably a hat_pageunload, has asynchronously
2695 * unmapped/remapped this address we'll loop here.
2696 */
2697 ASSERT(ht->ht_busy > 0);
2698 while (PTE_ISVALID(old_pte)) {
2699 pfn = PTE2PFN(old_pte, l);
2700 if (PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST) {
2701 pp = NULL;
2702 } else {
2703 #ifdef __xpv
2704 if (pfn == PFN_INVALID)
2705 panic("Invalid PFN, but not PT_NOCONSIST");
2706 #endif
2707 pp = page_numtopp_nolock(pfn);
2708 if (pp == NULL) {
2709 panic("no page_t, not NOCONSIST: old_pte="
2710 FMT_PTE " ht=%lx entry=0x%x pte_ptr=%lx",
2711 old_pte, (uintptr_t)ht, entry,
2712 (uintptr_t)pte_ptr);
2713 }
2714 x86_hm_enter(pp);
2715 }
2716
2717 old_pte = x86pte_inval(ht, entry, old_pte, pte_ptr, tlb);
2718
2719 /*
2720 * If the page hadn't changed we've unmapped it and can proceed
2721 */
2722 if (PTE_ISVALID(old_pte) && PTE2PFN(old_pte, l) == pfn)
2723 break;
2724
2725 /*
2726 * Otherwise, we'll have to retry with the current old_pte.
2727 * Drop the hment lock, since the pfn may have changed.
2728 */
2729 if (pp != NULL) {
2730 x86_hm_exit(pp);
2731 pp = NULL;
2732 } else {
2733 ASSERT(PTE_GET(old_pte, PT_SOFTWARE) >= PT_NOCONSIST);
2734 }
2735 }
2736
2737 /*
2738 * If the old mapping wasn't valid, there's nothing more to do
2739 */
2740 if (!PTE_ISVALID(old_pte)) {
2741 if (pp != NULL)
2742 x86_hm_exit(pp);
2743 return;
2744 }
2745
2746 /*
2747 * Take care of syncing any MOD/REF bits and removing the hment.
2748 */
2749 if (pp != NULL) {
2750 if (!(flags & HAT_UNLOAD_NOSYNC))
2751 hati_sync_pte_to_page(pp, old_pte, l);
2752 hm = hment_remove(pp, ht, entry);
2753 x86_hm_exit(pp);
2754 if (hm != NULL)
2755 hment_free(hm);
2756 }
2757
2758 /*
2759 * Handle book keeping in the htable and hat
2760 */
2761 ASSERT(ht->ht_valid_cnt > 0);
2762 HTABLE_DEC(ht->ht_valid_cnt);
2763 PGCNT_DEC(hat, l);
2764 }
2765
2766 /*
2767 * very cheap unload implementation to special case some kernel addresses
2768 */
2769 static void
2770 hat_kmap_unload(caddr_t addr, size_t len, uint_t flags)
2771 {
2772 uintptr_t va = (uintptr_t)addr;
2773 uintptr_t eva = va + len;
2774 pgcnt_t pg_index;
2775 htable_t *ht;
2776 uint_t entry;
2777 x86pte_t *pte_ptr;
2778 x86pte_t old_pte;
2779
2780 for (; va < eva; va += MMU_PAGESIZE) {
2781 /*
2782 * Get the PTE
2783 */
2784 pg_index = mmu_btop(va - mmu.kmap_addr);
2785 pte_ptr = PT_INDEX_PTR(mmu.kmap_ptes, pg_index);
2786 old_pte = GET_PTE(pte_ptr);
2787
2788 /*
2789 * get the htable / entry
2790 */
2791 ht = mmu.kmap_htables[(va - mmu.kmap_htables[0]->ht_vaddr)
2792 >> LEVEL_SHIFT(1)];
2793 entry = htable_va2entry(va, ht);
2794
2795 /*
2796 * use mostly common code to unmap it.
2797 */
2798 hat_pte_unmap(ht, entry, flags, old_pte, pte_ptr, B_TRUE);
2799 }
2800 }
2801
2802
2803 /*
2804 * unload a range of virtual address space (no callback)
2805 */
2806 void
2807 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2808 {
2809 uintptr_t va = (uintptr_t)addr;
2810
2811 XPV_DISALLOW_MIGRATE();
2812 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2813
2814 /*
2815 * special case for performance.
2816 */
2817 if (mmu.kmap_addr <= va && va < mmu.kmap_eaddr) {
2818 ASSERT(hat == kas.a_hat);
2819 hat_kmap_unload(addr, len, flags);
2820 } else {
2821 hat_unload_callback(hat, addr, len, flags, NULL);
2822 }
2823 XPV_ALLOW_MIGRATE();
2824 }
2825
2826 /*
2827 * Invalidate the TLB, and perform the callback to the upper level VM system,
2828 * for the specified ranges of contiguous pages.
2829 */
2830 static void
2831 handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, tlb_range_t *range)
2832 {
2833 while (cnt > 0) {
2834 --cnt;
2835 hat_tlb_inval_range(hat, &range[cnt]);
2836
2837 if (cb != NULL) {
2838 cb->hcb_start_addr = (caddr_t)range[cnt].tr_va;
2839 cb->hcb_end_addr = cb->hcb_start_addr;
2840 cb->hcb_end_addr += range[cnt].tr_cnt <<
2841 LEVEL_SHIFT(range[cnt].tr_level);
2842 cb->hcb_function(cb);
2843 }
2844 }
2845 }
2846
2847 /*
2848 * Unload a given range of addresses (has optional callback)
2849 *
2850 * Flags:
2851 * define HAT_UNLOAD 0x00
2852 * define HAT_UNLOAD_NOSYNC 0x02
2853 * define HAT_UNLOAD_UNLOCK 0x04
2854 * define HAT_UNLOAD_OTHER 0x08 - not used
2855 * define HAT_UNLOAD_UNMAP 0x10 - same as HAT_UNLOAD
2856 */
2857 #define MAX_UNLOAD_CNT (8)
2858 void
2859 hat_unload_callback(
2860 hat_t *hat,
2861 caddr_t addr,
2862 size_t len,
2863 uint_t flags,
2864 hat_callback_t *cb)
2865 {
2866 uintptr_t vaddr = (uintptr_t)addr;
2867 uintptr_t eaddr = vaddr + len;
2868 htable_t *ht = NULL;
2869 uint_t entry;
2870 uintptr_t contig_va = (uintptr_t)-1L;
2871 tlb_range_t r[MAX_UNLOAD_CNT];
2872 uint_t r_cnt = 0;
2873 x86pte_t old_pte;
2874
2875 XPV_DISALLOW_MIGRATE();
2876 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2877 ASSERT(IS_PAGEALIGNED(vaddr));
2878 ASSERT(IS_PAGEALIGNED(eaddr));
2879
2880 /*
2881 * Special case a single page being unloaded for speed. This happens
2882 * quite frequently, COW faults after a fork() for example.
2883 */
2884 if (cb == NULL && len == MMU_PAGESIZE) {
2885 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2886 if (ht != NULL) {
2887 if (PTE_ISVALID(old_pte)) {
2888 hat_pte_unmap(ht, entry, flags, old_pte,
2889 NULL, B_TRUE);
2890 }
2891 htable_release(ht);
2892 }
2893 XPV_ALLOW_MIGRATE();
2894 return;
2895 }
2896
2897 while (vaddr < eaddr) {
2898 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2899 if (ht == NULL)
2900 break;
2901
2902 ASSERT(!IN_VA_HOLE(vaddr));
2903
2904 if (vaddr < (uintptr_t)addr)
2905 panic("hat_unload_callback(): unmap inside large page");
2906
2907 /*
2908 * We'll do the call backs for contiguous ranges
2909 */
2910 if (vaddr != contig_va ||
2911 (r_cnt > 0 && r[r_cnt - 1].tr_level != ht->ht_level)) {
2912 if (r_cnt == MAX_UNLOAD_CNT) {
2913 handle_ranges(hat, cb, r_cnt, r);
2914 r_cnt = 0;
2915 }
2916 r[r_cnt].tr_va = vaddr;
2917 r[r_cnt].tr_cnt = 0;
2918 r[r_cnt].tr_level = ht->ht_level;
2919 ++r_cnt;
2920 }
2921
2922 /*
2923 * Unload one mapping (for a single page) from the page tables.
2924 * Note that we do not remove the mapping from the TLB yet,
2925 * as indicated by the tlb=FALSE argument to hat_pte_unmap().
2926 * handle_ranges() will clear the TLB entries with one call to
2927 * hat_tlb_inval_range() per contiguous range. This is
2928 * safe because the page can not be reused until the
2929 * callback is made (or we return).
2930 */
2931 entry = htable_va2entry(vaddr, ht);
2932 hat_pte_unmap(ht, entry, flags, old_pte, NULL, B_FALSE);
2933 ASSERT(ht->ht_level <= mmu.max_page_level);
2934 vaddr += LEVEL_SIZE(ht->ht_level);
2935 contig_va = vaddr;
2936 ++r[r_cnt - 1].tr_cnt;
2937 }
2938 if (ht)
2939 htable_release(ht);
2940
2941 /*
2942 * handle last range for callbacks
2943 */
2944 if (r_cnt > 0)
2945 handle_ranges(hat, cb, r_cnt, r);
2946 XPV_ALLOW_MIGRATE();
2947 }
2948
2949 /*
2950 * Invalidate a virtual address translation on a slave CPU during
2951 * panic() dumps.
2952 */
2953 void
2954 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2955 {
2956 ssize_t sz;
2957 caddr_t endva = va + size;
2958
2959 while (va < endva) {
2960 sz = hat_getpagesize(hat, va);
2961 if (sz < 0) {
2962 #ifdef __xpv
2963 xen_flush_tlb();
2964 #else
2965 mmu_flush_tlb(FLUSH_TLB_ALL, NULL);
2966 #endif
2967 break;
2968 }
2969 #ifdef __xpv
2970 xen_flush_va(va);
2971 #else
2972 mmu_flush_tlb_kpage((uintptr_t)va);
2973 #endif
2974 va += sz;
2975 }
2976 }
2977
2978 /*
2979 * synchronize mapping with software data structures
2980 *
2981 * This interface is currently only used by the working set monitor
2982 * driver.
2983 */
2984 /*ARGSUSED*/
2985 void
2986 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2987 {
2988 uintptr_t vaddr = (uintptr_t)addr;
2989 uintptr_t eaddr = vaddr + len;
2990 htable_t *ht = NULL;
2991 uint_t entry;
2992 x86pte_t pte;
2993 x86pte_t save_pte;
2994 x86pte_t new;
2995 page_t *pp;
2996
2997 ASSERT(!IN_VA_HOLE(vaddr));
2998 ASSERT(IS_PAGEALIGNED(vaddr));
2999 ASSERT(IS_PAGEALIGNED(eaddr));
3000 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
3001
3002 XPV_DISALLOW_MIGRATE();
3003 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
3004 try_again:
3005 pte = htable_walk(hat, &ht, &vaddr, eaddr);
3006 if (ht == NULL)
3007 break;
3008 entry = htable_va2entry(vaddr, ht);
3009
3010 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
3011 PTE_GET(pte, PT_REF | PT_MOD) == 0)
3012 continue;
3013
3014 /*
3015 * We need to acquire the mapping list lock to protect
3016 * against hat_pageunload(), hat_unload(), etc.
3017 */
3018 pp = page_numtopp_nolock(PTE2PFN(pte, ht->ht_level));
3019 if (pp == NULL)
3020 break;
3021 x86_hm_enter(pp);
3022 save_pte = pte;
3023 pte = x86pte_get(ht, entry);
3024 if (pte != save_pte) {
3025 x86_hm_exit(pp);
3026 goto try_again;
3027 }
3028 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC ||
3029 PTE_GET(pte, PT_REF | PT_MOD) == 0) {
3030 x86_hm_exit(pp);
3031 continue;
3032 }
3033
3034 /*
3035 * Need to clear ref or mod bits. We may compete with
3036 * hardware updating the R/M bits and have to try again.
3037 */
3038 if (flags == HAT_SYNC_ZERORM) {
3039 new = pte;
3040 PTE_CLR(new, PT_REF | PT_MOD);
3041 pte = hati_update_pte(ht, entry, pte, new);
3042 if (pte != 0) {
3043 x86_hm_exit(pp);
3044 goto try_again;
3045 }
3046 } else {
3047 /*
3048 * sync the PTE to the page_t
3049 */
3050 hati_sync_pte_to_page(pp, save_pte, ht->ht_level);
3051 }
3052 x86_hm_exit(pp);
3053 }
3054 if (ht)
3055 htable_release(ht);
3056 XPV_ALLOW_MIGRATE();
3057 }
3058
3059 /*
3060 * void hat_map(hat, addr, len, flags)
3061 */
3062 /*ARGSUSED*/
3063 void
3064 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
3065 {
3066 /* does nothing */
3067 }
3068
3069 /*
3070 * uint_t hat_getattr(hat, addr, *attr)
3071 * returns attr for <hat,addr> in *attr. returns 0 if there was a
3072 * mapping and *attr is valid, nonzero if there was no mapping and
3073 * *attr is not valid.
3074 */
3075 uint_t
3076 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
3077 {
3078 uintptr_t vaddr = ALIGN2PAGE(addr);
3079 htable_t *ht = NULL;
3080 x86pte_t pte;
3081
3082 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
3083
3084 if (IN_VA_HOLE(vaddr))
3085 return ((uint_t)-1);
3086
3087 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
3088 if (ht == NULL)
3089 return ((uint_t)-1);
3090
3091 if (!PTE_ISVALID(pte) || !PTE_ISPAGE(pte, ht->ht_level)) {
3092 htable_release(ht);
3093 return ((uint_t)-1);
3094 }
3095
3096 *attr = PROT_READ;
3097 if (PTE_GET(pte, PT_WRITABLE))
3098 *attr |= PROT_WRITE;
3099 if (PTE_GET(pte, PT_USER))
3100 *attr |= PROT_USER;
3101 if (!PTE_GET(pte, mmu.pt_nx))
3102 *attr |= PROT_EXEC;
3103 if (PTE_GET(pte, PT_SOFTWARE) >= PT_NOSYNC)
3104 *attr |= HAT_NOSYNC;
3105 htable_release(ht);
3106 return (0);
3107 }
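/*
 * Usage sketch (the variable names are hypothetical): to check whether an
 * address is currently mapped writable, a caller might do
 *
 *	uint_t attr;
 *
 *	if (hat_getattr(hat, addr, &attr) == 0 && (attr & PROT_WRITE))
 *		... the mapping exists and is writable ...
 *
 * remembering this is only a snapshot; the mapping can change as soon as
 * the htable is released.
 */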
3108
3109 /*
3110 * hat_updateattr() applies the given attribute change to an existing mapping
3111 */
3112 #define HAT_LOAD_ATTR 1
3113 #define HAT_SET_ATTR 2
3114 #define HAT_CLR_ATTR 3
3115
3116 static void
3117 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
3118 {
3119 uintptr_t vaddr = (uintptr_t)addr;
3120 uintptr_t eaddr = (uintptr_t)addr + len;
3121 htable_t *ht = NULL;
3122 uint_t entry;
3123 x86pte_t oldpte, newpte;
3124 page_t *pp;
3125
3126 XPV_DISALLOW_MIGRATE();
3127 ASSERT(IS_PAGEALIGNED(vaddr));
3128 ASSERT(IS_PAGEALIGNED(eaddr));
3129 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
3130 for (; vaddr < eaddr; vaddr += LEVEL_SIZE(ht->ht_level)) {
3131 try_again:
3132 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
3133 if (ht == NULL)
3134 break;
3135 if (PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOCONSIST)
3136 continue;
3137
3138 pp = page_numtopp_nolock(PTE2PFN(oldpte, ht->ht_level));
3139 if (pp == NULL)
3140 continue;
3141 x86_hm_enter(pp);
3142
3143 newpte = oldpte;
3144 /*
3145 * We found a page table entry in the desired range,
3146 * figure out the new attributes.
3147 */
3148 if (what == HAT_SET_ATTR || what == HAT_LOAD_ATTR) {
3149 if ((attr & PROT_WRITE) &&
3150 !PTE_GET(oldpte, PT_WRITABLE))
3151 newpte |= PT_WRITABLE;
3152
3153 if ((attr & HAT_NOSYNC) &&
3154 PTE_GET(oldpte, PT_SOFTWARE) < PT_NOSYNC)
3155 newpte |= PT_NOSYNC;
3156
3157 if ((attr & PROT_EXEC) && PTE_GET(oldpte, mmu.pt_nx))
3158 newpte &= ~mmu.pt_nx;
3159 }
3160
3161 if (what == HAT_LOAD_ATTR) {
3162 if (!(attr & PROT_WRITE) &&
3163 PTE_GET(oldpte, PT_WRITABLE))
3164 newpte &= ~PT_WRITABLE;
3165
3166 if (!(attr & HAT_NOSYNC) &&
3167 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
3168 newpte &= ~PT_SOFTWARE;
3169
3170 if (!(attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
3171 newpte |= mmu.pt_nx;
3172 }
3173
3174 if (what == HAT_CLR_ATTR) {
3175 if ((attr & PROT_WRITE) && PTE_GET(oldpte, PT_WRITABLE))
3176 newpte &= ~PT_WRITABLE;
3177
3178 if ((attr & HAT_NOSYNC) &&
3179 PTE_GET(oldpte, PT_SOFTWARE) >= PT_NOSYNC)
3180 newpte &= ~PT_SOFTWARE;
3181
3182 if ((attr & PROT_EXEC) && !PTE_GET(oldpte, mmu.pt_nx))
3183 newpte |= mmu.pt_nx;
3184 }
3185
3186 /*
3187 * Ensure NOSYNC/NOCONSIST mappings have REF and MOD set.
3188 * x86pte_set() depends on this.
3189 */
3190 if (PTE_GET(newpte, PT_SOFTWARE) >= PT_NOSYNC)
3191 newpte |= PT_REF | PT_MOD;
3192
3193 /*
3194 * what about PROT_READ or others? this code only handles:
3195 * EXEC, WRITE, NOSYNC
3196 */
3197
3198 /*
3199 * If new PTE really changed, update the table.
3200 */
3201 if (newpte != oldpte) {
3202 entry = htable_va2entry(vaddr, ht);
3203 oldpte = hati_update_pte(ht, entry, oldpte, newpte);
3204 if (oldpte != 0) {
3205 x86_hm_exit(pp);
3206 goto try_again;
3207 }
3208 }
3209 x86_hm_exit(pp);
3210 }
3211 if (ht)
3212 htable_release(ht);
3213 XPV_ALLOW_MIGRATE();
3214 }
3215
3216 /*
3217 * Various wrappers for hat_updateattr()
3218 */
3219 void
3220 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
3221 {
3222 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
3223 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
3224 }
3225
3226 void
3227 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
3228 {
3229 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
3230 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
3231 }
3232
3233 void
3234 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
3235 {
3236 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
3237 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
3238 }
3239
3240 void
3241 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
3242 {
3243 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
3244 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
3245 }
3246
3247 /*
3248 * size_t hat_getpagesize(hat, addr)
3249 * returns pagesize in bytes for <hat, addr>. returns -1 if there is
3250 * no mapping. This is an advisory call.
3251 */
3252 ssize_t
3253 hat_getpagesize(hat_t *hat, caddr_t addr)
3254 {
3255 uintptr_t vaddr = ALIGN2PAGE(addr);
3256 htable_t *ht;
3257 size_t pagesize;
3258
3259 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
3260 if (IN_VA_HOLE(vaddr))
3261 return (-1);
3262 ht = htable_getpage(hat, vaddr, NULL);
3263 if (ht == NULL)
3264 return (-1);
3265 pagesize = LEVEL_SIZE(ht->ht_level);
3266 htable_release(ht);
3267 return (pagesize);
3268 }
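/*
 * For example (assuming the standard amd64 sizes), an address backed by a
 * level 0 mapping returns 4096, a level 1 large page returns 2MB, a level 2
 * page returns 1GB, and an unmapped address returns -1.
 */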
3269
3270
3271
3272 /*
3273 * pfn_t hat_getpfnum(hat, addr)
3274 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
3275 */
3276 pfn_t
3277 hat_getpfnum(hat_t *hat, caddr_t addr)
3278 {
3279 uintptr_t vaddr = ALIGN2PAGE(addr);
3280 htable_t *ht;
3281 uint_t entry;
3282 pfn_t pfn = PFN_INVALID;
3283
3284 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
3285 if (khat_running == 0)
3286 return (PFN_INVALID);
3287
3288 if (IN_VA_HOLE(vaddr))
3289 return (PFN_INVALID);
3290
3291 XPV_DISALLOW_MIGRATE();
3292 /*
3293 * A very common use of hat_getpfnum() is from the DDI for kernel pages.
3294 * Use the kmap_ptes (which also covers the 32 bit heap) to speed
3295 * this up.
3296 */
3297 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
3298 x86pte_t pte;
3299 pgcnt_t pg_index;
3300
3301 pg_index = mmu_btop(vaddr - mmu.kmap_addr);
3302 pte = GET_PTE(PT_INDEX_PTR(mmu.kmap_ptes, pg_index));
3303 if (PTE_ISVALID(pte))
3304 /*LINTED [use of constant 0 causes a lint warning] */
3305 pfn = PTE2PFN(pte, 0);
3306 XPV_ALLOW_MIGRATE();
3307 return (pfn);
3308 }
3309
3310 ht = htable_getpage(hat, vaddr, &entry);
3311 if (ht == NULL) {
3312 XPV_ALLOW_MIGRATE();
3313 return (PFN_INVALID);
3314 }
3315 ASSERT(vaddr >= ht->ht_vaddr);
3316 ASSERT(vaddr <= HTABLE_LAST_PAGE(ht));
3317 pfn = PTE2PFN(x86pte_get(ht, entry), ht->ht_level);
3318 if (ht->ht_level > 0)
3319 pfn += mmu_btop(vaddr & LEVEL_OFFSET(ht->ht_level));
3320 htable_release(ht);
3321 XPV_ALLOW_MIGRATE();
3322 return (pfn);
3323 }
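/*
 * Usage sketch (assuming the usual mmu_ptob()/MMU_PAGEOFFSET definitions):
 * a caller needing the physical address behind a kernel virtual address
 * would typically do
 *
 *	pfn_t pfn = hat_getpfnum(kas.a_hat, va);
 *
 *	if (pfn != PFN_INVALID)
 *		pa = mmu_ptob(pfn) + ((uintptr_t)va & MMU_PAGEOFFSET);
 */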
3324
3325 /*
3326 * int hat_probe(hat, addr)
3327 * return 0 if no valid mapping is present. Faster version
3328 * of hat_getattr() on certain architectures.
3329 */
3330 int
3331 hat_probe(hat_t *hat, caddr_t addr)
3332 {
3333 uintptr_t vaddr = ALIGN2PAGE(addr);
3334 uint_t entry;
3335 htable_t *ht;
3336 pgcnt_t pg_off;
3337
3338 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
3339 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
3340 if (IN_VA_HOLE(vaddr))
3341 return (0);
3342
3343 /*
3344 * Most common use of hat_probe is from segmap. We special case it
3345 * for performance.
3346 */
3347 if (mmu.kmap_addr <= vaddr && vaddr < mmu.kmap_eaddr) {
3348 pg_off = mmu_btop(vaddr - mmu.kmap_addr);
3349 if (mmu.pae_hat)
3350 return (PTE_ISVALID(mmu.kmap_ptes[pg_off]));
3351 else
3352 return (PTE_ISVALID(
3353 ((x86pte32_t *)mmu.kmap_ptes)[pg_off]));
3354 }
3355
3356 ht = htable_getpage(hat, vaddr, &entry);
3357 htable_release(ht);
3358 return (ht != NULL);
3359 }
3360
3361 /*
3362 * Find out if the segment for hat_share()/hat_unshare() is DISM or locked ISM.
3363 */
3364 static int
3365 is_it_dism(hat_t *hat, caddr_t va)
3366 {
3367 struct seg *seg;
3368 struct shm_data *shmd;
3369 struct spt_data *sptd;
3370
3371 seg = as_findseg(hat->hat_as, va, 0);
3372 ASSERT(seg != NULL);
3373 ASSERT(seg->s_base <= va);
3374 shmd = (struct shm_data *)seg->s_data;
3375 ASSERT(shmd != NULL);
3376 sptd = (struct spt_data *)shmd->shm_sptseg->s_data;
3377 ASSERT(sptd != NULL);
3378 if (sptd->spt_flags & SHM_PAGEABLE)
3379 return (1);
3380 return (0);
3381 }
3382
3383 /*
3384 * Simple implementation of ISM. hat_share() is similar to hat_memload_array(),
3385 * except that we use the ism_hat's existing mappings to determine the pages
3386 * and protections to use for this hat. If we find a full properly aligned
3387 * and sized pagetable, we will attempt to share the pagetable itself.
3388 */
3389 /*ARGSUSED*/
3390 int
3391 hat_share(
3392 hat_t *hat,
3393 caddr_t addr,
3394 hat_t *ism_hat,
3395 caddr_t src_addr,
3396 size_t len, /* almost useless value, see below. */
3397 uint_t ismszc)
3398 {
3399 uintptr_t vaddr_start = (uintptr_t)addr;
3400 uintptr_t vaddr;
3401 uintptr_t eaddr = vaddr_start + len;
3402 uintptr_t ism_addr_start = (uintptr_t)src_addr;
3403 uintptr_t ism_addr = ism_addr_start;
3404 uintptr_t e_ism_addr = ism_addr + len;
3405 htable_t *ism_ht = NULL;
3406 htable_t *ht;
3407 x86pte_t pte;
3408 page_t *pp;
3409 pfn_t pfn;
3410 level_t l;
3411 pgcnt_t pgcnt;
3412 uint_t prot;
3413 int is_dism;
3414 int flags;
3415
3416 /*
3417 * We might be asked to share an empty DISM hat by as_dup()
3418 */
3419 ASSERT(hat != kas.a_hat);
3420 ASSERT(eaddr <= _userlimit);
3421 if (!(ism_hat->hat_flags & HAT_SHARED)) {
3422 ASSERT(hat_get_mapped_size(ism_hat) == 0);
3423 return (0);
3424 }
3425 XPV_DISALLOW_MIGRATE();
3426
3427 /*
3428 * The SPT segment driver often passes us a size larger than there are
3429 * valid mappings. That's because it rounds the segment size up to a
3430 * large pagesize, even if the actual memory mapped by ism_hat is less.
3431 */
3432 ASSERT(IS_PAGEALIGNED(vaddr_start));
3433 ASSERT(IS_PAGEALIGNED(ism_addr_start));
3434 ASSERT(ism_hat->hat_flags & HAT_SHARED);
3435 is_dism = is_it_dism(hat, addr);
3436 while (ism_addr < e_ism_addr) {
3437 /*
3438 * use htable_walk to get the next valid ISM mapping
3439 */
3440 pte = htable_walk(ism_hat, &ism_ht, &ism_addr, e_ism_addr);
3441 if (ism_ht == NULL)
3442 break;
3443
3444 /*
3445 * First check to see if we already share the page table.
3446 */
3447 l = ism_ht->ht_level;
3448 vaddr = vaddr_start + (ism_addr - ism_addr_start);
3449 ht = htable_lookup(hat, vaddr, l);
3450 if (ht != NULL) {
3451 if (ht->ht_flags & HTABLE_SHARED_PFN)
3452 goto shared;
3453 htable_release(ht);
3454 goto not_shared;
3455 }
3456
3457 /*
3458 * Can't ever share top table.
3459 */
3460 if (l == mmu.max_level)
3461 goto not_shared;
3462
3463 /*
3464 * Avoid level mismatches later due to DISM faults.
3465 */
3466 if (is_dism && l > 0)
3467 goto not_shared;
3468
3469 /*
3470 * addresses and lengths must align
3471 * table must be fully populated
3472 * no lower level page tables
3473 */
3474 if (ism_addr != ism_ht->ht_vaddr ||
3475 (vaddr & LEVEL_OFFSET(l + 1)) != 0)
3476 goto not_shared;
3477
3478 /*
3479 * The range of address space must cover a full table.
3480 */
3481 if (e_ism_addr - ism_addr < LEVEL_SIZE(l + 1))
3482 goto not_shared;
3483
3484 /*
3485 * All entries in the ISM page table must be leaf PTEs.
3486 */
3487 if (l > 0) {
3488 int e;
3489
3490 /*
3491 * We know the 0th is from htable_walk() above.
3492 */
3493 for (e = 1; e < HTABLE_NUM_PTES(ism_ht); ++e) {
3494 x86pte_t pte;
3495 pte = x86pte_get(ism_ht, e);
3496 if (!PTE_ISPAGE(pte, l))
3497 goto not_shared;
3498 }
3499 }
3500
3501 /*
3502 * share the page table
3503 */
3504 ht = htable_create(hat, vaddr, l, ism_ht);
3505 shared:
3506 ASSERT(ht->ht_flags & HTABLE_SHARED_PFN);
3507 ASSERT(ht->ht_shares == ism_ht);
3508 hat->hat_ism_pgcnt +=
3509 (ism_ht->ht_valid_cnt - ht->ht_valid_cnt) <<
3510 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3511 ht->ht_valid_cnt = ism_ht->ht_valid_cnt;
3512 htable_release(ht);
3513 ism_addr = ism_ht->ht_vaddr + LEVEL_SIZE(l + 1);
3514 htable_release(ism_ht);
3515 ism_ht = NULL;
3516 continue;
3517
3518 not_shared:
3519 /*
3520 * Unable to share the page table. Instead we will
3521 * create new mappings from the values in the ISM mappings.
3522 * Figure out what level size mappings to use;
3523 */
3524 for (l = ism_ht->ht_level; l > 0; --l) {
3525 if (LEVEL_SIZE(l) <= eaddr - vaddr &&
3526 (vaddr & LEVEL_OFFSET(l)) == 0)
3527 break;
3528 }
3529
3530 /*
3531 * The ISM mapping might be larger than the share area,
3532 * be careful to truncate it if needed.
3533 */
3534 if (eaddr - vaddr >= LEVEL_SIZE(ism_ht->ht_level)) {
3535 pgcnt = mmu_btop(LEVEL_SIZE(ism_ht->ht_level));
3536 } else {
3537 pgcnt = mmu_btop(eaddr - vaddr);
3538 l = 0;
3539 }
3540
3541 pfn = PTE2PFN(pte, ism_ht->ht_level);
3542 ASSERT(pfn != PFN_INVALID);
3543 while (pgcnt > 0) {
3544 /*
3545 * Make a new pte for the PFN for this level.
3546 * Copy protections for the pte from the ISM pte.
3547 */
3548 pp = page_numtopp_nolock(pfn);
3549 ASSERT(pp != NULL);
3550
3551 prot = PROT_USER | PROT_READ | HAT_UNORDERED_OK;
3552 if (PTE_GET(pte, PT_WRITABLE))
3553 prot |= PROT_WRITE;
3554 if (!PTE_GET(pte, PT_NX))
3555 prot |= PROT_EXEC;
3556
3557 flags = HAT_LOAD;
3558 if (!is_dism)
3559 flags |= HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST;
3560 while (hati_load_common(hat, vaddr, pp, prot, flags,
3561 l, pfn) != 0) {
3562 if (l == 0)
3563 panic("hati_load_common() failure");
3564 --l;
3565 }
3566
3567 vaddr += LEVEL_SIZE(l);
3568 ism_addr += LEVEL_SIZE(l);
3569 pfn += mmu_btop(LEVEL_SIZE(l));
3570 pgcnt -= mmu_btop(LEVEL_SIZE(l));
3571 }
3572 }
3573 if (ism_ht != NULL)
3574 htable_release(ism_ht);
3575 XPV_ALLOW_MIGRATE();
3576 return (0);
3577 }
3578
3579
3580 /*
3581 * hat_unshare() is similar to hat_unload_callback(), but
3582 * we have to look for empty shared pagetables. Note that
3583 * hat_unshare() is always invoked against an entire segment.
3584 */
3585 /*ARGSUSED*/
3586 void
3587 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3588 {
3589 uint64_t vaddr = (uintptr_t)addr;
3590 uintptr_t eaddr = vaddr + len;
3591 htable_t *ht = NULL;
3592 uint_t need_demaps = 0;
3593 int flags = HAT_UNLOAD_UNMAP;
3594 level_t l;
3595
3596 ASSERT(hat != kas.a_hat);
3597 ASSERT(eaddr <= _userlimit);
3598 ASSERT(IS_PAGEALIGNED(vaddr));
3599 ASSERT(IS_PAGEALIGNED(eaddr));
3600 XPV_DISALLOW_MIGRATE();
3601
3602 /*
3603 * First go through and remove any shared pagetables.
3604 *
3605 * Note that it's ok to delay the TLB shootdown till the entire range is
3606 * finished, because if hat_pageunload() were to unload a shared
3607 * pagetable page, its hat_tlb_inval() will do a global TLB invalidate.
3608 */
3609 l = mmu.max_page_level;
3610 if (l == mmu.max_level)
3611 --l;
3612 for (; l >= 0; --l) {
3613 for (vaddr = (uintptr_t)addr; vaddr < eaddr;
3614 vaddr = (vaddr & LEVEL_MASK(l + 1)) + LEVEL_SIZE(l + 1)) {
3615 ASSERT(!IN_VA_HOLE(vaddr));
3616 /*
3617 * find a pagetable that maps the current address
3618 */
3619 ht = htable_lookup(hat, vaddr, l);
3620 if (ht == NULL)
3621 continue;
3622 if (ht->ht_flags & HTABLE_SHARED_PFN) {
3623 /*
3624 * clear page count, set valid_cnt to 0,
3625 * let htable_release() finish the job
3626 */
3627 hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3628 (LEVEL_SHIFT(ht->ht_level) - MMU_PAGESHIFT);
3629 ht->ht_valid_cnt = 0;
3630 need_demaps = 1;
3631 }
3632 htable_release(ht);
3633 }
3634 }
3635
3636 /*
3637 * flush the TLBs - since we're probably dealing with MANY mappings
3638 * we just do a full invalidation.
3639 */
3640 if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3641 hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3642
3643 /*
3644 * Now go back and clean up any unaligned mappings that
3645 * couldn't share pagetables.
3646 */
3647 if (!is_it_dism(hat, addr))
3648 flags |= HAT_UNLOAD_UNLOCK;
3649 hat_unload(hat, addr, len, flags);
3650 XPV_ALLOW_MIGRATE();
3651 }
3652
3653
3654 /*
3655 * hat_reserve() does nothing
3656 */
3657 /*ARGSUSED*/
3658 void
3659 hat_reserve(struct as *as, caddr_t addr, size_t len)
3660 {
3661 }
3662
3663
3664 /*
3665 * Called when all mappings to a page should have write permission removed.
3666 * Mostly stolen from hat_pagesync()
3667 */
3668 static void
3669 hati_page_clrwrt(struct page *pp)
3670 {
3671 hment_t *hm = NULL;
3672 htable_t *ht;
3673 uint_t entry;
3674 x86pte_t old;
3675 x86pte_t new;
3676 uint_t pszc = 0;
3677
3678 XPV_DISALLOW_MIGRATE();
3679 next_size:
3680 /*
3681 * walk thru the mapping list clearing write permission
3682 */
3683 x86_hm_enter(pp);
3684 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
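/*
 * mappings at a level below the current group size don't cover the
 * original page, so skip them
 */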
3685 if (ht->ht_level < pszc)
3686 continue;
3687 old = x86pte_get(ht, entry);
3688
3689 for (;;) {
3690 /*
3691 * Is this mapping of interest?
3692 */
3693 if (PTE2PFN(old, ht->ht_level) != pp->p_pagenum ||
3694 PTE_GET(old, PT_WRITABLE) == 0)
3695 break;
3696
3697 /*
3698 * Clear ref/mod writable bits. This requires cross
3699 * calls to ensure any executing TLBs see cleared bits.
3700 */
3701 new = old;
3702 PTE_CLR(new, PT_REF | PT_MOD | PT_WRITABLE);
3703 old = hati_update_pte(ht, entry, old, new);
3704 if (old != 0)
3705 continue;
3706
3707 break;
3708 }
3709 }
3710 x86_hm_exit(pp);
3711 while (pszc < pp->p_szc) {
3712 page_t *tpp;
3713 pszc++;
3714 tpp = PP_GROUPLEADER(pp, pszc);
3715 if (pp != tpp) {
3716 pp = tpp;
3717 goto next_size;
3718 }
3719 }
3720 XPV_ALLOW_MIGRATE();
3721 }
3722
3723 /*
3724 * void hat_page_setattr(pp, flag)
3725 * void hat_page_clrattr(pp, flag)
3726 * used to set/clr ref/mod bits.
3727 */
3728 void
3729 hat_page_setattr(struct page *pp, uint_t flag)
3730 {
3731 vnode_t *vp = pp->p_vnode;
3732 kmutex_t *vphm = NULL;
3733 page_t **listp;
3734 int noshuffle;
3735
3736 noshuffle = flag & P_NSH;
3737 flag &= ~P_NSH;
3738
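/* nothing to do if the requested ref/mod bits are already set */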
3739 if (PP_GETRM(pp, flag) == flag)
3740 return;
3741
3742 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp) &&
3743 !noshuffle) {
3744 vphm = page_vnode_mutex(vp);
3745 mutex_enter(vphm);
3746 }
3747
3748 PP_SETRM(pp, flag);
3749
3750 if (vphm != NULL) {
3751
3752 /*
3753 * Some File Systems examine v_pages for NULL w/o
3754 * grabbing the vphm mutex. Must not let it become NULL when
3755 * pp is the only page on the list.
3756 */
3757 if (pp->p_vpnext != pp) {
3758 page_vpsub(&vp->v_pages, pp);
3759 if (vp->v_pages != NULL)
3760 listp = &vp->v_pages->p_vpprev->p_vpnext;
3761 else
3762 listp = &vp->v_pages;
3763 page_vpadd(listp, pp);
3764 }
3765 mutex_exit(vphm);
3766 }
3767 }
3768
3769 void
3770 hat_page_clrattr(struct page *pp, uint_t flag)
3771 {
3772 vnode_t *vp = pp->p_vnode;
3773 ASSERT(!(flag & ~(P_MOD | P_REF | P_RO)));
3774
3775 /*
3776 * Caller is expected to hold page's io lock for VMODSORT to work
3777 * correctly with pvn_vplist_dirty() and pvn_getdirty() when mod
3778 * bit is cleared.
3779 * We don't assert this to avoid tripping some existing third-party
3780 * code. The dirty page is moved back to the top of the v_pages list
3781 * after IO is done in pvn_write_done().
3782 */
3783 PP_CLRRM(pp, flag);
3784
3785 if ((flag & P_MOD) != 0 && vp != NULL && IS_VMODSORT(vp)) {
3786
3787 /*
3788 * VMODSORT works by removing write permissions and getting
3789 * a fault when a page is made dirty. At this point
3790 * we need to remove write permission from all mappings
3791 * to this page.
3792 */
3793 hati_page_clrwrt(pp);
3794 }
3795 }
3796
3797 /*
3798 * If flag is specified, returns 0 if the attribute is disabled
3799 * and non-zero if enabled. If flag specifies multiple attributes
3800 * then returns 0 if ALL attributes are disabled. This is an advisory
3801 * call.
3802 */
3803 uint_t
3804 hat_page_getattr(struct page *pp, uint_t flag)
3805 {
3806 return (PP_GETRM(pp, flag));
3807 }
3808
3809
3810 /*
3811 * common code used by hat_pageunload() and hment_steal()
3812 */
3813 hment_t *
3814 hati_page_unmap(page_t *pp, htable_t *ht, uint_t entry)
3815 {
3816 x86pte_t old_pte;
3817 pfn_t pfn = pp->p_pagenum;
3818 hment_t *hm;
3819
3820 /*
3821 * We need to acquire a hold on the htable in order to
3822 * do the invalidate. We know the htable must exist, since
3823 * unmaps don't release the htable until after removing any
3824 * hment. Having x86_hm_enter() keeps that from proceeding.
3825 */
3826 htable_acquire(ht);
3827
3828 /*
3829 * Invalidate the PTE and remove the hment.
3830 */
3831 old_pte = x86pte_inval(ht, entry, 0, NULL, B_TRUE);
3832 if (PTE2PFN(old_pte, ht->ht_level) != pfn) {
3833 panic("x86pte_inval() failure found PTE = " FMT_PTE
3834 " pfn being unmapped is %lx ht=0x%lx entry=0x%x",
3835 old_pte, pfn, (uintptr_t)ht, entry);
3836 }
3837
3838 /*
3839 * Clean up all the htable information for this mapping
3840 */
3841 ASSERT(ht->ht_valid_cnt > 0);
3842 HTABLE_DEC(ht->ht_valid_cnt);
3843 PGCNT_DEC(ht->ht_hat, ht->ht_level);
3844
3845 /*
3846 * sync ref/mod bits to the page_t
3847 */
3848 if (PTE_GET(old_pte, PT_SOFTWARE) < PT_NOSYNC)
3849 hati_sync_pte_to_page(pp, old_pte, ht->ht_level);
3850
3851 /*
3852 * Remove the mapping list entry for this page.
3853 */
3854 hm = hment_remove(pp, ht, entry);
3855
3856 /*
3857 * drop the mapping list lock so that we might free the
3858 * hment and htable.
3859 */
3860 x86_hm_exit(pp);
3861 htable_release(ht);
3862 return (hm);
3863 }
3864
3865 extern int vpm_enable;
3866 /*
3867 * Unload all translations to a page. If the page is a subpage of a large
3868 * page, the large page mappings are also removed.
3869 *
3870 * The forceflags are unused.
3871 */
3872
3873 /*ARGSUSED*/
3874 static int
3875 hati_pageunload(struct page *pp, uint_t pg_szcd, uint_t forceflag)
3876 {
3877 page_t *cur_pp = pp;
3878 hment_t *hm;
3879 hment_t *prev;
3880 htable_t *ht;
3881 uint_t entry;
3882 level_t level;
3883
3884 XPV_DISALLOW_MIGRATE();
3885
3886 /*
3887 * prevent recursion due to kmem_free()
3888 */
3889 ++curthread->t_hatdepth;
3890 ASSERT(curthread->t_hatdepth < 16);
3891
3892 /*
3893 * clear the vpm ref.
3894 */
3895 if (vpm_enable) {
3896 pp->p_vpmref = 0;
3897 }
3898 /*
3899 * The loop with next_size handles pages with multiple pagesize mappings
3900 */
3901 next_size:
3902 for (;;) {
3903
3904 /*
3905 * Get a mapping list entry
3906 */
3907 x86_hm_enter(cur_pp);
3908 for (prev = NULL; ; prev = hm) {
3909 hm = hment_walk(cur_pp, &ht, &entry, prev);
3910 if (hm == NULL) {
3911 x86_hm_exit(cur_pp);
3912
3913 /*
3914 * If not part of a larger page, we're done.
3915 */
3916 if (cur_pp->p_szc <= pg_szcd) {
3917 ASSERT(curthread->t_hatdepth > 0);
3918 --curthread->t_hatdepth;
3919 XPV_ALLOW_MIGRATE();
3920 return (0);
3921 }
3922
3923 /*
3924 * Else check the next larger page size.
3925 * hat_page_demote() may decrease p_szc
3926 * but that's ok; we'll just take an extra
3927 * trip, discover there are no larger mappings,
3928 * and return.
3929 */
3930 ++pg_szcd;
3931 cur_pp = PP_GROUPLEADER(cur_pp, pg_szcd);
3932 goto next_size;
3933 }
3934
3935 /*
3936 * If this mapping size matches, remove it.
3937 */
3938 level = ht->ht_level;
3939 if (level == pg_szcd)
3940 break;
3941 }
3942
3943 /*
3944 * Remove the mapping list entry for this page.
3945 * Note this does the x86_hm_exit() for us.
3946 */
3947 hm = hati_page_unmap(cur_pp, ht, entry);
3948 if (hm != NULL)
3949 hment_free(hm);
3950 }
3951 }
3952
3953 int
3954 hat_pageunload(struct page *pp, uint_t forceflag)
3955 {
3956 ASSERT(PAGE_EXCL(pp));
3957 return (hati_pageunload(pp, 0, forceflag));
3958 }
3959
3960 /*
3961 * Unload all large mappings to pp and reduce by 1 the p_szc field of every
3962 * large page level that included pp.
3963 *
3964 * pp must be locked EXCL. Even though no other constituent pages are locked
3965 * it's legal to unload large mappings to pp because all constituent pages of
3966 * large locked mappings have to be locked SHARED. Therefore, if we have an
3967 * EXCL lock on one of the constituent pages, none of the large mappings to pp
3968 * are locked.
3969 *
3970 * Change (always decrease) p_szc field starting from the last constituent
3971 * page and ending with root constituent page so that root's pszc always shows
3972 * the area where hat_page_demote() may be active.
3973 *
3974 * This mechanism is only used for file system pages where it's not always
3975 * possible to get EXCL locks on all constituent pages to demote the size code
3976 * (as is done for anonymous or kernel large pages).
3977 */
3978 void
3979 hat_page_demote(page_t *pp)
3980 {
3981 uint_t pszc;
3982 uint_t rszc;
3983 uint_t szc;
3984 page_t *rootpp;
3985 page_t *firstpp;
3986 page_t *lastpp;
3987 pgcnt_t pgcnt;
3988
3989 ASSERT(PAGE_EXCL(pp));
3990 ASSERT(!PP_ISFREE(pp));
3991 ASSERT(page_szc_lock_assert(pp));
3992
3993 if (pp->p_szc == 0)
3994 return;
3995
3996 rootpp = PP_GROUPLEADER(pp, 1);
3997 (void) hati_pageunload(rootpp, 1, HAT_FORCE_PGUNLOAD);
3998
3999 /*
4000 * all large mappings to pp are gone
4001 * and no new ones can be set up since pp is locked exclusively.
4002 *
4003 * Lock the root to make sure there's only one hat_page_demote()
4004 * outstanding within the area of this root's pszc.
4005 *
4006 * Second potential hat_page_demote() is already eliminated by upper
4007 * VM layer via page_szc_lock() but we don't rely on it and use our
4008 * own locking (so that upper layer locking can be changed without
4009 * assumptions that hat depends on upper layer VM to prevent multiple
4010 * hat_page_demote() to be issued simultaneously to the same large
4011 * page).
4012 */
4013 again:
4014 pszc = pp->p_szc;
4015 if (pszc == 0)
4016 return;
4017 rootpp = PP_GROUPLEADER(pp, pszc);
4018 x86_hm_enter(rootpp);
4019 /*
4020 * If root's p_szc is different from pszc, we raced with another
4021 * hat_page_demote(). Drop the lock and try to find the root again.
4022 * If root's p_szc is greater than pszc, a previous hat_page_demote()
4023 * is not done yet. Take and release the mlist lock of root's root to
4024 * wait for the previous hat_page_demote() to complete.
4025 */
4026 if ((rszc = rootpp->p_szc) != pszc) {
4027 x86_hm_exit(rootpp);
4028 if (rszc > pszc) {
4029 /* p_szc of a locked non free page can't increase */
4030 ASSERT(pp != rootpp);
4031
4032 rootpp = PP_GROUPLEADER(rootpp, rszc);
4033 x86_hm_enter(rootpp);
4034 x86_hm_exit(rootpp);
4035 }
4036 goto again;
4037 }
4038 ASSERT(pp->p_szc == pszc);
4039
4040 /*
4041 * Decrement by 1 p_szc of every constituent page of a region that
4042 * covered pp. For example if original szc is 3 it gets changed to 2
4043 * everywhere except in region 2 that covered pp. Region 2 that
4044 * covered pp gets demoted to 1 everywhere except in region 1 that
4045 * covered pp. The region 1 that covered pp is demoted to region
4046 * 0. It's done this way because from region 3 we removed level 3
4047 * mappings, from region 2 that covered pp we removed level 2 mappings
4048 * and from region 1 that covered pp we removed level 1 mappings. All
4049 * changes are done from high pfns to low pfns so that roots
4050 * are changed last, allowing one to know the largest region where
4051 * hat_page_demote() is still active by only looking at the root page.
4052 *
4053 * This algorithm is implemented in 2 while loops. First loop changes
4054 * p_szc of pages to the right of pp's level 1 region and second
4055 * loop changes p_szc of pages of level 1 region that covers pp
4056 * and all pages to the left of level 1 region that covers pp.
4057 * In the first loop p_szc keeps dropping with every iteration
4058 * and in the second loop it keeps increasing with every iteration.
4059 *
4060 * First loop description: Demote pages to the right of pp outside of
4061 * level 1 region that covers pp. In every iteration of the while
4062 * loop below find the last page of szc region and the first page of
4063 * (szc - 1) region that is immediately to the right of (szc - 1)
4064 * region that covers pp. From last such page to first such page
4065 * change every page's szc to szc - 1. Decrement szc and continue
4066 * looping until szc is 1. If pp belongs to the last (szc - 1) region
4067 * of the szc region, skip to the next iteration.
4068 */
4069 szc = pszc;
4070 while (szc > 1) {
4071 lastpp = PP_GROUPLEADER(pp, szc);
4072 pgcnt = page_get_pagecnt(szc);
4073 lastpp += pgcnt - 1;
4074 firstpp = PP_GROUPLEADER(pp, (szc - 1));
4075 pgcnt = page_get_pagecnt(szc - 1);
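/*
 * If pp sits in the last (szc - 1) region of this szc region there is
 * nothing to its right at this size to demote.
 */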
4076 if (lastpp - firstpp < pgcnt) {
4077 szc--;
4078 continue;
4079 }
4080 firstpp += pgcnt;
4081 while (lastpp != firstpp) {
4082 ASSERT(lastpp->p_szc == pszc);
4083 lastpp->p_szc = szc - 1;
4084 lastpp--;
4085 }
4086 firstpp->p_szc = szc - 1;
4087 szc--;
4088 }
4089
4090 /*
4091 * Second loop description:
4092 * First iteration changes p_szc to 0 of every
4093 * page of level 1 region that covers pp.
4094 * Subsequent iterations find last page of szc region
4095 * immediately to the left of szc region that covered pp
4096 * and first page of (szc + 1) region that covers pp.
4097 * From last to first page change p_szc of every page to szc.
4098 * Increment szc and continue looping until szc is pszc.
4099 * If pp belongs to the first szc region of the (szc + 1) region
4100 * skip to the next iteration.
4101 *
4102 */
4103 szc = 0;
4104 while (szc < pszc) {
4105 firstpp = PP_GROUPLEADER(pp, (szc + 1));
4106 if (szc == 0) {
4107 pgcnt = page_get_pagecnt(1);
4108 lastpp = firstpp + (pgcnt - 1);
4109 } else {
4110 lastpp = PP_GROUPLEADER(pp, szc);
4111 if (firstpp == lastpp) {
4112 szc++;
4113 continue;
4114 }
4115 lastpp--;
4116 pgcnt = page_get_pagecnt(szc);
4117 }
4118 while (lastpp != firstpp) {
4119 ASSERT(lastpp->p_szc == pszc);
4120 lastpp->p_szc = szc;
4121 lastpp--;
4122 }
4123 firstpp->p_szc = szc;
4124 if (firstpp == rootpp)
4125 break;
4126 szc++;
4127 }
4128 x86_hm_exit(rootpp);
4129 }
4130
4131 /*
4132 * Get hw stats from hardware into the page struct and reset the hw stats.
4133 * Returns the attributes of the page.
4134 * Flags for hat_pagesync, hat_getstat, hat_sync
4135 *
4136 * define HAT_SYNC_ZERORM 0x01
4137 *
4138 * Additional flags for hat_pagesync
4139 *
4140 * define HAT_SYNC_STOPON_REF 0x02
4141 * define HAT_SYNC_STOPON_MOD 0x04
4142 * define HAT_SYNC_STOPON_RM 0x06
4143 * define HAT_SYNC_STOPON_SHARED 0x08
4144 */
4145 uint_t
4146 hat_pagesync(struct page *pp, uint_t flags)
4147 {
4148 hment_t *hm = NULL;
4149 htable_t *ht;
4150 uint_t entry;
4151 x86pte_t old, save_old;
4152 x86pte_t new;
4153 uchar_t nrmbits = P_REF|P_MOD|P_RO;
4154 extern ulong_t po_share;
4155 page_t *save_pp = pp;
4156 uint_t pszc = 0;
4157
4158 ASSERT(PAGE_LOCKED(pp) || panicstr);
4159
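/*
 * if the page is already marked read-only and the caller stops on the
 * mod bit, return the current ref/mod bits without walking the mappings
 */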
4160 if (PP_ISRO(pp) && (flags & HAT_SYNC_STOPON_MOD))
4161 return (pp->p_nrm & nrmbits);
4162
4163 if ((flags & HAT_SYNC_ZERORM) == 0) {
4164
4165 if ((flags & HAT_SYNC_STOPON_REF) != 0 && PP_ISREF(pp))
4166 return (pp->p_nrm & nrmbits);
4167
4168 if ((flags & HAT_SYNC_STOPON_MOD) != 0 && PP_ISMOD(pp))
4169 return (pp->p_nrm & nrmbits);
4170
4171 if ((flags & HAT_SYNC_STOPON_SHARED) != 0 &&
4172 hat_page_getshare(pp) > po_share) {
4173 if (PP_ISRO(pp))
4174 PP_SETREF(pp);
4175 return (pp->p_nrm & nrmbits);
4176 }
4177 }
4178
4179 XPV_DISALLOW_MIGRATE();
4180 next_size:
4181 /*
4182 * walk thru the mapping list syncing (and clearing) ref/mod bits.
4183 */
4184 x86_hm_enter(pp);
4185 while ((hm = hment_walk(pp, &ht, &entry, hm)) != NULL) {
4186 if (ht->ht_level < pszc)
4187 continue;
4188 old = x86pte_get(ht, entry);
4189 try_again:
4190
4191 ASSERT(PTE2PFN(old, ht->ht_level) == pp->p_pagenum);
4192
4193 if (PTE_GET(old, PT_REF | PT_MOD) == 0)
4194 continue;
4195
4196 save_old = old;
4197 if ((flags & HAT_SYNC_ZERORM) != 0) {
4198
4199 /*
4200 * Need to clear ref or mod bits. Need to demap
4201 * to make sure any executing TLBs see cleared bits.
4202 */
4203 new = old;
4204 PTE_CLR(new, PT_REF | PT_MOD);
4205 old = hati_update_pte(ht, entry, old, new);
4206 if (old != 0)
4207 goto try_again;
4208
4209 old = save_old;
4210 }
4211
4212 /*
4213 * Sync the PTE
4214 */
4215 if (!(flags & HAT_SYNC_ZERORM) &&
4216 PTE_GET(old, PT_SOFTWARE) <= PT_NOSYNC)
4217 hati_sync_pte_to_page(pp, old, ht->ht_level);
4218
4219 /*
4220 * can stop short if we found a ref'd or mod'd page
4221 */
4222 if (((flags & HAT_SYNC_STOPON_MOD) && PP_ISMOD(save_pp)) ||
4223 ((flags & HAT_SYNC_STOPON_REF) && PP_ISREF(save_pp))) {
4224 x86_hm_exit(pp);
4225 goto done;
4226 }
4227 }
4228 x86_hm_exit(pp);
4229 while (pszc < pp->p_szc) {
4230 page_t *tpp;
4231 pszc++;
4232 tpp = PP_GROUPLEADER(pp, pszc);
4233 if (pp != tpp) {
4234 pp = tpp;
4235 goto next_size;
4236 }
4237 }
4238 done:
4239 XPV_ALLOW_MIGRATE();
4240 return (save_pp->p_nrm & nrmbits);
4241 }
4242
4243 /*
4244 * returns approx number of mappings to this pp. A return of 0 implies
4245 * there are no mappings to the page.
4246 */
4247 ulong_t
4248 hat_page_getshare(page_t *pp)
4249 {
4250 uint_t cnt;
4251 cnt = hment_mapcnt(pp);
4252 if (vpm_enable && pp->p_vpmref) {
4253 cnt += 1;
4254 }
4255 return (cnt);
4256 }
4257
4258 /*
4259 * Return 1 if the number of mappings exceeds sh_thresh. Return 0
4260 * otherwise.
4261 */
4262 int
4263 hat_page_checkshare(page_t *pp, ulong_t sh_thresh)
4264 {
4265 return (hat_page_getshare(pp) > sh_thresh);
4266 }
4267
4268 /*
4269 * hat_softlock isn't supported anymore
4270 */
4271 /*ARGSUSED*/
4272 faultcode_t
4273 hat_softlock(
4274 hat_t *hat,
4275 caddr_t addr,
4276 size_t *len,
4277 struct page **page_array,
4278 uint_t flags)
4279 {
4280 return (FC_NOSUPPORT);
4281 }
4282
4283
4284
4285 /*
4286 * Routine to expose supported HAT features to platform independent code.
4287 */
4288 /*ARGSUSED*/
4289 int
4290 hat_supported(enum hat_features feature, void *arg)
4291 {
4292 switch (feature) {
4293
4294 case HAT_SHARED_PT: /* this is really ISM */
4295 return (1);
4296
4297 case HAT_DYNAMIC_ISM_UNMAP:
4298 return (0);
4299
4300 case HAT_VMODSORT:
4301 return (1);
4302
4303 case HAT_SHARED_REGIONS:
4304 return (0);
4305
4306 default:
4307 panic("hat_supported() - unknown feature");
4308 }
4309 return (0);
4310 }
4311
4312 /*
4313 * Called when a thread is exiting and has been switched to the kernel AS
4314 */
4315 void
4316 hat_thread_exit(kthread_t *thd)
4317 {
4318 ASSERT(thd->t_procp->p_as == &kas);
4319 XPV_DISALLOW_MIGRATE();
4320 hat_switch(thd->t_procp->p_as->a_hat);
4321 XPV_ALLOW_MIGRATE();
4322 }
4323
4324 /*
4325 * Setup the given brand new hat structure as the new HAT on this cpu's mmu.
4326 */
4327 /*ARGSUSED*/
4328 void
4329 hat_setup(hat_t *hat, int flags)
4330 {
4331 XPV_DISALLOW_MIGRATE();
4332 kpreempt_disable();
4333
4334 hat_switch(hat);
4335
4336 kpreempt_enable();
4337 XPV_ALLOW_MIGRATE();
4338 }
4339
4340 /*
4341 * Prepare for a CPU private mapping for the given address.
4342 *
4343 * The address can only be used from a single CPU and can be remapped
4344 * using hat_mempte_remap(). Return the address of the PTE.
4345 *
4346 * We do the htable_create() if necessary and increment the valid count so
4347 * the htable can't disappear. We also hat_devload() the page table into
4348 * kernel so that the PTE is quickly accessed.
4349 */
4350 hat_mempte_t
4351 hat_mempte_setup(caddr_t addr)
4352 {
4353 uintptr_t va = (uintptr_t)addr;
4354 htable_t *ht;
4355 uint_t entry;
4356 x86pte_t oldpte;
4357 hat_mempte_t p;
4358
4359 ASSERT(IS_PAGEALIGNED(va));
4360 ASSERT(!IN_VA_HOLE(va));
4361 ++curthread->t_hatdepth;
4362 XPV_DISALLOW_MIGRATE();
4363 ht = htable_getpte(kas.a_hat, va, &entry, &oldpte, 0);
4364 if (ht == NULL) {
4365 ht = htable_create(kas.a_hat, va, 0, NULL);
4366 entry = htable_va2entry(va, ht);
4367 ASSERT(ht->ht_level == 0);
4368 oldpte = x86pte_get(ht, entry);
4369 }
4370 if (PTE_ISVALID(oldpte))
4371 panic("hat_mempte_setup(): address already mapped"
4372 "ht=%p, entry=%d, pte=" FMT_PTE, (void *)ht, entry, oldpte);
4373
4374 /*
4375 * increment ht_valid_cnt so that the pagetable can't disappear
4376 */
4377 HTABLE_INC(ht->ht_valid_cnt);
4378
4379 /*
4380 * return the PTE physical address to the caller.
4381 */
4382 htable_release(ht);
4383 XPV_ALLOW_MIGRATE();
4384 p = PT_INDEX_PHYSADDR(pfn_to_pa(ht->ht_pfn), entry);
4385 --curthread->t_hatdepth;
4386 return (p);
4387 }
4388
4389 /*
4390 * Release a CPU private mapping for the given address.
4391 * We decrement the htable valid count so it might be destroyed.
4392 */
4393 /*ARGSUSED1*/
4394 void
4395 hat_mempte_release(caddr_t addr, hat_mempte_t pte_pa)
4396 {
4397 htable_t *ht;
4398
4399 XPV_DISALLOW_MIGRATE();
4400 /*
4401 * invalidate any left over mapping and decrement the htable valid count
4402 */
4403 #ifdef __xpv
4404 if (HYPERVISOR_update_va_mapping((uintptr_t)addr, 0,
4405 UVMF_INVLPG | UVMF_LOCAL))
4406 panic("HYPERVISOR_update_va_mapping() failed");
4407 #else
4408 {
4409 x86pte_t *pteptr;
4410
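/*
 * map the pagetable page into the kernel, zero the PTE, and flush
 * the TLB entry for the address
 */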
4411 pteptr = x86pte_mapin(mmu_btop(pte_pa),
4412 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
4413 if (mmu.pae_hat)
4414 *pteptr = 0;
4415 else
4416 *(x86pte32_t *)pteptr = 0;
4417 mmu_flush_tlb_kpage((uintptr_t)addr);
4418 x86pte_mapout();
4419 }
4420 #endif
4421
4422 ht = htable_getpte(kas.a_hat, ALIGN2PAGE(addr), NULL, NULL, 0);
4423 if (ht == NULL)
4424 panic("hat_mempte_release(): invalid address");
4425 ASSERT(ht->ht_level == 0);
4426 HTABLE_DEC(ht->ht_valid_cnt);
4427 htable_release(ht);
4428 XPV_ALLOW_MIGRATE();
4429 }
4430
4431 /*
4432 * Apply a temporary CPU private mapping to a page. We flush the TLB only
4433 * on this CPU, so this ought to have been called with preemption disabled.
4434 */
4435 void
4436 hat_mempte_remap(
4437 pfn_t pfn,
4438 caddr_t addr,
4439 hat_mempte_t pte_pa,
4440 uint_t attr,
4441 uint_t flags)
4442 {
4443 uintptr_t va = (uintptr_t)addr;
4444 x86pte_t pte;
4445
4446 /*
4447 * Remap the given PTE to the new page's PFN. Invalidate only
4448 * on this CPU.
4449 */
4450 #ifdef DEBUG
4451 htable_t *ht;
4452 uint_t entry;
4453
4454 ASSERT(IS_PAGEALIGNED(va));
4455 ASSERT(!IN_VA_HOLE(va));
4456 ht = htable_getpte(kas.a_hat, va, &entry, NULL, 0);
4457 ASSERT(ht != NULL);
4458 ASSERT(ht->ht_level == 0);
4459 ASSERT(ht->ht_valid_cnt > 0);
4460 ASSERT(ht->ht_pfn == mmu_btop(pte_pa));
4461 htable_release(ht);
4462 #endif
4463 XPV_DISALLOW_MIGRATE();
4464 pte = hati_mkpte(pfn, attr, 0, flags);
4465 #ifdef __xpv
4466 if (HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG | UVMF_LOCAL))
4467 panic("HYPERVISOR_update_va_mapping() failed");
4468 #else
4469 {
4470 x86pte_t *pteptr;
4471
4472 pteptr = x86pte_mapin(mmu_btop(pte_pa),
4473 (pte_pa & MMU_PAGEOFFSET) >> mmu.pte_size_shift, NULL);
4474 if (mmu.pae_hat)
4475 *(x86pte_t *)pteptr = pte;
4476 else
4477 *(x86pte32_t *)pteptr = (x86pte32_t)pte;
4478 mmu_flush_tlb_kpage((uintptr_t)addr);
4479 x86pte_mapout();
4480 }
4481 #endif
4482 XPV_ALLOW_MIGRATE();
4483 }
4484
4485
4486
4487 /*
4488 * Hat locking functions
4489 * XXX - these two functions are currently being used by hatstats;
4490 * they can be removed by using a per-as mutex for hatstats.
4491 */
4492 void
4493 hat_enter(hat_t *hat)
4494 {
4495 mutex_enter(&hat->hat_mutex);
4496 }
4497
4498 void
4499 hat_exit(hat_t *hat)
4500 {
4501 mutex_exit(&hat->hat_mutex);
4502 }
4503
4504 /*
4505 * HAT part of cpu initialization.
4506 */
4507 void
4508 hat_cpu_online(struct cpu *cpup)
4509 {
4510 if (cpup != CPU) {
4511 x86pte_cpu_init(cpup);
4512 hat_pcp_setup(cpup);
4513 }
4514 CPUSET_ATOMIC_ADD(khat_cpuset, cpup->cpu_id);
4515 }
4516
4517 /*
4518 * HAT part of cpu deletion.
4519 * (currently, we only call this after the cpu is safely passivated.)
4520 */
4521 void
4522 hat_cpu_offline(struct cpu *cpup)
4523 {
4524 ASSERT(cpup != CPU);
4525
4526 CPUSET_ATOMIC_DEL(khat_cpuset, cpup->cpu_id);
4527 hat_pcp_teardown(cpup);
4528 x86pte_cpu_fini(cpup);
4529 }
4530
4531 /*
4532 * Function called after all CPUs are brought online.
4533 * Used to remove low address boot mappings.
4534 */
4535 void
4536 clear_boot_mappings(uintptr_t low, uintptr_t high)
4537 {
4538 uintptr_t vaddr = low;
4539 htable_t *ht = NULL;
4540 level_t level;
4541 uint_t entry;
4542 x86pte_t pte;
4543
4544 /*
4545 * On the 1st CPU we can unload the prom mappings; basically we blow away
4546 * all virtual mappings under _userlimit.
4547 */
4548 while (vaddr < high) {
4549 pte = htable_walk(kas.a_hat, &ht, &vaddr, high);
4550 if (ht == NULL)
4551 break;
4552
4553 level = ht->ht_level;
4554 entry = htable_va2entry(vaddr, ht);
4555 ASSERT(level <= mmu.max_page_level);
4556 ASSERT(PTE_ISPAGE(pte, level));
4557
4558 /*
4559 * Unload the mapping from the page tables.
4560 */
4561 (void) x86pte_inval(ht, entry, 0, NULL, B_TRUE);
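/* account for the removed entry in the htable and hat page counts */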
4562 ASSERT(ht->ht_valid_cnt > 0);
4563 HTABLE_DEC(ht->ht_valid_cnt);
4564 PGCNT_DEC(ht->ht_hat, ht->ht_level);
4565
4566 vaddr += LEVEL_SIZE(ht->ht_level);
4567 }
4568 if (ht)
4569 htable_release(ht);
4570 }
4571
4572 /*
4573 * Atomically update a new translation for a single page. If the
4574 * currently installed PTE doesn't match the value we expect to find,
4575 * it's not updated and we return the PTE we found.
4576 *
4577 * If activating nosync or NOWRITE and the page was modified we need to sync
4578 * with the page_t. Also sync with page_t if clearing ref/mod bits.
4579 */
4580 static x86pte_t
4581 hati_update_pte(htable_t *ht, uint_t entry, x86pte_t expected, x86pte_t new)
4582 {
4583 page_t *pp;
4584 uint_t rm = 0;
4585 x86pte_t replaced;
4586
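/*
 * If this change would stop ref/mod syncing (setting NOSYNC, removing
 * write permission, or clearing ref/mod), capture the ref/mod bits from
 * the expected PTE now so they can be pushed to the page_t once the
 * swap succeeds.
 */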
4587 if (PTE_GET(expected, PT_SOFTWARE) < PT_NOSYNC &&
4588 PTE_GET(expected, PT_MOD | PT_REF) &&
4589 (PTE_GET(new, PT_NOSYNC) || !PTE_GET(new, PT_WRITABLE) ||
4590 !PTE_GET(new, PT_MOD | PT_REF))) {
4591
4592 ASSERT(!pfn_is_foreign(PTE2PFN(expected, ht->ht_level)));
4593 pp = page_numtopp_nolock(PTE2PFN(expected, ht->ht_level));
4594 ASSERT(pp != NULL);
4595 if (PTE_GET(expected, PT_MOD))
4596 rm |= P_MOD;
4597 if (PTE_GET(expected, PT_REF))
4598 rm |= P_REF;
4599 PTE_CLR(new, PT_MOD | PT_REF);
4600 }
4601
4602 replaced = x86pte_update(ht, entry, expected, new);
4603 if (replaced != expected)
4604 return (replaced);
4605
4606 if (rm) {
4607 /*
4608 * sync to all constituent pages of a large page
4609 */
4610 pgcnt_t pgcnt = page_get_pagecnt(ht->ht_level);
4611 ASSERT(IS_P2ALIGNED(pp->p_pagenum, pgcnt));
4612 while (pgcnt-- > 0) {
4613 /*
4614 * hat_page_demote() can't decrease
4615 * pszc below this mapping size
4616 * since large mapping existed after we
4617 * took mlist lock.
4618 */
4619 ASSERT(pp->p_szc >= ht->ht_level);
4620 hat_page_setattr(pp, rm);
4621 ++pp;
4622 }
4623 }
4624
4625 return (0);
4626 }
4627
4628 /* ARGSUSED */
4629 void
4630 hat_join_srd(struct hat *hat, vnode_t *evp)
4631 {
4632 }
4633
4634 /* ARGSUSED */
4635 hat_region_cookie_t
4636 hat_join_region(struct hat *hat,
4637 caddr_t r_saddr,
4638 size_t r_size,
4639 void *r_obj,
4640 u_offset_t r_objoff,
4641 uchar_t r_perm,
4642 uchar_t r_pgszc,
4643 hat_rgn_cb_func_t r_cb_function,
4644 uint_t flags)
4645 {
4646 panic("No shared region support on x86");
4647 return (HAT_INVALID_REGION_COOKIE);
4648 }
4649
4650 /* ARGSUSED */
4651 void
4652 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4653 {
4654 panic("No shared region support on x86");
4655 }
4656
4657 /* ARGSUSED */
4658 void
4659 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4660 {
4661 panic("No shared region support on x86");
4662 }
4663
4664
4665 /*
4666 * Kernel Physical Mapping (kpm) facility
4667 *
4668 * Most of the routines needed to support segkpm are almost no-ops on the
4669 * x86 platform. We map in the entire segment when it is created and leave
4670 * it mapped in, so there is no additional work required to set up and tear
4671 * down individual mappings. All of these routines were created to support
4672 * SPARC platforms that have to avoid aliasing in their virtually indexed
4673 * caches.
4674 *
4675 * Most of the routines have sanity checks in them (e.g. verifying that the
4676 * passed-in page is locked). We don't actually care about most of these
4677 * checks on x86, but we leave them in place to identify problems in the
4678 * upper levels.
4679 */
4680
4681 /*
4682 * Map in a locked page and return the vaddr.
4683 */
4684 /*ARGSUSED*/
4685 caddr_t
4686 hat_kpm_mapin(struct page *pp, struct kpme *kpme)
4687 {
4688 caddr_t vaddr;
4689
4690 #ifdef DEBUG
4691 if (kpm_enable == 0) {
4692 cmn_err(CE_WARN, "hat_kpm_mapin: kpm_enable not set\n");
4693 return ((caddr_t)NULL);
4694 }
4695
4696 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4697 cmn_err(CE_WARN, "hat_kpm_mapin: pp zero or not locked\n");
4698 return ((caddr_t)NULL);
4699 }
4700 #endif
4701
4702 vaddr = hat_kpm_page2va(pp, 1);
4703
4704 return (vaddr);
4705 }
4706
4707 /*
4708 * Mapout a locked page.
4709 */
4710 /*ARGSUSED*/
4711 void
4712 hat_kpm_mapout(struct page *pp, struct kpme *kpme, caddr_t vaddr)
4713 {
4714 #ifdef DEBUG
4715 if (kpm_enable == 0) {
4716 cmn_err(CE_WARN, "hat_kpm_mapout: kpm_enable not set\n");
4717 return;
4718 }
4719
4720 if (IS_KPM_ADDR(vaddr) == 0) {
4721 cmn_err(CE_WARN, "hat_kpm_mapout: no kpm address\n");
4722 return;
4723 }
4724
4725 if (pp == NULL || PAGE_LOCKED(pp) == 0) {
4726 cmn_err(CE_WARN, "hat_kpm_mapout: page zero or not locked\n");
4727 return;
4728 }
4729 #endif
4730 }
4731
4732 /*
4733 * hat_kpm_mapin_pfn is used to obtain a kpm mapping for physical
4734 * memory addresses that are not described by a page_t. It can
4735 * also be used for normal pages that are not locked, but beware
4736 * this is dangerous - no locking is performed, so the identity of
4737 * the page could change. hat_kpm_mapin_pfn is not supported when
4738 * vac_colors > 1, because the chosen va depends on the page identity,
4739 * which could change.
4740 * The caller must only pass pfn's for valid physical addresses; violation
4741 * of this rule will cause panic.
4742 */
4743 caddr_t
4744 hat_kpm_mapin_pfn(pfn_t pfn)
4745 {
4746 caddr_t paddr, vaddr;
4747
4748 if (kpm_enable == 0)
4749 return ((caddr_t)NULL);
4750
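/* kpm is a linear mapping of physical memory starting at kpm_vbase */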
4751 paddr = (caddr_t)ptob(pfn);
4752 vaddr = (uintptr_t)kpm_vbase + paddr;
4753
4754 return ((caddr_t)vaddr);
4755 }
4756
4757 /*ARGSUSED*/
4758 void
4759 hat_kpm_mapout_pfn(pfn_t pfn)
4760 {
4761 /* empty */
4762 }
4763
4764 /*
4765 * Return the kpm virtual address for a specific pfn
4766 */
4767 caddr_t
4768 hat_kpm_pfn2va(pfn_t pfn)
4769 {
4770 uintptr_t vaddr = (uintptr_t)kpm_vbase + mmu_ptob(pfn);
4771
4772 ASSERT(!pfn_is_foreign(pfn));
4773 return ((caddr_t)vaddr);
4774 }
4775
4776 /*
4777 * Return the kpm virtual address for the page at pp.
4778 */
4779 /*ARGSUSED*/
4780 caddr_t
4781 hat_kpm_page2va(struct page *pp, int checkswap)
4782 {
4783 return (hat_kpm_pfn2va(pp->p_pagenum));
4784 }
4785
4786 /*
4787 * Return the page frame number for the kpm virtual address vaddr.
4788 */
4789 pfn_t
4790 hat_kpm_va2pfn(caddr_t vaddr)
4791 {
4792 pfn_t pfn;
4793
4794 ASSERT(IS_KPM_ADDR(vaddr));
4795
4796 pfn = (pfn_t)btop(vaddr - kpm_vbase);
4797
4798 return (pfn);
4799 }
4800
4801
4802 /*
4803 * Return the page for the kpm virtual address vaddr.
4804 */
4805 page_t *
4806 hat_kpm_vaddr2page(caddr_t vaddr)
4807 {
4808 pfn_t pfn;
4809
4810 ASSERT(IS_KPM_ADDR(vaddr));
4811
4812 pfn = hat_kpm_va2pfn(vaddr);
4813
4814 return (page_numtopp_nolock(pfn));
4815 }
4816
4817 /*
4818 * hat_kpm_fault is called from segkpm_fault when we take a page fault on a
4819 * KPM page. This should never happen on x86.
4820 */
4821 int
4822 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4823 {
4824 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
4825 (void *)hat, (void *)vaddr);
4826
4827 return (0);
4828 }
4829
4830 /*ARGSUSED*/
4831 void
4832 hat_kpm_mseghash_clear(int nentries)
4833 {}
4834
4835 /*ARGSUSED*/
4836 void
4837 hat_kpm_mseghash_update(pgcnt_t inx, struct memseg *msp)
4838 {}
4839
4840 #ifndef __xpv
4841 void
4842 hat_kpm_addmem_mseg_update(struct memseg *msp, pgcnt_t nkpmpgs,
4843 offset_t kpm_pages_off)
4844 {
4845 _NOTE(ARGUNUSED(nkpmpgs, kpm_pages_off));
4846 pfn_t base, end;
4847
4848 /*
4849 * kphysm_add_memory_dynamic() does not set nkpmpgs
4850 * when page_t memory is externally allocated. That
4851 * code must properly calculate nkpmpgs in all cases
4852 * if nkpmpgs needs to be used at some point.
4853 */
4854
4855 /*
4856 * The meta (page_t) pages for dynamically added memory are allocated
4857 * either from the incoming memory itself or from existing memory.
4858 * In the former case the base of the incoming pages will be different
4859 * than the base of the dynamic segment so call memseg_get_start() to
4860 * get the actual base of the incoming memory for each case.
4861 */
4862
4863 base = memseg_get_start(msp);
4864 end = msp->pages_end;
4865
4866 hat_devload(kas.a_hat, kpm_vbase + mmu_ptob(base),
4867 mmu_ptob(end - base), base, PROT_READ | PROT_WRITE,
4868 HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
4869 }
4870
4871 void
4872 hat_kpm_addmem_mseg_insert(struct memseg *msp)
4873 {
4874 _NOTE(ARGUNUSED(msp));
4875 }
4876
4877 void
4878 hat_kpm_addmem_memsegs_update(struct memseg *msp)
4879 {
4880 _NOTE(ARGUNUSED(msp));
4881 }
4882
4883 /*
4884 * Return end of metadata for an already setup memseg.
4885 * X86 platforms don't need per-page meta data to support kpm.
4886 */
4887 caddr_t
4888 hat_kpm_mseg_reuse(struct memseg *msp)
4889 {
4890 return ((caddr_t)msp->epages);
4891 }
4892
4893 void
4894 hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
4895 {
4896 _NOTE(ARGUNUSED(msp, mspp));
4897 ASSERT(0);
4898 }
4899
4900 void
4901 hat_kpm_split_mseg_update(struct memseg *msp, struct memseg **mspp,
4902 struct memseg *lo, struct memseg *mid, struct memseg *hi)
4903 {
4904 _NOTE(ARGUNUSED(msp, mspp, lo, mid, hi));
4905 ASSERT(0);
4906 }
4907
4908 /*
4909 * Walk the memsegs chain, applying func to each memseg span.
4910 */
4911 void
4912 hat_kpm_walk(void (*func)(void *, void *, size_t), void *arg)
4913 {
4914 pfn_t pbase, pend;
4915 void *base;
4916 size_t size;
4917 struct memseg *msp;
4918
4919 for (msp = memsegs; msp; msp = msp->next) {
4920 pbase = msp->pages_base;
4921 pend = msp->pages_end;
4922 base = ptob(pbase) + kpm_vbase;
4923 size = ptob(pend - pbase);
4924 func(arg, base, size);
4925 }
4926 }
4927
4928 #else /* __xpv */
4929
4930 /*
4931 * There are specific Hypervisor calls to establish and remove mappings
4932 * to grant table references and the privcmd driver. We have to ensure
4933 * that a page table actually exists.
4934 */
4935 void
4936 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4937 {
4938 maddr_t base_ma;
4939 htable_t *ht;
4940 uint_t entry;
4941
4942 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4943 XPV_DISALLOW_MIGRATE();
4944 ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4945
4946 /*
4947 * if an address for pte_ma is passed in, return the MA of the pte
4948 * for this specific address. This address is only valid as long
4949 * as the htable stays locked.
4950 */
4951 if (pte_ma != NULL) {
4952 entry = htable_va2entry((uintptr_t)addr, ht);
4953 base_ma = pa_to_ma(ptob(ht->ht_pfn));
4954 *pte_ma = base_ma + (entry << mmu.pte_size_shift);
4955 }
4956 XPV_ALLOW_MIGRATE();
4957 }
4958
4959 void
4960 hat_release_mapping(hat_t *hat, caddr_t addr)
4961 {
4962 htable_t *ht;
4963
4964 ASSERT(IS_P2ALIGNED((uintptr_t)addr, MMU_PAGESIZE));
4965 XPV_DISALLOW_MIGRATE();
4966 ht = htable_lookup(hat, (uintptr_t)addr, 0);
4967 ASSERT(ht != NULL);
4968 ASSERT(ht->ht_busy >= 2);
4969 htable_release(ht);
4970 htable_release(ht);
4971 XPV_ALLOW_MIGRATE();
4972 }
4973 #endif /* __xpv */
4974
4975 /*
4976 * Helper function to punch in a mapping that we need with the specified
4977 * attributes.
4978 */
4979 void
4980 hati_cpu_punchin(cpu_t *cpu, uintptr_t va, uint_t attrs)
4981 {
4982 int ret;
4983 pfn_t pfn;
4984 hat_t *cpu_hat = cpu->cpu_hat_info->hci_user_hat;
4985
4986 ASSERT3S(kpti_enable, ==, 1);
4987 ASSERT3P(cpu_hat, !=, NULL);
4988 ASSERT3U(cpu_hat->hat_flags & HAT_PCP, ==, HAT_PCP);
4989 ASSERT3U(va & MMU_PAGEOFFSET, ==, 0);
4990
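/*
 * The va must already be mapped in the kernel hat; look up its pfn
 * there and duplicate the mapping into the per-CPU user hat.
 */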
4991 pfn = hat_getpfnum(kas.a_hat, (caddr_t)va);
4992 VERIFY3U(pfn, !=, PFN_INVALID);
4993
4994 /*
4995 * We purposefully don't try to find the page_t. This means that this
4996 * will be marked PT_NOCONSIST; however, given that this is pretty much
4997 * a static mapping that we're using we should be relatively OK.
4998 */
4999 attrs |= HAT_STORECACHING_OK;
5000 ret = hati_load_common(cpu_hat, va, NULL, attrs, 0, 0, pfn);
5001 VERIFY3S(ret, ==, 0);
5002 }
5003