Lines Matching defs:hat
36 * Implementation of the interfaces described in <common/vm/hat.h>
89 * Basic parameters for hat operation.
96 * For 32 bit PAE support on i86pc, the kernel hat will use the 1st 4 entries
118 * hat created by hat_alloc(). This means that kernelbase must be:
123 * The hat_kernel_range_ts describe what needs to be copied from kernel hat
124 * to each user hat.
167 * management stuff for hat structures
218 * kmem cache constructor for struct hat
224 hat_t *hat = buf;
226 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
227 bzero(hat->hat_pages_mapped,
229 hat->hat_ism_pgcnt = 0;
230 hat->hat_stats = 0;
231 hat->hat_flags = 0;
232 CPUSET_ZERO(hat->hat_cpus);
233 hat->hat_htable = NULL;
234 hat->hat_ht_hash = NULL;
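Read together, the constructor fragments above (source lines 218-234) amount to a routine of roughly the shape below. This is a hedged reconstruction for illustration, assuming the standard kmem cache constructor prototype and that the bzero() length is the full hat_pages_mapped array; it is not a verbatim copy of the source.

	/*
	 * Illustrative reconstruction of the kmem cache constructor for
	 * struct hat.  The (buf, handle, kmflags) signature is the standard
	 * kmem_cache_create() constructor prototype; the bzero() length is
	 * an assumption, everything else mirrors the matched lines above.
	 */
	static int
	hati_constructor(void *buf, void *handle, int kmflags)
	{
		hat_t *hat = buf;

		mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
		bzero(hat->hat_pages_mapped, sizeof (hat->hat_pages_mapped));
		hat->hat_ism_pgcnt = 0;
		hat->hat_stats = 0;
		hat->hat_flags = 0;
		CPUSET_ZERO(hat->hat_cpus);
		hat->hat_htable = NULL;
		hat->hat_ht_hash = NULL;
		return (0);
	}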
239 * Allocate a hat structure for as. We also create the top level
240 * htable and initialize it to contain the kernel hat entries.
245 hat_t *hat;
264 hat = kmem_cache_alloc(hat_cache, KM_SLEEP);
265 hat->hat_as = as;
266 mutex_init(&hat->hat_mutex, NULL, MUTEX_DEFAULT, NULL);
267 ASSERT(hat->hat_flags == 0);
277 /* 32 bit processes use a VLP style hat when running with PAE */
285 hat->hat_flags = HAT_VLP;
286 bzero(hat->hat_vlp_ptes, VLP_SIZE);
292 if ((hat->hat_flags & HAT_VLP)) {
293 hat->hat_num_hash = mmu.vlp_hash_cnt;
294 hat->hat_ht_hash = kmem_cache_alloc(vlp_hash_cache, KM_SLEEP);
296 hat->hat_num_hash = mmu.hash_cnt;
297 hat->hat_ht_hash = kmem_cache_alloc(hat_hash_cache, KM_SLEEP);
299 bzero(hat->hat_ht_hash, hat->hat_num_hash * sizeof (htable_t *));
303 * tables for the new hat.
305 hat->hat_htable = NULL;
306 hat->hat_ht_cached = NULL;
308 ht = htable_create(hat, (uintptr_t)0, TOP_LEVEL(hat), NULL);
309 hat->hat_htable = ht;
312 if (hat->hat_flags & HAT_VLP)
321 if (rp->hkr_level == TOP_LEVEL(hat))
322 ht = hat->hat_htable;
324 ht = htable_create(hat, va, rp->hkr_level,
339 &hat->hat_vlp_ptes[start],
357 xen_pin(hat->hat_htable->ht_pfn, mmu.max_level);
359 xen_pin(hat->hat_user_ptable, mmu.max_level);
371 * The list ends where hat->hat_next == NULL
374 * The list begins where hat->hat_prev == NULL
377 hat->hat_prev = NULL;
378 hat->hat_next = kas.a_hat->hat_next;
379 if (hat->hat_next)
380 hat->hat_next->hat_prev = hat;
382 kas.a_hat->hat_prev = hat;
383 kas.a_hat->hat_next = hat;
386 return (hat);
394 hat_free_start(hat_t *hat)
396 ASSERT(AS_WRITE_HELD(hat->hat_as));
399 * If the hat is currently a stealing victim, wait for the stealing
404 while (hat->hat_flags & HAT_VICTIM)
406 hat->hat_flags |= HAT_FREEING;
411 * An address space is being destroyed, so we destroy the associated hat.
414 hat_free_end(hat_t *hat)
418 ASSERT(hat->hat_flags & HAT_FREEING);
421 * must not be running on the given hat
423 ASSERT(CPU->cpu_current_hat != hat);
429 if (hat->hat_prev)
430 hat->hat_prev->hat_next = hat->hat_next;
432 kas.a_hat->hat_next = hat->hat_next;
433 if (hat->hat_next)
434 hat->hat_next->hat_prev = hat->hat_prev;
436 kas.a_hat->hat_prev = hat->hat_prev;
438 hat->hat_next = hat->hat_prev = NULL;
444 xen_unpin(hat->hat_htable->ht_pfn);
446 xen_unpin(hat->hat_user_ptable);
453 htable_purge_hat(hat);
458 if (hat->hat_flags & HAT_VLP)
462 kmem_cache_free(cache, hat->hat_ht_hash);
463 hat->hat_ht_hash = NULL;
465 hat->hat_flags = 0;
466 kmem_cache_free(hat_cache, hat);
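The insertion fragments in hat_alloc (source lines 371-383) and the unlink fragments in hat_free_end (source lines 429-438) describe the doubly linked list of user hats headed at kas.a_hat: kas.a_hat->hat_next is the head, kas.a_hat->hat_prev tracks the tail, and both ends terminate in NULL rather than pointing back at the kernel hat. A self-contained sketch of that insert/unlink discipline, using stand-in types rather than the real hat_t:

	/*
	 * Self-contained illustration of the hat-list discipline:
	 * head == kas.a_hat->hat_next, tail tracked in kas.a_hat->hat_prev,
	 * list ends where hat_next/hat_prev == NULL.  "node" and "head"
	 * are stand-ins, not kernel types.
	 */
	#include <stddef.h>

	struct node {
		struct node *next;
		struct node *prev;
	};

	struct head {			/* plays the role of kas.a_hat */
		struct node *next;	/* first hat on the list */
		struct node *prev;	/* last hat on the list */
	};

	static void
	insert_at_head(struct head *kas_hat, struct node *hat)
	{
		hat->prev = NULL;
		hat->next = kas_hat->next;
		if (hat->next != NULL)
			hat->next->prev = hat;
		else
			kas_hat->prev = hat;	/* list was empty: new tail */
		kas_hat->next = hat;
	}

	static void
	unlink_node(struct head *kas_hat, struct node *hat)
	{
		if (hat->prev != NULL)
			hat->prev->next = hat->next;
		else
			kas_hat->next = hat->next;
		if (hat->next != NULL)
			hat->next->prev = hat->prev;
		else
			kas_hat->prev = hat->prev;
		hat->next = hat->prev = NULL;
	}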
520 * Initialize hat data structures based on processor MMU information.
683 * initialize hat data structures
727 * Set up the kernel's hat
740 * The kernel hat's next pointer serves as the head of the hat list.
741 * The kernel hat's prev pointer tracks the last hat on the list for
833 * Finish filling in the kernel hat.
850 * We are now effectively running on the kernel hat.
952 reload_pae32(hat_t *hat, cpu_t *cpu)
964 src = hat->hat_vlp_ptes;
979 * Switch to a new active hat, maintaining bit masks to track active CPUs.
985 hat_switch(hat_t *hat)
995 if (old == hat)
1004 if (hat != kas.a_hat) {
1005 CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
1007 cpu->cpu_current_hat = hat;
1012 if (hat->hat_flags & HAT_VLP) {
1016 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1019 reload_pae32(hat, cpu);
1024 newcr3 = MAKECR3((uint64_t)hat->hat_htable->ht_pfn);
1037 * actually specify when switching to the kernel hat.
1038 * For now we'll reuse the kernel hat again.
1041 if (hat == kas.a_hat)
1044 t[1].arg1.mfn = pfn_to_mfn(hat->hat_user_ptable);
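The hat_switch fragments (source lines 985-1044) show the active-hat bookkeeping: return early if the hat is already current, add this CPU to the new hat's cpuset (user hats only), record it as cpu_current_hat, then either load a new %cr3 built with MAKECR3() or, under Xen, queue the equivalent mmuext operations. A simplified bare-metal sketch of that flow; the CPUSET_ATOMIC_DEL() on the old hat is an assumption, since only the ADD on the new hat appears in the matched lines, and the HAT_VLP per-CPU copy of the top-level PTEs is omitted.

	/*
	 * Simplified sketch of the hat_switch() flow on bare metal.
	 * Assumptions: the old hat is dropped from its cpuset with
	 * CPUSET_ATOMIC_DEL() (not among the matched lines), and the
	 * HAT_VLP (32-bit PAE) case is left out.
	 */
	static void
	hat_switch_sketch(hat_t *hat)
	{
		cpu_t	*cpu = CPU;
		hat_t	*old = cpu->cpu_current_hat;

		if (old == hat)
			return;			/* already the active hat */

		if (old != NULL && old != kas.a_hat)
			CPUSET_ATOMIC_DEL(old->hat_cpus, cpu->cpu_id);
		if (hat != kas.a_hat)
			CPUSET_ATOMIC_ADD(hat->hat_cpus, cpu->cpu_id);
		cpu->cpu_current_hat = hat;

		/* load the new top-level page table */
		setcr3(MAKECR3((uint64_t)hat->hat_htable->ht_pfn));
	}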
1128 * Allocate any hat resources required for a process being swapped in.
1132 hat_swapin(hat_t *hat)
1142 hat_swapout(hat_t *hat)
1151 * We can't just call hat_unload(hat, 0, _userlimit...) here, because
1160 ASSERT(AS_LOCK_HELD(hat->hat_as));
1161 if ((uintptr_t)hat->hat_as->a_userlimit < eaddr)
1162 eaddr = (uintptr_t)hat->hat_as->a_userlimit;
1165 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1186 hat_unload(hat, (caddr_t)vaddr, LEVEL_SIZE(l),
1205 htable_purge_hat(hat);
1210 * returns number of bytes that have valid mappings in hat.
1213 hat_get_mapped_size(hat_t *hat)
1219 total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
1220 total += hat->hat_ism_pgcnt;
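hat_get_mapped_size() (source lines 1213-1220) keeps one counter per pagetable level and scales each by that level's page-size shift before adding the ISM page count. An illustrative reconstruction of that computation; the mmu.max_page_level loop bound is an assumption, the rest mirrors the matched lines.

	/*
	 * Illustrative reconstruction: sum the per-level mapping counts,
	 * each scaled to bytes by LEVEL_SHIFT(l), then add the ISM count.
	 * The mmu.max_page_level loop bound is assumed.
	 */
	size_t
	mapped_size_sketch(hat_t *hat)
	{
		size_t	total = 0;
		int	l;

		for (l = 0; l <= mmu.max_page_level; l++)
			total += (hat->hat_pages_mapped[l] << LEVEL_SHIFT(l));
		total += hat->hat_ism_pgcnt;

		return (total);
	}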
1226 * enable/disable collection of stats for hat.
1229 hat_stats_enable(hat_t *hat)
1231 atomic_inc_32(&hat->hat_stats);
1236 hat_stats_disable(hat_t *hat)
1238 atomic_dec_32(&hat->hat_stats);
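hat_stats_enable() and hat_stats_disable() just bump and drop an atomic reference count (hat_stats), so enables nest and each must be paired with a disable. A hypothetical caller:

	/*
	 * Hypothetical example: bracket a statistics-gathering pass.
	 * hat_stats is a counter, so nested enable/disable pairs are fine.
	 */
	void
	with_hat_stats(struct as *as)
	{
		hat_stats_enable(as->a_hat);
		/* ... gather ref/mod statistics for the address space ... */
		hat_stats_disable(as->a_hat);
	}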
1304 hat_t *hat = ht->ht_hat;
1321 is_locked = (flags & HAT_LOAD_LOCK) != 0 && hat != kas.a_hat;
1369 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1373 PGCNT_INC(hat, l);
1421 hat_t *hat,
1441 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1444 hat->hat_flags |= HAT_SHARED;
1449 ht = htable_lookup(hat, va, level);
1458 ht = htable_create(hat, va, level, NULL);
1475 if (hat == kas.a_hat)
1478 if (hat == kas.a_hat && va >= kernelbase)
1557 * hat layer data structures. This flag forces hat layer
1576 hat_t *hat,
1588 ASSERT(hat == kas.a_hat || va < _userlimit);
1589 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1599 ASSERT(hat == kas.a_hat);
1610 if (hati_load_common(hat, va, pp, attr, flags, level, pfn) != 0)
1617 hat_memload_region(struct hat *hat, caddr_t addr, struct page *pp,
1620 hat_memload(hat, addr, pp, attr, flags);
1628 hat_t *hat,
1645 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
1646 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1705 while (hati_load_common(hat, va, pages[pgindx], attr,
1724 hat_memload_array_region(struct hat *hat, caddr_t addr, size_t len,
1728 hat_memload_array(hat, addr, len, pps, attr, flags);
1732 * void hat_devload(hat, addr, len, pf, attr, flags)
1763 hat_t *hat,
1780 ASSERT(hat == kas.a_hat || eva <= _userlimit);
1781 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
1848 while (hati_load_common(hat, va, pp, a, f, level, pfn) != 0) {
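hat_devload() (prototype quoted at source line 1732) is how drivers and the kernel itself establish mappings to device memory or other page-less frames. A hedged usage sketch: the wrapper function, kva, and pfn are placeholders, and the attribute/flag combination shown is only one common choice.

	/*
	 * Hypothetical example: map one page of device memory at a kernel
	 * virtual address.  kva and pfn come from the caller; HAT_LOAD_LOCK
	 * keeps the translation resident until it is explicitly unloaded.
	 */
	void
	map_device_page(caddr_t kva, pfn_t pfn)
	{
		hat_devload(kas.a_hat, kva, MMU_PAGESIZE, pfn,
		    PROT_READ | PROT_WRITE | HAT_STRICTORDER, HAT_LOAD_LOCK);
	}

A later hat_unload() with HAT_UNLOAD_UNLOCK would be the usual way to drop such a locked mapping.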
1865 * void hat_unlock(hat, addr, len)
1871 hat_unlock(hat_t *hat, caddr_t addr, size_t len)
1880 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
1883 if (hat == kas.a_hat)
1889 ASSERT(AS_LOCK_HELD(hat->hat_as));
1891 (void) htable_walk(hat, &ht, &vaddr, eaddr);
1911 hat_unlock_region(struct hat *hat, caddr_t addr, size_t len,
1926 hat_t *hat = (hat_t *)a1;
1931 * If the target hat isn't the kernel and this CPU isn't operating
1932 * in the target hat, we can ignore the cross call.
1934 if (hat != kas.a_hat && hat != CPU->cpu_current_hat)
1950 * the pte values from the struct hat
1952 if (hat->hat_flags & HAT_VLP) {
1956 VLP_COPY(hat->hat_vlp_ptes, vlpptep);
1958 reload_pae32(hat, CPU);
2034 * all CPUs using a given hat.
2037 hat_tlb_inval_range(hat_t *hat, uintptr_t va, size_t len)
2049 * If the hat is being destroyed, there are no more users, so
2052 if (hat->hat_flags & HAT_FREEING)
2060 if (hat->hat_flags & HAT_SHARED) {
2061 hat = kas.a_hat;
2077 (void) hati_demap_func((xc_arg_t)hat,
2086 * Otherwise it's just CPUs currently executing in this hat.
2090 if (hat == kas.a_hat)
2093 cpus_to_shootdown = hat->hat_cpus;
2136 (void) hati_demap_func((xc_arg_t)hat,
2153 xc_call((xc_arg_t)hat, (xc_arg_t)va, (xc_arg_t)len,
2162 hat_tlb_inval(hat_t *hat, uintptr_t va)
2164 hat_tlb_inval_range(hat, va, MMU_PAGESIZE);
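hat_tlb_inval() (source line 2162) is the single-page wrapper around hat_tlb_inval_range(), which in turn decides between a local flush and cross-call shootdowns based on hat_cpus, HAT_SHARED, and HAT_FREEING. A hypothetical internal caller, just to show how the two entry points relate:

	/*
	 * Hypothetical illustration: after a PTE has been changed, invalidate
	 * the stale translations on every CPU that may still cache them.
	 */
	static void
	invalidate_after_remap(hat_t *hat, uintptr_t va, size_t len)
	{
		if (len == MMU_PAGESIZE)
			hat_tlb_inval(hat, va);
		else
			hat_tlb_inval_range(hat, va, len);
	}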
2181 hat_t *hat = ht->ht_hat;
2190 if ((flags & HAT_UNLOAD_UNLOCK) != 0 && hat != kas.a_hat) {
2263 * Handle bookkeeping in the htable and hat
2267 PGCNT_DEC(hat, l);
2311 hat_unload(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2316 ASSERT(hat == kas.a_hat || va + len <= _userlimit);
2322 ASSERT(hat == kas.a_hat);
2325 hat_unload_callback(hat, addr, len, flags, NULL);
2344 handle_ranges(hat_t *hat, hat_callback_t *cb, uint_t cnt, range_info_t *range)
2351 hat_tlb_inval_range(hat, (uintptr_t)range[cnt].rng_va, len);
2375 hat_t *hat,
2391 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2400 ht = htable_getpte(hat, vaddr, &entry, &old_pte, 0);
2413 old_pte = htable_walk(hat, &ht, &vaddr, eaddr);
2428 handle_ranges(hat, cb, r_cnt, r);
2460 handle_ranges(hat, cb, r_cnt, r);
2469 hat_flush_range(hat_t *hat, caddr_t va, size_t size)
2475 sz = hat_getpagesize(hat, va);
2501 hat_sync(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2515 ASSERT(hat == kas.a_hat || eaddr <= _userlimit);
2520 pte = htable_walk(hat, &ht, &vaddr, eaddr);
2575 * void hat_map(hat, addr, len, flags)
2579 hat_map(hat_t *hat, caddr_t addr, size_t len, uint_t flags)
2585 * uint_t hat_getattr(hat, addr, *attr)
2586 * returns attr for <hat,addr> in *attr. returns 0 if there was a
2591 hat_getattr(hat_t *hat, caddr_t addr, uint_t *attr)
2597 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2602 ht = htable_getpte(hat, vaddr, NULL, &pte, mmu.max_page_level);
2632 hat_updateattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr, int what)
2644 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2647 oldpte = htable_walk(hat, &ht, &vaddr, eaddr);
2735 hat_setattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2737 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2738 hat_updateattr(hat, addr, len, attr, HAT_SET_ATTR);
2742 hat_clrattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2744 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2745 hat_updateattr(hat, addr, len, attr, HAT_CLR_ATTR);
2749 hat_chgattr(hat_t *hat, caddr_t addr, size_t len, uint_t attr)
2751 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2752 hat_updateattr(hat, addr, len, attr, HAT_LOAD_ATTR);
2756 hat_chgprot(hat_t *hat, caddr_t addr, size_t len, uint_t vprot)
2758 ASSERT(hat == kas.a_hat || (uintptr_t)addr + len <= _userlimit);
2759 hat_updateattr(hat, addr, len, vprot & HAT_PROT_MASK, HAT_LOAD_ATTR);
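hat_setattr(), hat_clrattr(), hat_chgattr() and hat_chgprot() are thin wrappers that forward to hat_updateattr() with HAT_SET_ATTR, HAT_CLR_ATTR, or HAT_LOAD_ATTR. A hedged usage sketch; the wrapper function and the write-protect policy are hypothetical, and the address space lock is assumed held as the ASSERTs in hat_updateattr() require.

	/*
	 * Hypothetical example: toggle write permission on a single page of
	 * a user mapping.  Caller is assumed to hold the as lock.
	 */
	void
	toggle_write_protect(struct as *as, caddr_t addr, boolean_t protect)
	{
		if (protect)
			hat_clrattr(as->a_hat, addr, MMU_PAGESIZE, PROT_WRITE);
		else
			hat_setattr(as->a_hat, addr, MMU_PAGESIZE, PROT_WRITE);
	}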
2763 * size_t hat_getpagesize(hat, addr)
2764 * returns pagesize in bytes for <hat, addr>. returns -1 if there is
2768 hat_getpagesize(hat_t *hat, caddr_t addr)
2774 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2777 ht = htable_getpage(hat, vaddr, NULL);
2788 * pfn_t hat_getpfnum(hat, addr)
2789 * returns pfn for <hat, addr> or PFN_INVALID if mapping is invalid.
2792 hat_getpfnum(hat_t *hat, caddr_t addr)
2799 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2825 ht = htable_getpage(hat, vaddr, &entry);
2841 * int hat_probe(hat, addr)
2846 hat_probe(hat_t *hat, caddr_t addr)
2853 ASSERT(hat == kas.a_hat || vaddr <= _userlimit);
2854 ASSERT(hat == kas.a_hat || AS_LOCK_HELD(hat->hat_as));
2871 ht = htable_getpage(hat, vaddr, &entry);
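hat_getattr(), hat_getpagesize(), hat_getpfnum() and hat_probe() make up the read-only query side of the interface. A hedged sketch combining them; the function name and the cmn_err() report are hypothetical, while the call signatures follow the prototypes quoted above.

	/*
	 * Hypothetical example: describe the translation backing one virtual
	 * address.  Returns B_FALSE when no valid mapping exists.
	 */
	boolean_t
	describe_mapping(hat_t *hat, caddr_t addr)
	{
		uint_t	attr;
		pfn_t	pfn;

		if (!hat_probe(hat, addr))
			return (B_FALSE);		/* no valid mapping */

		pfn = hat_getpfnum(hat, addr);
		if (pfn == PFN_INVALID)
			return (B_FALSE);

		if (hat_getattr(hat, addr, &attr) != 0)
			return (B_FALSE);		/* nonzero: no mapping */

		cmn_err(CE_CONT, "va %p -> pfn 0x%lx, pagesize %lu, attr 0x%x\n",
		    (void *)addr, (ulong_t)pfn,
		    (ulong_t)hat_getpagesize(hat, addr), attr);
		return (B_TRUE);
	}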
2880 is_it_dism(hat_t *hat, caddr_t va)
2886 seg = as_findseg(hat->hat_as, va, 0);
2901 * and protections to use for this hat. If we find a full properly aligned
2907 hat_t *hat,
2932 * We might be asked to share an empty DISM hat by as_dup()
2934 ASSERT(hat != kas.a_hat);
2950 is_dism = is_it_dism(hat, addr);
2964 ht = htable_lookup(hat, vaddr, l);
3019 ht = htable_create(hat, vaddr, l, ism_ht);
3023 hat->hat_ism_pgcnt +=
3075 while (hati_load_common(hat, vaddr, pp, prot, flags,
3102 hat_unshare(hat_t *hat, caddr_t addr, size_t len, uint_t ismszc)
3111 ASSERT(hat != kas.a_hat);
3134 ht = htable_lookup(hat, vaddr, l);
3142 hat->hat_ism_pgcnt -= ht->ht_valid_cnt <<
3155 if (!(hat->hat_flags & HAT_FREEING) && need_demaps)
3156 hat_tlb_inval(hat, DEMAP_ALL_ADDR);
3162 if (!is_it_dism(hat, addr))
3164 hat_unload(hat, addr, len, flags);
3526 * assumptions that hat depends on upper layer VM to prevent multiple
3793 hat_t *hat,
3844 * Set up the given brand new hat structure as the new HAT on this cpu's mmu.
3848 hat_setup(hat_t *hat, int flags)
3853 hat_switch(hat);
4012 hat_enter(hat_t *hat)
4014 mutex_enter(&hat->hat_mutex);
4018 hat_exit(hat_t *hat)
4020 mutex_exit(&hat->hat_mutex);
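hat_enter() and hat_exit() simply take and drop hat_mutex, giving other VM code a way to serialize against hat-internal updates without reaching into the structure. A hypothetical usage pattern:

	/*
	 * Hypothetical example: perform a small update that must be
	 * serialized with other hat bookkeeping under hat_mutex.
	 */
	void
	update_under_hat_lock(hat_t *hat)
	{
		hat_enter(hat);
		/* ... modify hat state protected by hat_mutex ... */
		hat_exit(hat);
	}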
4149 hat_join_srd(struct hat *hat, vnode_t *evp)
4155 hat_join_region(struct hat *hat,
4171 hat_leave_region(struct hat *hat, hat_region_cookie_t rcookie, uint_t flags)
4178 hat_dup_region(struct hat *hat, hat_region_cookie_t rcookie)
4341 hat_kpm_fault(hat_t *hat, caddr_t vaddr)
4343 panic("pagefault in seg_kpm. hat: 0x%p vaddr: 0x%p",
4344 (void *)hat, (void *)vaddr);
4455 hat_prepare_mapping(hat_t *hat, caddr_t addr, uint64_t *pte_ma)
4463 ht = htable_create(hat, (uintptr_t)addr, 0, NULL);
4479 hat_release_mapping(hat_t *hat, caddr_t addr)
4485 ht = htable_lookup(hat, (uintptr_t)addr, 0);