/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_VM_HTABLE_H
#define	_VM_HTABLE_H

#ifdef	__cplusplus
extern "C" {
#endif

#if defined(__GNUC__) && defined(_ASM_INLINES) && defined(_KERNEL)
#include <asm/htable.h>
#endif

extern void atomic_andb(uint8_t *addr, uint8_t value);
extern void atomic_orb(uint8_t *addr, uint8_t value);
extern void atomic_inc16(uint16_t *addr);
extern void atomic_dec16(uint16_t *addr);
extern void mmu_tlbflush_entry(caddr_t addr);

/*
 * Each hardware page table has an htable_t describing it.
 *
 * We use a reference counter mechanism to detect when we can free an htable.
 * In the implementation the reference count is split into 2 separate counters:
 *
 *	ht_busy is a traditional reference count of uses of the htable pointer.
 *
 *	ht_valid_cnt is a count of how many references are implied by valid
 *	PTE/PTP entries in the pagetable.
 *
 * ht_busy is only incremented by htable_lookup() or htable_create()
 * while holding the appropriate hash_table mutex. While installing a new
 * valid PTE or PTP, in order to increment ht_valid_cnt a thread must have
 * done an htable_lookup() or htable_create() but not the htable_release()
 * yet. (A sketch of this protocol follows the structure definition below.)
 *
 * htable_release(), while holding the mutex, can know that if
 * busy == 1 and valid_cnt == 0, the htable can be freed.
 *
 * The fields have been ordered to make htable_lookup() fast. Hence,
 * ht_hat, ht_vaddr, ht_level and ht_next need to be clustered together.
 */
struct htable {
	struct htable	*ht_next;	/* forward link for hash table */
	struct hat	*ht_hat;	/* hat this mapping comes from */
	uintptr_t	ht_vaddr;	/* virt addr at start of this table */
	int8_t		ht_level;	/* page table level: 0=4K, 1=2M, ... */
	uint8_t		ht_flags;	/* see below */
	int16_t		ht_busy;	/* implements locking protocol */
	int16_t		ht_valid_cnt;	/* # of valid entries in this table */
	uint32_t	ht_lock_cnt;	/* # of locked entries in this table */
					/* never used for kernel hat */
	pfn_t		ht_pfn;		/* pfn of page of the pagetable */
	struct htable	*ht_prev;	/* backward link for hash table */
	struct htable	*ht_parent;	/* htable that points to this htable */
	struct htable	*ht_shares;	/* for HTABLE_SHARED_PFN only */
};
typedef struct htable htable_t;
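
/*
 * A minimal sketch (not verbatim kernel code) of the two counters at work
 * when a thread installs a new valid PTE. Error handling is elided, and
 * PTE_ISVALID() is assumed from hat_pte.h:
 *
 *	ht = htable_create(hat, va, level, NULL);   // takes a ht_busy hold
 *	entry = htable_va2entry(va, ht);
 *	old = x86pte_set(ht, entry, new_pte, NULL);
 *	if (!PTE_ISVALID(old))
 *		HTABLE_INC(ht->ht_valid_cnt);       // valid PTE implies a ref
 *	htable_release(ht);                         // drops the ht_busy hold
 *
 * htable_release() frees the htable only when it sees ht_busy == 1 (just
 * this caller's hold) and ht_valid_cnt == 0 (no PTEs/PTPs keep it alive).
 */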

/*
 * Flag values for the htable ht_flags field:
 *
 * HTABLE_VLP - this is the top level htable of a VLP HAT.
 *
 * HTABLE_SHARED_PFN - this htable had its PFN assigned from sharing another
 * htable. Used by hat_share() for ISM.
 */
#define	HTABLE_VLP		(0x01)
#define	HTABLE_SHARED_PFN	(0x02)

/*
 * The htable hash table hashing function.  The 28 is so that high
 * order bits are included in the hash index to skew the wrap
 * around of addresses. Even though the hash buckets are stored per
 * hat, we include the value of the hat pointer in the hash function so
 * that the secondary hash for the htable mutex winds up being different in
 * every address space.
 */
#define	HTABLE_HASH(hat, va, lvl)					\
	((((va) >> LEVEL_SHIFT(1)) + ((va) >> 28) + (lvl) +		\
	((uintptr_t)(hat) >> 4)) & ((hat)->hat_num_hash - 1))

/*
 * Each CPU gets a unique hat_cpu_info structure in cpu_hat_info.
 */
struct hat_cpu_info {
	kmutex_t hci_mutex;		/* mutex to ensure sequential usage */
#if defined(__amd64)
	pfn_t	hci_vlp_pfn;		/* pfn of hci_vlp_l3ptes */
	x86pte_t *hci_vlp_l3ptes;	/* VLP Level==3 pagetable (top) */
	x86pte_t *hci_vlp_l2ptes;	/* VLP Level==2 pagetable */
#endif	/* __amd64 */
};


/*
 * Compute the last page aligned VA mapped by an htable.
 *
 * Given a va and a level, compute the virtual address of the start of the
 * next page at that level.
 *
 * XX64 - The check for the VA hole needs to be better generalized.
 */
#if defined(__amd64)
#define	HTABLE_NUM_PTES(ht)	(((ht)->ht_flags & HTABLE_VLP) ? 4 : 512)

#define	HTABLE_LAST_PAGE(ht)						\
	((ht)->ht_level == mmu.max_level ? ((uintptr_t)0UL - MMU_PAGESIZE) :\
	((ht)->ht_vaddr - MMU_PAGESIZE +				\
	((uintptr_t)HTABLE_NUM_PTES(ht) << LEVEL_SHIFT((ht)->ht_level))))

#define	NEXT_ENTRY_VA(va, l)						\
	((va & LEVEL_MASK(l)) + LEVEL_SIZE(l) == mmu.hole_start ?	\
	mmu.hole_end : (va & LEVEL_MASK(l)) + LEVEL_SIZE(l))

#elif defined(__i386)

#define	HTABLE_NUM_PTES(ht)	\
	(!mmu.pae_hat ? 1024 : ((ht)->ht_level == 2 ? 4 : 512))

#define	HTABLE_LAST_PAGE(ht)	((ht)->ht_vaddr - MMU_PAGESIZE +	\
	((uintptr_t)HTABLE_NUM_PTES(ht) << LEVEL_SHIFT((ht)->ht_level)))

#define	NEXT_ENTRY_VA(va, l)	((va & LEVEL_MASK(l)) + LEVEL_SIZE(l))

#endif

#if defined(_KERNEL)

/*
 * initialization function called from hat_init()
 */
extern void htable_init(void);

/*
 * Functions to lookup, or "lookup and create", the htable corresponding
 * to the virtual address "vaddr" in the "hat" at the given "level" of
 * page tables. htable_lookup() may return NULL if no such entry exists.
 *
 * On return the given htable is marked busy (a shared lock); this prevents
 * the htable from being stolen or freed until htable_release() is called.
 * (See the usage sketch after these declarations.)
 *
 * If kalloc_flag is set on an htable_create() we can't call kmem allocation
 * routines for this htable, since it's for the kernel hat itself.
 *
 * htable_acquire() is used when an htable pointer has been extracted from
 * an hment and we need to get a reference to the htable.
 */
extern htable_t *htable_lookup(struct hat *hat, uintptr_t vaddr, level_t level);
extern htable_t *htable_create(struct hat *hat, uintptr_t vaddr, level_t level,
	htable_t *shared);
extern void htable_acquire(htable_t *);

extern void htable_release(htable_t *ht);
extern void htable_destroy(htable_t *ht);
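
/*
 * Illustrative use of the busy-hold protocol (a sketch, not verbatim
 * kernel code): the caller holds the htable busy for exactly the span
 * between the lookup and the release.
 *
 *	ht = htable_lookup(hat, vaddr, level);
 *	if (ht != NULL) {
 *		pte = x86pte_get(ht, htable_va2entry(vaddr, ht));
 *		... examine or update the entry ...
 *		htable_release(ht);
 *	}
 */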

/*
 * Code to free all remaining htables for a hat. Called after the hat is no
 * longer in use by any thread.
 */
extern void htable_purge_hat(struct hat *hat);

/*
 * Find the htable, page table entry index, and PTE of the given virtual
 * address. Returns NULL if not found. When found, returns the htable_t *,
 * sets entry, and has a hold on the htable.
 */
extern htable_t *htable_getpte(struct hat *, uintptr_t, uint_t *, x86pte_t *,
	level_t);

/*
 * Similar to htable_getpte(), except that this only succeeds if a valid
 * page mapping is present.
 */
extern htable_t *htable_getpage(struct hat *hat, uintptr_t va, uint_t *entry);

/*
 * Called to allocate initial/additional htables for reserve.
 */
extern void htable_initial_reserve(uint_t);
extern void htable_reserve(uint_t);

/*
 * Used to readjust the htable reserve after the reserve list has been used.
 * Also called after boot to release left over boot reserves.
 */
extern void htable_adjust_reserve(void);

/*
 * return number of bytes mapped by all the htables in a given hat
 */
extern size_t htable_mapped(struct hat *);


/*
 * Attach initial pagetables as htables
 */
extern void htable_attach(struct hat *, uintptr_t, level_t, struct htable *,
	pfn_t);

/*
 * Routine to find the next populated htable at or above a given virtual
 * address. Can specify an upper limit, or HTABLE_WALK_TO_END to indicate
 * that it should search the entire address space. Similar to
 * htable_getpte(), but used for walking through address ranges. It can be
 * used like this:
 *
 *	va = ...
 *	ht = NULL;
 *	while (va < end_va) {
 *		pte = htable_walk(hat, &ht, &va, end_va);
 *		if (!pte)
 *			break;
 *
 *		... code to operate on page at va ...
 *
 *		va += LEVEL_SIZE(ht->ht_level);
 *	}
 *	if (ht)
 *		htable_release(ht);
 */
extern x86pte_t htable_walk(struct hat *hat, htable_t **ht, uintptr_t *va,
	uintptr_t eaddr);

#define	HTABLE_WALK_TO_END ((uintptr_t)-1)

/*
 * Utilities to convert between virtual addresses and page table entry indices.
 */
extern uint_t htable_va2entry(uintptr_t va, htable_t *ht);
extern uintptr_t htable_e2va(htable_t *ht, uint_t entry);

/*
 * Interfaces that provide access to page table entries via the htable.
 *
 * Note that all accesses except x86pte_copy() and x86pte_zero() are atomic.
 */
extern void	x86pte_cpu_init(cpu_t *);
extern void	x86pte_cpu_fini(cpu_t *);

extern x86pte_t	x86pte_get(htable_t *, uint_t entry);

/*
 * x86pte_set returns LPAGE_ERROR if it's asked to overwrite a page table
 * link with a large page mapping.
 */
#define	LPAGE_ERROR (-(x86pte_t)1)
extern x86pte_t	x86pte_set(htable_t *, uint_t entry, x86pte_t new, void *);

extern x86pte_t	x86pte_inval(htable_t *ht, uint_t entry,
	x86pte_t old, x86pte_t *ptr);

extern x86pte_t	x86pte_update(htable_t *ht, uint_t entry,
	x86pte_t old, x86pte_t new);

extern void	x86pte_copy(htable_t *src, htable_t *dest, uint_t entry,
	uint_t cnt);

/*
 * access to a pagetable knowing only the pfn
 */
extern x86pte_t	*x86pte_mapin(pfn_t, uint_t, htable_t *);
extern void	x86pte_mapout(void);
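
/*
 * Illustrative pairing of the above (a sketch, not verbatim kernel code):
 * x86pte_mapin() temporarily maps the pagetable page for the given pfn and
 * returns a pointer usable to reach the requested entry; each mapin must
 * be matched by an x86pte_mapout() before the mapping window is reused.
 *
 *	ptep = x86pte_mapin(pfn, entry, ht);
 *	pte = *ptep;
 *	x86pte_mapout();
 */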

/*
 * These are actually inlines for "lock; incw", "lock; decw", etc.
 * instructions.
 */
#define	HTABLE_INC(x)	atomic_inc16((uint16_t *)&x)
#define	HTABLE_DEC(x)	atomic_dec16((uint16_t *)&x)
#define	HTABLE_LOCK_INC(ht)	atomic_inc_32(&(ht)->ht_lock_cnt)
#define	HTABLE_LOCK_DEC(ht)	atomic_dec_32(&(ht)->ht_lock_cnt)

#ifdef __xpv
extern void xen_flush_va(caddr_t va);
extern void xen_gflush_va(caddr_t va, cpuset_t);
extern void xen_flush_tlb(void);
extern void xen_gflush_tlb(cpuset_t);
extern void xen_pin(pfn_t, level_t);
extern void xen_unpin(pfn_t);
extern int xen_kpm_page(pfn_t, uint_t);

/*
 * The hypervisor maps all page tables into our address space read-only.
 * Under normal circumstances, the hypervisor then handles all updates to
 * the page tables underneath the covers for us. However, when we are
 * trying to dump core after a hypervisor panic, the hypervisor is no
 * longer available to do these updates. To work around the protection
 * problem, we simply disable write-protect checking for the duration of a
 * pagetable update operation.
 */
#define	XPV_ALLOW_PAGETABLE_UPDATES()					\
	{								\
		if (IN_XPV_PANIC())					\
			setcr0((getcr0() & ~CR0_WP) & 0xffffffff);	\
	}
#define	XPV_DISALLOW_PAGETABLE_UPDATES()				\
	{								\
		if (IN_XPV_PANIC())					\
			setcr0((getcr0() | CR0_WP) & 0xffffffff);	\
	}

#else /* __xpv */

#define	XPV_ALLOW_PAGETABLE_UPDATES()
#define	XPV_DISALLOW_PAGETABLE_UPDATES()

#endif

#endif	/* _KERNEL */


#ifdef	__cplusplus
}
#endif

#endif	/* _VM_HTABLE_H */