/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source.  A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */
/* This file is dual-licensed; see usr/src/contrib/bhyve/LICENSE */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2024 Oxide Computer Company
 */

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/kmem.h>
#include <sys/sysmacros.h>
#include <sys/sunddi.h>
#include <sys/panic.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/hat_i86.h>

#include <sys/vmm_gpt.h>

/*
 * VMM Generic Page Tables
 *
 * Bhyve runs on AMD and Intel hosts and both support nested page tables
 * describing the guest's physical address space.  But the two use different
 * and mutually incompatible page table formats: Intel uses the EPT, which is
 * based on the Itanium page table format, while AMD uses the nPT, which is
 * based on the x86_64 page table format.
 *
 * The GPT abstracts these format differences, and provides a single interface
 * for interacting with either kind of table structure.
 *
 * At a high level, the GPT is a tree that mirrors the paging table radix
 * tree.  It is parameterized with operations on PTEs that are specific to the
 * table type (EPT or nPT) and keeps a pointer to the root node in the tree.
 *
 * A node in the GPT keeps pointers to its parent (NULL for the root), its
 * left-most child, and its siblings.  The node understands its position in
 * the tree in terms of the level it appears at and the index it occupies at
 * its parent's level, as well as how many children it has.  It also owns the
 * physical memory page for the hardware page table entries that map its
 * children.  Thus, for a node at any given level in the tree, the nested PTE
 * for that node's child at index $i$ is the i'th uint64_t in that node's
 * entry page, and the entry page is part of the paging structure consumed by
 * hardware.
 *
 * The GPT interface provides functions for populating and vacating the tree
 * for regions in the guest physical address space, and for mapping and
 * unmapping pages in populated regions.  Users must populate a region before
 * mapping pages into it, and must unmap pages before vacating the region.
 *
 * The interface also exposes a function for walking the table from the root
 * to a leaf entry, populating an array of pointers to PTEs.  This walk uses
 * the hardware page structure itself, and is thus fast, though as a result it
 * potentially aliases entries; caveat emptor.  The walk primitive is used for
 * mapping, unmapping, and lookups.
 *
 * Format-specific differences are abstracted by parameterizing the GPT with a
 * set of PTE operations specific to the platform.  The GPT code makes use of
 * these when mapping or populating entries, resetting accessed and dirty bits
 * on entries, and similar operations.
 */
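/*
 * As a worked illustration of the radix structure (not itself driver logic):
 * a GPA decomposes into four 9-bit table indices plus a 12-bit page offset.
 * Taking gpa = 0x123456000 as an example:
 *
 *	LEVEL4 index: (gpa >> 39) & 0x1ff == 0x000
 *	LEVEL3 index: (gpa >> 30) & 0x1ff == 0x004
 *	LEVEL2 index: (gpa >> 21) & 0x1ff == 0x11a
 *	LEVEL1 index: (gpa >> 12) & 0x1ff == 0x056
 *
 * Each index selects one of the 512 uint64_t entries in that level's entry
 * page; see vmm_gpt_lvl_index() below.
 */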
/*
 * A GPT node.
 *
 * Each node contains pointers to its parent, its left-most child, and its
 * siblings.  Interior nodes also maintain a reference count, and each node
 * contains its level and index in its parent's table.  Finally, each node
 * contains the host PFN of the page that it links into the page table, as
 * well as a kernel pointer to that table.
 *
 * On leaf nodes, the reference count tracks how many entries in the table are
 * covered by mappings from the containing vmspace.  This is maintained during
 * calls to vmm_gpt_populate_region() and vmm_gpt_vacate_region() as part of
 * vmspace map/unmap operations, rather than in the data path of faults
 * populating the PTEs themselves.
 *
 * Note, this is carefully sized to fit exactly into a 64-byte cache line.
 */
typedef struct vmm_gpt_node vmm_gpt_node_t;
struct vmm_gpt_node {
	uint64_t	vgn_host_pfn;
	uint16_t	vgn_level;
	uint16_t	vgn_index;
	uint32_t	vgn_ref_cnt;
	vmm_gpt_node_t	*vgn_parent;
	vmm_gpt_node_t	*vgn_children;
	vmm_gpt_node_t	*vgn_sib_next;
	vmm_gpt_node_t	*vgn_sib_prev;
	uint64_t	*vgn_entries;
	uint64_t	vgn_gpa;
};

/* Maximum node index determined by number of entries in page table (512) */
#define	PTE_PER_TABLE	512
#define	MAX_NODE_IDX	(PTE_PER_TABLE - 1)

/*
 * A VMM Generic Page Table.
 *
 * The generic page table is a format-agnostic, 4-level paging structure
 * modeling a second-level page table (EPT on Intel, nPT on AMD).  It contains
 * a pointer to the root node in the table, and is parameterized with a set of
 * PTE operations specific to the table type.
 */
struct vmm_gpt {
	vmm_gpt_node_t	*vgpt_root;
	vmm_pte_ops_t	*vgpt_pte_ops;
};

/*
 * Allocates a vmm_gpt_node_t structure with corresponding page of memory to
 * hold the PTEs it contains.
 */
static vmm_gpt_node_t *
vmm_gpt_node_alloc(void)
{
	vmm_gpt_node_t *node;
	caddr_t page;

	node = kmem_zalloc(sizeof (*node), KM_SLEEP);
	/*
	 * Note: despite the man page, allocating PAGESIZE bytes is
	 * guaranteed to be page-aligned.
	 */
	page = kmem_zalloc(PAGESIZE, KM_SLEEP);
	node->vgn_entries = (uint64_t *)page;
	node->vgn_host_pfn = hat_getpfnum(kas.a_hat, page);

	return (node);
}

/*
 * Allocates and initializes a vmm_gpt_t.
 */
vmm_gpt_t *
vmm_gpt_alloc(vmm_pte_ops_t *pte_ops)
{
	vmm_gpt_t *gpt;

	VERIFY(pte_ops != NULL);
	gpt = kmem_zalloc(sizeof (*gpt), KM_SLEEP);
	gpt->vgpt_pte_ops = pte_ops;
	gpt->vgpt_root = vmm_gpt_node_alloc();

	return (gpt);
}

/*
 * Frees a given node.  The node is expected to have no familial (parent,
 * children, siblings) associations at this point.  Accordingly, its reference
 * count should be zero.
 */
static void
vmm_gpt_node_free(vmm_gpt_node_t *node)
{
	ASSERT(node != NULL);
	ASSERT3U(node->vgn_ref_cnt, ==, 0);
	ASSERT(node->vgn_host_pfn != PFN_INVALID);
	ASSERT(node->vgn_entries != NULL);
	ASSERT(node->vgn_parent == NULL);

	kmem_free(node->vgn_entries, PAGESIZE);
	kmem_free(node, sizeof (*node));
}
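/*
 * An illustrative lifecycle sketch (assuming a hypothetical `my_pte_ops`
 * vmm_pte_ops_t implementation; in practice the platform-specific EPT or nPT
 * code supplies these operations, and vmm_gpt_free() is defined below):
 *
 *	vmm_gpt_t *gpt = vmm_gpt_alloc(&my_pte_ops);
 *	...
 *	vmm_gpt_free(gpt);
 */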
/*
 * Frees a vmm_gpt_t.  Any lingering nodes in the GPT will be freed too.
 */
void
vmm_gpt_free(vmm_gpt_t *gpt)
{
	/* Empty anything remaining in the tree */
	vmm_gpt_vacate_region(gpt, 0, UINT64_MAX & PAGEMASK);

	VERIFY(gpt->vgpt_root != NULL);
	VERIFY3U(gpt->vgpt_root->vgn_ref_cnt, ==, 0);

	vmm_gpt_node_free(gpt->vgpt_root);
	kmem_free(gpt, sizeof (*gpt));
}

/*
 * Given a GPA, return its corresponding index in a paging structure at the
 * provided level.
 */
static inline uint16_t
vmm_gpt_lvl_index(vmm_gpt_node_level_t level, uint64_t gpa)
{
	ASSERT(level < MAX_GPT_LEVEL);

	const uint16_t mask = (1U << 9) - 1;
	switch (level) {
	case LEVEL4:
		return ((gpa >> 39) & mask);
	case LEVEL3:
		return ((gpa >> 30) & mask);
	case LEVEL2:
		return ((gpa >> 21) & mask);
	case LEVEL1:
		return ((gpa >> 12) & mask);
	default:
		panic("impossible level value");
	};
}

/* Get mask for addresses of entries at a given table level. */
static inline uint64_t
vmm_gpt_lvl_mask(vmm_gpt_node_level_t level)
{
	ASSERT(level < MAX_GPT_LEVEL);

	switch (level) {
	case LEVEL4:
		return (0xffffff8000000000ul);	/* entries cover 512G */
	case LEVEL3:
		return (0xffffffffc0000000ul);	/* entries cover 1G */
	case LEVEL2:
		return (0xffffffffffe00000ul);	/* entries cover 2M */
	case LEVEL1:
		return (0xfffffffffffff000ul);	/* entries cover 4K */
	default:
		panic("impossible level value");
	};
}

/* Get length of GPA covered by entries at a given table level. */
static inline uint64_t
vmm_gpt_lvl_len(vmm_gpt_node_level_t level)
{
	ASSERT(level < MAX_GPT_LEVEL);

	switch (level) {
	case LEVEL4:
		return (0x8000000000ul);	/* entries cover 512G */
	case LEVEL3:
		return (0x40000000ul);		/* entries cover 1G */
	case LEVEL2:
		return (0x200000ul);		/* entries cover 2M */
	case LEVEL1:
		return (0x1000ul);		/* entries cover 4K */
	default:
		panic("impossible level value");
	};
}

/*
 * Get the ending GPA which this node could possibly cover given its base
 * address and level.
 */
static inline uint64_t
vmm_gpt_node_end(vmm_gpt_node_t *node)
{
	ASSERT(node->vgn_level > LEVEL4);
	return (node->vgn_gpa + vmm_gpt_lvl_len(node->vgn_level - 1));
}

/*
 * Is this node the last entry in its parent node, based solely on its GPA?
 */
static inline bool
vmm_gpt_node_is_last(vmm_gpt_node_t *node)
{
	return (node->vgn_index == MAX_NODE_IDX);
}

/*
 * How many table entries (if any) in this node are covered by the range
 * [start, end).
 */
static uint16_t
vmm_gpt_node_entries_covered(vmm_gpt_node_t *node, uint64_t start,
    uint64_t end)
{
	const uint64_t node_end = vmm_gpt_node_end(node);

	/* Is this node covered at all by the region? */
	if (start >= node_end || end <= node->vgn_gpa) {
		return (0);
	}

	const uint64_t mask = vmm_gpt_lvl_mask(node->vgn_level);
	const uint64_t covered_start = MAX(node->vgn_gpa, start & mask);
	const uint64_t covered_end = MIN(node_end, end & mask);
	const uint64_t per_entry = vmm_gpt_lvl_len(node->vgn_level);

	return ((covered_end - covered_start) / per_entry);
}
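/*
 * A worked example of the arithmetic above (illustrative only): consider a
 * LEVEL1 node with vgn_gpa == 0x200000, so vmm_gpt_node_end() == 0x400000
 * (a 2M span holding 512 4K entries).  For the range [0x300000, 0x380000):
 *
 *	covered_start = MAX(0x200000, 0x300000) == 0x300000
 *	covered_end   = MIN(0x400000, 0x380000) == 0x380000
 *	covered       = 0x80000 / 0x1000 == 128 entries
 */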
/*
 * Find the next node (by address) in the tree at the same level.
 *
 * Returns NULL if this is the last node in the tree or if `only_seq` was true
 * and there is an address gap between this node and the next.
 */
static vmm_gpt_node_t *
vmm_gpt_node_next(vmm_gpt_node_t *node, bool only_seq)
{
	ASSERT3P(node->vgn_parent, !=, NULL);
	ASSERT3U(node->vgn_level, >, LEVEL4);

	/*
	 * Next node sequentially would be the one at the address starting at
	 * the end of what is covered by this node.
	 */
	const uint64_t gpa_match = vmm_gpt_node_end(node);

	/* Try our next sibling */
	vmm_gpt_node_t *next = node->vgn_sib_next;
	if (next != NULL) {
		if (next->vgn_gpa == gpa_match || !only_seq) {
			return (next);
		}
	} else {
		/*
		 * If the next-sibling pointer is NULL on the node, it can mean
		 * one of two things:
		 *
		 * 1. This entry represents the space leading up to the
		 *    trailing boundary of what this node covers.
		 *
		 * 2. The node is not entirely populated, and there is a gap
		 *    between the last populated entry, and the trailing
		 *    boundary of the node.
		 *
		 * Either way, the proper course of action is to check the
		 * first child of our parent's next sibling.
		 */
		vmm_gpt_node_t *pibling = node->vgn_parent->vgn_sib_next;
		if (pibling != NULL) {
			next = pibling->vgn_children;
			if (next != NULL) {
				if (next->vgn_gpa == gpa_match || !only_seq) {
					return (next);
				}
			}
		}
	}

	return (NULL);
}

/*
 * Finds the child for the given GPA in the given parent node.
 * Returns a pointer to the node, or NULL if it is not found.
 */
static vmm_gpt_node_t *
vmm_gpt_node_find_child(vmm_gpt_node_t *parent, uint64_t gpa)
{
	const uint16_t index = vmm_gpt_lvl_index(parent->vgn_level, gpa);
	for (vmm_gpt_node_t *child = parent->vgn_children;
	    child != NULL && child->vgn_index <= index;
	    child = child->vgn_sib_next) {
		if (child->vgn_index == index)
			return (child);
	}

	return (NULL);
}
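/*
 * To illustrate the `only_seq` distinction (example values, not live driver
 * state): given LEVEL1 sibling nodes covering [0M, 2M) and [4M, 6M), calling
 * vmm_gpt_node_next(node_at_0M, true) returns NULL, since the node at 4M is
 * not address-contiguous with the first, while passing only_seq as false
 * returns the node at 4M despite the gap.
 */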
/*
 * Add a child node to the GPT at a position determined by GPA, parent, and
 * (if present) preceding sibling.
 *
 * If the `parent` node contains any children, `prev_sibling` must be
 * populated with a pointer to the node preceding (by GPA) the to-be-added
 * child node.
 */
static void
vmm_gpt_node_add(vmm_gpt_t *gpt, vmm_gpt_node_t *parent,
    vmm_gpt_node_t *child, uint64_t gpa, vmm_gpt_node_t *prev_sibling)
{
	ASSERT3U(parent->vgn_level, <, LEVEL1);
	ASSERT3P(child->vgn_parent, ==, NULL);

	const uint16_t idx = vmm_gpt_lvl_index(parent->vgn_level, gpa);
	child->vgn_index = idx;
	child->vgn_level = parent->vgn_level + 1;
	child->vgn_gpa = gpa & vmm_gpt_lvl_mask(parent->vgn_level);

	/* Establish familial connections */
	child->vgn_parent = parent;
	if (prev_sibling != NULL) {
		ASSERT3U(prev_sibling->vgn_gpa, <, child->vgn_gpa);

		child->vgn_sib_next = prev_sibling->vgn_sib_next;
		if (child->vgn_sib_next != NULL) {
			child->vgn_sib_next->vgn_sib_prev = child;
		}
		child->vgn_sib_prev = prev_sibling;
		prev_sibling->vgn_sib_next = child;
	} else if (parent->vgn_children != NULL) {
		vmm_gpt_node_t *next_sibling = parent->vgn_children;

		ASSERT3U(next_sibling->vgn_gpa, >, child->vgn_gpa);
		ASSERT3P(next_sibling->vgn_sib_prev, ==, NULL);

		child->vgn_sib_next = next_sibling;
		child->vgn_sib_prev = NULL;
		next_sibling->vgn_sib_prev = child;
		parent->vgn_children = child;
	} else {
		parent->vgn_children = child;
		child->vgn_sib_next = NULL;
		child->vgn_sib_prev = NULL;
	}

	/* Configure PTE for child table */
	parent->vgn_entries[idx] =
	    gpt->vgpt_pte_ops->vpeo_map_table(child->vgn_host_pfn);
	parent->vgn_ref_cnt++;
}

/*
 * Remove a child node from its relatives (parent, siblings) and free it.
 */
static void
vmm_gpt_node_remove(vmm_gpt_node_t *child)
{
	ASSERT3P(child->vgn_children, ==, NULL);
	ASSERT3U(child->vgn_ref_cnt, ==, 0);
	ASSERT3P(child->vgn_parent, !=, NULL);

	/* Unlink child from its siblings and parent */
	vmm_gpt_node_t *parent = child->vgn_parent;
	vmm_gpt_node_t *prev = child->vgn_sib_prev;
	vmm_gpt_node_t *next = child->vgn_sib_next;
	if (prev != NULL) {
		ASSERT3P(prev->vgn_sib_next, ==, child);
		prev->vgn_sib_next = next;
	}
	if (next != NULL) {
		ASSERT3P(next->vgn_sib_prev, ==, child);
		next->vgn_sib_prev = prev;
	}
	if (prev == NULL) {
		ASSERT3P(parent->vgn_children, ==, child);
		parent->vgn_children = next;
	}
	child->vgn_parent = NULL;
	child->vgn_sib_next = NULL;
	child->vgn_sib_prev = NULL;
	parent->vgn_entries[child->vgn_index] = 0;
	parent->vgn_ref_cnt--;

	vmm_gpt_node_free(child);
}

/*
 * Walks the GPT for the given GPA, accumulating entries to the given depth.
 * If the walk terminates before the depth is reached, the remaining entries
 * are written with NULLs.
 */
void
vmm_gpt_walk(vmm_gpt_t *gpt, uint64_t gpa, uint64_t **entries,
    vmm_gpt_node_level_t depth)
{
	uint64_t *current_entries, entry;
	pfn_t pfn;

	ASSERT(gpt != NULL);
	current_entries = gpt->vgpt_root->vgn_entries;
	for (uint_t i = 0; i < depth; i++) {
		if (current_entries == NULL) {
			entries[i] = NULL;
			continue;
		}
		entries[i] = &current_entries[vmm_gpt_lvl_index(i, gpa)];
		entry = *entries[i];
		if (!gpt->vgpt_pte_ops->vpeo_pte_is_present(entry)) {
			current_entries = NULL;
			continue;
		}
		pfn = gpt->vgpt_pte_ops->vpeo_pte_pfn(entry);
		current_entries = (uint64_t *)hat_kpm_pfn2va(pfn);
	}
}
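/*
 * An illustrative use of the walk primitive (a sketch; assumes the region
 * containing `gpa` was populated via vmm_gpt_populate_region(), and uses
 * vmm_gpt_is_mapped(), both defined below):
 *
 *	uint64_t *ents[MAX_GPT_LEVEL];
 *	pfn_t pfn;
 *	uint_t prot;
 *
 *	vmm_gpt_walk(gpt, gpa, ents, MAX_GPT_LEVEL);
 *	if (vmm_gpt_is_mapped(gpt, ents[LEVEL1], &pfn, &prot)) {
 *		... the GPA is mapped; pfn/prot describe the leaf PTE ...
 *	}
 *
 * Note that the returned pointers alias the hardware-consumed entry pages,
 * as cautioned in the theory statement at the top of this file.
 */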
/*
 * Looks up the leaf entry for a given GPA.
 */
uint64_t *
vmm_gpt_lookup(vmm_gpt_t *gpt, uint64_t gpa)
{
	uint64_t *entries[MAX_GPT_LEVEL];

	vmm_gpt_walk(gpt, gpa, entries, MAX_GPT_LEVEL);

	return (entries[LEVEL1]);
}

/*
 * Populate child table nodes for a given level between the provided interval
 * of [addr, addr + len).  Caller is expected to provide a pointer to the
 * parent node which would contain the child node for GPA at `addr`.  A
 * pointer to said child node will be returned when the operation is complete.
 */
static vmm_gpt_node_t *
vmm_gpt_populate_region_lvl(vmm_gpt_t *gpt, uint64_t addr, uint64_t len,
    vmm_gpt_node_t *node_start)
{
	const vmm_gpt_node_level_t lvl = node_start->vgn_level;
	const uint64_t end = addr + len;
	const uint64_t incr = vmm_gpt_lvl_len(lvl);
	uint64_t gpa = addr & vmm_gpt_lvl_mask(lvl);
	vmm_gpt_node_t *parent = node_start;

	/* Try to locate node at starting address */
	vmm_gpt_node_t *prev = NULL, *node = parent->vgn_children;
	while (node != NULL && node->vgn_gpa < gpa) {
		prev = node;
		node = node->vgn_sib_next;
	}

	/*
	 * If no node exists at the starting address, create one and link it
	 * into the parent.
	 */
	if (node == NULL || node->vgn_gpa > gpa) {
		/* Need to insert node for starting GPA */
		node = vmm_gpt_node_alloc();
		vmm_gpt_node_add(gpt, parent, node, gpa, prev);
	}

	vmm_gpt_node_t *front_node = node;
	prev = node;
	gpa += incr;

	/*
	 * With a node at the starting address, walk forward creating nodes in
	 * any of the gaps.
	 */
	for (; gpa < end; gpa += incr, prev = node) {
		node = vmm_gpt_node_next(prev, true);
		if (node != NULL) {
			ASSERT3U(node->vgn_gpa, ==, gpa);

			/* We may have crossed into a new parent */
			parent = node->vgn_parent;
			continue;
		}

		if (vmm_gpt_node_is_last(prev)) {
			/*
			 * The node preceding this was the last one in its
			 * containing parent, so move on to that parent's
			 * sibling.  We expect (demand) that it exist already.
			 */
			parent = vmm_gpt_node_next(parent, true);
			ASSERT(parent != NULL);

			/*
			 * Forget our previous sibling, since it is of no use
			 * for assigning the new node to a now-different
			 * parent.
			 */
			prev = NULL;
		}
		node = vmm_gpt_node_alloc();
		vmm_gpt_node_add(gpt, parent, node, gpa, prev);
	}

	return (front_node);
}
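/*
 * A worked example of region population (illustrative numbers): populating
 * [0x40000000, 0x40400000), 4M starting at 1G, proceeds level by level:
 *
 *	LEVEL4 pass: one LEVEL3 node covering [0, 512G)
 *	LEVEL3 pass: one LEVEL2 node covering [1G, 2G)
 *	LEVEL2 pass: two LEVEL1 nodes covering [1G, 1G + 2M) and
 *	    [1G + 2M, 1G + 4M)
 *
 * Each LEVEL1 node then receives reference counts for the 512 PTEs it
 * contributes to the region; see vmm_gpt_populate_region() below.
 */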
/*
 * Ensures that PTEs for the region of address space bounded by
 * [addr, addr + len) exist in the tree.
 */
void
vmm_gpt_populate_region(vmm_gpt_t *gpt, uint64_t addr, uint64_t len)
{
	ASSERT0(addr & PAGEOFFSET);
	ASSERT0(len & PAGEOFFSET);

	/*
	 * Starting at the top of the tree, ensure that tables covering the
	 * requested region exist at each level.
	 */
	vmm_gpt_node_t *node = gpt->vgpt_root;
	for (uint_t lvl = LEVEL4; lvl < LEVEL1; lvl++) {
		ASSERT3U(node->vgn_level, ==, lvl);

		node = vmm_gpt_populate_region_lvl(gpt, addr, len, node);
	}

	/*
	 * Establish reference counts for the soon-to-be memory PTEs which
	 * will be filling these LEVEL1 tables.
	 */
	uint64_t gpa = addr;
	const uint64_t end = addr + len;
	while (gpa < end) {
		ASSERT(node != NULL);
		ASSERT3U(node->vgn_level, ==, LEVEL1);

		const uint16_t covered =
		    vmm_gpt_node_entries_covered(node, addr, end);

		ASSERT(covered != 0);
		ASSERT3U(node->vgn_ref_cnt, <, PTE_PER_TABLE);
		ASSERT3U(node->vgn_ref_cnt + covered, <=, PTE_PER_TABLE);

		node->vgn_ref_cnt += covered;

		vmm_gpt_node_t *next = vmm_gpt_node_next(node, true);
		if (next != NULL) {
			gpa = next->vgn_gpa;
			node = next;
		} else {
			/*
			 * We do not expect to find a subsequent node after
			 * filling the last node in the table, completing PTE
			 * accounting for the specified range.
			 */
			VERIFY3U(end, <=, vmm_gpt_node_end(node));
			break;
		}
	}
}

/*
 * Format a PTE and install it in the provided PTE-pointer.
 */
bool
vmm_gpt_map_at(vmm_gpt_t *gpt, uint64_t *ptep, pfn_t pfn, uint_t prot,
    uint8_t attr)
{
	uint64_t entry, old_entry;

	entry = gpt->vgpt_pte_ops->vpeo_map_page(pfn, prot, attr);
	old_entry = atomic_cas_64(ptep, 0, entry);
	if (old_entry != 0) {
		ASSERT3U(gpt->vgpt_pte_ops->vpeo_pte_pfn(entry), ==,
		    gpt->vgpt_pte_ops->vpeo_pte_pfn(old_entry));
		return (false);
	}

	return (true);
}

/*
 * Inserts an entry for a given GPA into the table.  The caller must ensure
 * that a conflicting PFN is not mapped at the requested location.  Racing
 * operations to map the same PFN at one location are acceptable and properly
 * handled.
 */
bool
vmm_gpt_map(vmm_gpt_t *gpt, uint64_t gpa, pfn_t pfn, uint_t prot, uint8_t attr)
{
	uint64_t *entries[MAX_GPT_LEVEL];

	ASSERT(gpt != NULL);
	vmm_gpt_walk(gpt, gpa, entries, MAX_GPT_LEVEL);
	ASSERT(entries[LEVEL1] != NULL);

	return (vmm_gpt_map_at(gpt, entries[LEVEL1], pfn, prot, attr));
}
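/*
 * An illustrative caller sequence for the map path (a sketch; `pfn` is
 * assumed to reference a pinned host page and `attr` a platform-appropriate
 * memory attribute):
 *
 *	vmm_gpt_populate_region(gpt, gpa, PAGESIZE);
 *	(void) vmm_gpt_map(gpt, gpa, pfn, PROT_READ | PROT_WRITE, attr);
 *	...
 *	(void) vmm_gpt_unmap(gpt, gpa);
 *	vmm_gpt_vacate_region(gpt, gpa, PAGESIZE);
 *
 * Populate-before-map and unmap-before-vacate are obligations of the caller,
 * per the theory statement at the top of this file.
 */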
/*
 * Cleans up the unused inner nodes in the GPT for a region of guest physical
 * address space of [addr, addr + len).  The region must map no pages.
 */
void
vmm_gpt_vacate_region(vmm_gpt_t *gpt, uint64_t addr, uint64_t len)
{
	ASSERT0(addr & PAGEOFFSET);
	ASSERT0(len & PAGEOFFSET);

	const uint64_t end = addr + len;
	vmm_gpt_node_t *node, *starts[MAX_GPT_LEVEL] = {
		[LEVEL4] = gpt->vgpt_root,
	};

	for (vmm_gpt_node_level_t lvl = LEVEL4; lvl < LEVEL1; lvl++) {
		node = vmm_gpt_node_find_child(starts[lvl], addr);
		if (node == NULL) {
			break;
		}
		starts[lvl + 1] = node;
	}

	/*
	 * Starting at the bottom of the tree, ensure that PTEs for pages have
	 * been cleared for the region, and remove the corresponding reference
	 * counts from the containing LEVEL1 tables.
	 */
	uint64_t gpa = addr;
	node = starts[LEVEL1];
	while (gpa < end && node != NULL) {
		const uint16_t covered =
		    vmm_gpt_node_entries_covered(node, addr, end);

		ASSERT3U(node->vgn_ref_cnt, >=, covered);
		node->vgn_ref_cnt -= covered;

		node = vmm_gpt_node_next(node, false);
		if (node != NULL) {
			gpa = node->vgn_gpa;
		}
	}

	/*
	 * With the page PTE references eliminated, work up from the bottom of
	 * the table, removing nodes which have no remaining references.
	 *
	 * This stops short of LEVEL4, which is the root table of the GPT.  It
	 * is left standing to be cleaned up when the vmm_gpt_t is destroyed.
	 */
	for (vmm_gpt_node_level_t lvl = LEVEL1; lvl > LEVEL4; lvl--) {
		gpa = addr;
		node = starts[lvl];

		while (gpa < end && node != NULL) {
			vmm_gpt_node_t *next = vmm_gpt_node_next(node, false);

			if (node->vgn_ref_cnt == 0) {
				vmm_gpt_node_remove(node);
			}
			if (next != NULL) {
				gpa = next->vgn_gpa;
			}
			node = next;
		}
	}
}

/*
 * Remove a mapping from the table.  Returns false if the page was not
 * mapped, otherwise returns true.
 */
bool
vmm_gpt_unmap(vmm_gpt_t *gpt, uint64_t gpa)
{
	uint64_t *entries[MAX_GPT_LEVEL], entry;

	ASSERT(gpt != NULL);
	vmm_gpt_walk(gpt, gpa, entries, MAX_GPT_LEVEL);
	if (entries[LEVEL1] == NULL)
		return (false);

	entry = *entries[LEVEL1];
	*entries[LEVEL1] = 0;
	return (gpt->vgpt_pte_ops->vpeo_pte_is_present(entry));
}

/*
 * Un-maps the region of guest physical address space bounded by
 * [addr, addr + len).  Returns the number of pages that were unmapped.
 */
size_t
vmm_gpt_unmap_region(vmm_gpt_t *gpt, uint64_t addr, uint64_t len)
{
	ASSERT0(addr & PAGEOFFSET);
	ASSERT0(len & PAGEOFFSET);

	const uint64_t end = addr + len;
	size_t num_unmapped = 0;
	for (uint64_t gpa = addr; gpa < end; gpa += PAGESIZE) {
		if (vmm_gpt_unmap(gpt, gpa) != 0) {
			num_unmapped++;
		}
	}

	return (num_unmapped);
}

/*
 * Returns a value indicating whether or not this GPT maps the given GPA,
 * based on the PTE (if any) pointed to by `ptep`.  If the GPA is mapped,
 * *pfnp and *protp will be filled with the PFN and protection bits of the
 * entry.  Otherwise, they will be ignored.
 */
bool
vmm_gpt_is_mapped(vmm_gpt_t *gpt, uint64_t *ptep, pfn_t *pfnp, uint_t *protp)
{
	uint64_t entry;

	ASSERT(pfnp != NULL);
	ASSERT(protp != NULL);

	if (ptep == NULL) {
		return (false);
	}
	entry = *ptep;
	if (!gpt->vgpt_pte_ops->vpeo_pte_is_present(entry)) {
		return (false);
	}
	*pfnp = gpt->vgpt_pte_ops->vpeo_pte_pfn(entry);
	*protp = gpt->vgpt_pte_ops->vpeo_pte_prot(entry);
	return (true);
}

/*
 * Resets the accessed bit on the page table entry pointed to by `entry`.
 * If `on` is true, the bit will be set, otherwise it will be cleared.
 * The old value of the bit is returned.
 */
uint_t
vmm_gpt_reset_accessed(vmm_gpt_t *gpt, uint64_t *entry, bool on)
{
	ASSERT(entry != NULL);
	return (gpt->vgpt_pte_ops->vpeo_reset_accessed(entry, on));
}

/*
 * Resets the dirty bit on the page table entry pointed to by `entry`.
 * If `on` is true, the bit will be set, otherwise it will be cleared.
 * The old value of the bit is returned.
 */
uint_t
vmm_gpt_reset_dirty(vmm_gpt_t *gpt, uint64_t *entry, bool on)
{
	ASSERT(entry != NULL);
	return (gpt->vgpt_pte_ops->vpeo_reset_dirty(entry, on));
}

/*
 * Query state from the PTE pointed to by `entry`.
 */
bool
vmm_gpt_query(vmm_gpt_t *gpt, uint64_t *entry, vmm_gpt_query_t query)
{
	ASSERT(entry != NULL);
	return (gpt->vgpt_pte_ops->vpeo_query(entry, query));
}

/*
 * Get properly formatted PML4 (EPTP/nCR3) for GPT.
 */
uint64_t
vmm_gpt_get_pmtp(vmm_gpt_t *gpt, bool track_dirty)
{
	const pfn_t root_pfn = gpt->vgpt_root->vgn_host_pfn;
	return (gpt->vgpt_pte_ops->vpeo_get_pmtp(root_pfn, track_dirty));
}
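/*
 * Illustratively, the value returned above is what a caller hands to the
 * virtualization hardware as its top-level table pointer (a sketch, not
 * actual wiring):
 *
 *	uint64_t pmtp = vmm_gpt_get_pmtp(gpt, true);
 *	... written to the VMCS EPTP field on Intel, or the VMCB nCR3
 *	    field on AMD ...
 */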
/*
 * Does the GPT hardware support dirty-page-tracking?
 */
bool
vmm_gpt_can_track_dirty(vmm_gpt_t *gpt)
{
	return (gpt->vgpt_pte_ops->vpeo_hw_ad_supported());
}
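/*
 * An illustrative dirty-tracking sweep built from the primitives above (a
 * sketch; the record-keeping is hypothetical and left to the caller):
 *
 *	if (vmm_gpt_can_track_dirty(gpt)) {
 *		for (uint64_t gpa = start; gpa < end; gpa += PAGESIZE) {
 *			uint64_t *ptep = vmm_gpt_lookup(gpt, gpa);
 *
 *			if (ptep != NULL &&
 *			    vmm_gpt_reset_dirty(gpt, ptep, false) != 0) {
 *				... the guest wrote this page; record it ...
 *			}
 *		}
 *	}
 */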