/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */
/* This file is dual-licensed; see usr/src/contrib/bhyve/LICENSE */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 Oxide Computer Company
 */

#ifndef _VMM_VM_H
#define	_VMM_VM_H

#include <sys/types.h>

typedef struct vmspace vmspace_t;
typedef struct vm_client vm_client_t;
typedef struct vm_page vm_page_t;
typedef struct vm_object vm_object_t;

struct vmm_pte_ops;

typedef void (*vmc_inval_cb_t)(void *, uintptr_t, size_t);

/* vmspace_t operations */
vmspace_t *vmspace_alloc(size_t, struct vmm_pte_ops *, bool);
void vmspace_destroy(vmspace_t *);
int vmspace_map(vmspace_t *, vm_object_t *, uintptr_t, uintptr_t, size_t,
    uint8_t);
int vmspace_unmap(vmspace_t *, uintptr_t, uintptr_t);
int vmspace_populate(vmspace_t *, uintptr_t, uintptr_t);
vm_client_t *vmspace_client_alloc(vmspace_t *);
uint64_t vmspace_table_root(vmspace_t *);
uint64_t vmspace_table_gen(vmspace_t *);
uint64_t vmspace_resident_count(vmspace_t *);
int vmspace_track_dirty(vmspace_t *, uint64_t, size_t, uint8_t *);

/* vm_client_t operations */
vm_page_t *vmc_hold(vm_client_t *, uintptr_t, int);
vm_page_t *vmc_hold_ext(vm_client_t *, uintptr_t, int, int);
uint64_t vmc_table_enter(vm_client_t *);
void vmc_table_exit(vm_client_t *);
int vmc_fault(vm_client_t *, uintptr_t, int);
vm_client_t *vmc_clone(vm_client_t *);
int vmc_set_inval_cb(vm_client_t *, vmc_inval_cb_t, void *);
void vmc_destroy(vm_client_t *);

/* vm_object_t operations */
vm_object_t *vm_object_mem_allocate(size_t, bool);
vm_object_t *vmm_mmio_alloc(vmspace_t *, uintptr_t, size_t, uintptr_t);
void vm_object_reference(vm_object_t *);
void vm_object_release(vm_object_t *);
pfn_t vm_object_pfn(vm_object_t *, uintptr_t);

/* vm_page_t operations */
const void *vmp_get_readable(const vm_page_t *);
void *vmp_get_writable(const vm_page_t *);
pfn_t vmp_get_pfn(const vm_page_t *);
void vmp_mark_dirty(vm_page_t *);
void vmp_chain(vm_page_t *, vm_page_t *);
vm_page_t *vmp_next(const vm_page_t *);
bool vmp_release(vm_page_t *);
bool vmp_release_chain(vm_page_t *);

/*
 * Flags for vmc_hold_ext():
 */

/* The default flags are empty */
#define	VPF_DEFAULT	0

/*
 * When a page is held for potential writes, the consumer may not perform those
 * writes immediately, or in some cases ever. They may wish to defer the page
 * being considered dirty until such a determination is made. By establishing a
 * page hold with this flag, the consumer commits to a later vmp_mark_dirty()
 * call if they write any data through the vm_page. Doing so will effectively
 * clear the flag and subject the page to the expected dirty-tracking logic.
 */
#define	VPF_DEFER_DIRTY	(1 << 0)
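
/*
 * Illustrative sketch (an assumption, not part of this interface): a consumer
 * that may or may not end up writing to a held page could pair VPF_DEFER_DIRTY
 * with a later vmp_mark_dirty() call. The PROT_WRITE protection value and the
 * should_write/value variables are hypothetical placeholders for the example.
 *
 *	vm_page_t *vmp = vmc_hold_ext(vmc, gpa, PROT_WRITE, VPF_DEFER_DIRTY);
 *	if (vmp != NULL) {
 *		uint64_t *datap = vmp_get_writable(vmp);
 *
 *		if (should_write) {
 *			*datap = value;
 *			vmp_mark_dirty(vmp);
 *		}
 *		(void) vmp_release(vmp);
 *	}
 */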

/* seg_vmm mapping */
struct vm;
int vm_segmap_obj(struct vm *, int, off_t, off_t, struct as *, caddr_t *,
    uint_t, uint_t, uint_t);
int vm_segmap_space(struct vm *, off_t, struct as *, caddr_t *, off_t, uint_t,
    uint_t, uint_t);

/* Glue functions */
vm_paddr_t vtophys(void *);
void invalidate_cache_all(void);

/*
 * The VM_MAXUSER_ADDRESS determines the upper size limit of a vmspace.
 * This value is sized well below the host userlimit, halving the
 * available space below the VA hole to avoid Intel EPT limits and
 * leave room available in the usable VA range for other mmap tricks.
 */
#define	VM_MAXUSER_ADDRESS	0x00003ffffffffffful

#endif /* _VMM_VM_H */