Lines Matching +full:gpa +full:- +full:0

1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
69 #define VM_LOWMEM_LIMIT 0
98 memset(&vmc, 0, sizeof(vmc)); in vm_ctl_create()
101 return (-1); in vm_ctl_create()
112 if (modfind("vmm") < 0) { in vm_create()
114 if (error != 0) in vm_create()
115 return (-1); in vm_create()
118 fd = open("/dev/vmmctl", O_RDWR, 0); in vm_create()
119 if (fd < 0) in vm_create()
122 if (error != 0) { in vm_create()
126 return (-1); in vm_create()
129 return (0); in vm_create()
135 return (vm_openf(name, 0)); in vm_open()
150 vm->fd = vm->ctlfd = -1; in vm_openf()
151 vm->memflags = 0; in vm_openf()
152 vm->name = (char *)(vm + 1); in vm_openf()
153 strcpy(vm->name, name); in vm_openf()
154 memset(vm->memsegs, 0, sizeof(vm->memsegs)); in vm_openf()
156 if ((vm->ctlfd = open("/dev/vmmctl", O_RDWR, 0)) < 0) in vm_openf()
159 vm->fd = vm_device_open(vm->name); in vm_openf()
160 if (vm->fd < 0 && errno == ENOENT) { in vm_openf()
162 if (vm_ctl_create(vm->name, vm->ctlfd) != 0) in vm_openf()
164 vm->fd = vm_device_open(vm->name); in vm_openf()
168 if (vm->fd < 0) in vm_openf()
171 if (!created && (flags & VMMAPI_OPEN_REINIT) != 0 && vm_reinit(vm) != 0) in vm_openf()
190 if (vm->fd >= 0) in vm_close()
191 (void)close(vm->fd); in vm_close()
192 if (vm->ctlfd >= 0) in vm_close()
193 (void)close(vm->ctlfd); in vm_close()
202 memset(&vmd, 0, sizeof(vmd)); in vm_destroy()
203 (void)strlcpy(vmd.name, vm->name, sizeof(vmd.name)); in vm_destroy()
204 if (ioctl(vm->ctlfd, VMMCTL_VM_DESTROY, &vmd) != 0) in vm_destroy()
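
Taken together, the fragments above cover the whole VM lifecycle: vm_create()/vm_ctl_create() create the kernel VM object, vm_openf() opens it (optionally creating or reinitializing it first), and vm_close()/vm_destroy() release it. A minimal lifecycle sketch, assuming FreeBSD's <vmmapi.h>, a loaded vmm(4) module, and sufficient privilege; "demo-vm" is an arbitrary name:

#include <sys/param.h>

#include <err.h>
#include <stdio.h>

#include <machine/vmm.h>
#include <vmmapi.h>

int
main(void)
{
    struct vmctx *ctx;

    /*
     * Open and create-if-missing in one step: with VMMAPI_OPEN_CREATE,
     * vm_openf() takes the ENOENT fallback path shown above.
     */
    ctx = vm_openf("demo-vm", VMMAPI_OPEN_CREATE);
    if (ctx == NULL)
        err(1, "vm_openf");

    printf("opened VM %s\n", vm_get_name(ctx));

    /*
     * vm_destroy() tears the VM down and then calls vm_close() itself;
     * call only vm_close() if the VM should outlive this process.
     */
    vm_destroy(ctx);
    return (0);
}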
216 vcpu->ctx = ctx; in vm_vcpu_open()
217 vcpu->vcpuid = vcpuid; in vm_vcpu_open()
230 return (vcpu->vcpuid); in vcpu_id()
240 optval = strtoul(opt, &endptr, 0); in vm_parse_memsize()
241 if (*opt != '\0' && *endptr == '\0') { in vm_parse_memsize()
250 error = 0; in vm_parse_memsize()
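
Per the fragment above, vm_parse_memsize() accepts either a bare number (parsed with strtoul() in base 0) or a humanized string handed off to expand_number(3). A short usage sketch; the command-line framing is illustrative:

#include <sys/param.h>

#include <err.h>
#include <stdio.h>

#include <machine/vmm.h>
#include <vmmapi.h>

int
main(int argc, char **argv)
{
    size_t memsize;

    if (argc != 2)
        errx(1, "usage: %s <memsize>", argv[0]);

    /*
     * Accepts "268435456", "256M", "4G", and similar; small bare
     * values are scaled to megabytes for backward compatibility.
     */
    if (vm_parse_memsize(argv[1], &memsize) != 0)
        errx(1, "invalid memsize '%s'", argv[1]);

    printf("%zu bytes\n", memsize);
    return (0);
}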
268 ctx->memflags = flags; in vm_set_memflags()
275 return (ctx->memflags); in vm_get_memflags()
279 * Map segment 'segid' starting at 'off' into guest address range [gpa,gpa+len).
282 vm_mmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, int segid, vm_ooffset_t off, in vm_mmap_memseg() argument
288 memmap.gpa = gpa; in vm_mmap_memseg()
293 memmap.flags = 0; in vm_mmap_memseg()
295 if (ctx->memflags & VM_MEM_F_WIRED) in vm_mmap_memseg()
302 error = vm_mmap_getnext(ctx, &gpa, &segid, &off, &len, &prot, &flags); in vm_mmap_memseg()
303 if (error == 0 && gpa == memmap.gpa) { in vm_mmap_memseg()
307 return (-1); in vm_mmap_memseg()
309 return (0); in vm_mmap_memseg()
313 error = ioctl(ctx->fd, VM_MMAP_MEMSEG, &memmap); in vm_mmap_memseg()
322 *guest_baseaddr = ctx->baseaddr; in vm_get_guestmem_from_ctx()
323 *lowmem_size = ctx->memsegs[VM_MEMSEG_LOW].size; in vm_get_guestmem_from_ctx()
324 *highmem_size = ctx->memsegs[VM_MEMSEG_HIGH].size; in vm_get_guestmem_from_ctx()
325 return (0); in vm_get_guestmem_from_ctx()
329 vm_munmap_memseg(struct vmctx *ctx, vm_paddr_t gpa, size_t len) in vm_munmap_memseg() argument
334 munmap.gpa = gpa; in vm_munmap_memseg()
337 error = ioctl(ctx->fd, VM_MUNMAP_MEMSEG, &munmap); in vm_munmap_memseg()
342 vm_mmap_getnext(struct vmctx *ctx, vm_paddr_t *gpa, int *segid, in vm_mmap_getnext() argument
349 memmap.gpa = *gpa; in vm_mmap_getnext()
350 error = ioctl(ctx->fd, VM_MMAP_GETNEXT, &memmap); in vm_mmap_getnext()
351 if (error == 0) { in vm_mmap_getnext()
352 *gpa = memmap.gpa; in vm_mmap_getnext()
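
vm_mmap_getnext() is a cursor: the ioctl returns the mapping with the lowest guest physical address at or above *gpa, which is also how vm_mmap_memseg() above detects an already-existing mapping (returning 0 if it is identical, failing with EEXIST otherwise). A sketch that walks the whole guest memory map, assuming an already-open context:

#include <sys/param.h>

#include <stdint.h>
#include <stdio.h>

#include <machine/vmm.h>
#include <vmmapi.h>

/* Print every memory-map entry of an open VM context. */
static void
dump_memmap(struct vmctx *ctx)
{
    vm_paddr_t gpa;
    vm_ooffset_t segoff;
    size_t len;
    int segid, prot, flags;

    gpa = 0;
    while (vm_mmap_getnext(ctx, &gpa, &segid, &segoff, &len,
        &prot, &flags) == 0) {
        printf("gpa %#jx len %#zx segid %d prot %#x flags %#x\n",
            (uintmax_t)gpa, len, segid, prot, flags);
        gpa += len;    /* advance the cursor past this entry */
    }
}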
363 * Return 0 if the segments are identical and non-zero otherwise.
374 return (0); in cmpseg()
376 return (-1); in cmpseg()
396 if (memseg.len != 0) { in vm_alloc_memseg()
399 return (-1); in vm_alloc_memseg()
401 return (0); in vm_alloc_memseg()
412 return (-1); in vm_alloc_memseg()
416 error = ioctl(ctx->fd, VM_ALLOC_MEMSEG, &memseg); in vm_alloc_memseg()
430 error = ioctl(ctx->fd, VM_GET_MEMSEG, &memseg); in vm_get_memseg()
431 if (error == 0) { in vm_get_memseg()
436 error = -1; in vm_get_memseg()
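
vm_alloc_memseg() is idempotent thanks to the cmpseg() check above: re-requesting an identical segment returns 0 without reallocating, while a mismatched request fails with EINVAL. vm_get_memseg() exposes the underlying query directly; a sketch (the name buffer size here is a hypothetical choice):

#include <sys/param.h>

#include <err.h>
#include <stdio.h>

#include <machine/vmm.h>
#include <vmmapi.h>

/* Report whether a segment ident is backed, and by how much. */
static void
print_memseg(struct vmctx *ctx, int segid)
{
    size_t len;
    char name[64];    /* hypothetical size for the devmem name */

    if (vm_get_memseg(ctx, segid, &len, name, sizeof(name)) != 0)
        err(1, "vm_get_memseg");
    if (len == 0)
        printf("segid %d: unallocated\n", segid);
    else if (name[0] != '\0')
        printf("segid %d: %zu bytes, devmem \"%s\"\n", segid, len, name);
    else
        printf("segid %d: %zu bytes, anonymous\n", segid, len);
}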
443 setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char *base) in setup_memory_segment() argument
448 /* Map 'len' bytes starting at 'gpa' in the guest address space */ in setup_memory_segment()
449 error = vm_mmap_memseg(ctx, gpa, VM_SYSMEM, gpa, len, PROT_ALL); in setup_memory_segment()
454 if ((ctx->memflags & VM_MEM_F_INCORE) == 0) in setup_memory_segment()
458 ptr = mmap(base + gpa, len, PROT_RW, flags, ctx->fd, gpa); in setup_memory_segment()
460 return (-1); in setup_memory_segment()
462 return (0); in setup_memory_segment()
469 vm_paddr_t gpa; in vm_setup_memory() local
480 ctx->memsegs[VM_MEMSEG_LOW].size = VM_LOWMEM_LIMIT; in vm_setup_memory()
481 ctx->memsegs[VM_MEMSEG_HIGH].size = memsize - VM_LOWMEM_LIMIT; in vm_setup_memory()
482 objsize = VM_HIGHMEM_BASE + ctx->memsegs[VM_MEMSEG_HIGH].size; in vm_setup_memory()
484 ctx->memsegs[VM_MEMSEG_LOW].size = memsize; in vm_setup_memory()
485 ctx->memsegs[VM_MEMSEG_HIGH].size = 0; in vm_setup_memory()
498 ptr = mmap(NULL, len, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, 0); in vm_setup_memory()
500 return (-1); in vm_setup_memory()
503 if (ctx->memsegs[VM_MEMSEG_HIGH].size > 0) { in vm_setup_memory()
504 gpa = VM_HIGHMEM_BASE; in vm_setup_memory()
505 len = ctx->memsegs[VM_MEMSEG_HIGH].size; in vm_setup_memory()
506 error = setup_memory_segment(ctx, gpa, len, baseaddr); in vm_setup_memory()
511 if (ctx->memsegs[VM_MEMSEG_LOW].size > 0) { in vm_setup_memory()
512 gpa = 0; in vm_setup_memory()
513 len = ctx->memsegs[VM_MEMSEG_LOW].size; in vm_setup_memory()
514 error = setup_memory_segment(ctx, gpa, len, baseaddr); in vm_setup_memory()
519 ctx->baseaddr = baseaddr; in vm_setup_memory()
521 return (0); in vm_setup_memory()
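
vm_setup_memory() is the high-level path through the pieces above: it splits memsize into low and high segments at VM_LOWMEM_LIMIT, allocates the VM_SYSMEM segment, reserves a guard-padded host address range, and maps each populated region with setup_memory_segment(). A sketch of typical use, assuming the VM_MMAP_ALL mapping style; setting VM_MEM_F_WIRED beforehand would make vm_mmap_memseg() request wired mappings, per the flag check shown earlier:

#include <sys/param.h>

#include <err.h>

#include <machine/vmm.h>
#include <vmmapi.h>

static struct vmctx *
open_with_memory(const char *name, const char *memsize_str)
{
    struct vmctx *ctx;
    size_t memsize;

    if (vm_parse_memsize(memsize_str, &memsize) != 0)
        errx(1, "invalid memsize '%s'", memsize_str);

    ctx = vm_openf(name, VMMAPI_OPEN_CREATE);
    if (ctx == NULL)
        err(1, "vm_openf");

    /* Memory flags must be set before the segments are mapped. */
    vm_set_memflags(ctx, 0);
    if (vm_setup_memory(ctx, memsize, VM_MMAP_ALL) != 0)
        err(1, "vm_setup_memory");

    return (ctx);
}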
525 * Returns a non-NULL pointer if [gaddr, gaddr+len) is entirely contained in
526 * the lowmem or highmem regions.
536 lowsize = ctx->memsegs[VM_MEMSEG_LOW].size; in vm_map_gpa()
537 if (lowsize > 0) { in vm_map_gpa()
539 return (ctx->baseaddr + gaddr); in vm_map_gpa()
542 highsize = ctx->memsegs[VM_MEMSEG_HIGH].size; in vm_map_gpa()
543 if (highsize > 0 && gaddr >= VM_HIGHMEM_BASE) { in vm_map_gpa()
546 return (ctx->baseaddr + gaddr); in vm_map_gpa()
558 offaddr = (char *)addr - ctx->baseaddr; in vm_rev_map_gpa()
560 lowsize = ctx->memsegs[VM_MEMSEG_LOW].size; in vm_rev_map_gpa()
561 if (lowsize > 0) in vm_rev_map_gpa()
565 highsize = ctx->memsegs[VM_MEMSEG_HIGH].size; in vm_rev_map_gpa()
566 if (highsize > 0) in vm_rev_map_gpa()
571 return ((vm_paddr_t)-1); in vm_rev_map_gpa()
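
vm_map_gpa() and vm_rev_map_gpa() are inverses over the populated lowmem and highmem regions: a guest physical address in the hole between them yields NULL, and a host address outside both yields (vm_paddr_t)-1. A round-trip sketch:

#include <sys/param.h>

#include <assert.h>
#include <string.h>

#include <machine/vmm.h>
#include <vmmapi.h>

/* Zero one guest page and verify the reverse translation. */
static void
zero_guest_page(struct vmctx *ctx, vm_paddr_t gpa)
{
    void *hva;

    hva = vm_map_gpa(ctx, gpa, PAGE_SIZE);
    assert(hva != NULL);    /* gpa must fall inside guest RAM */
    memset(hva, 0, PAGE_SIZE);
    assert(vm_rev_map_gpa(ctx, hva) == gpa);
}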
578 return (ctx->name); in vm_get_name()
585 return (ctx->memsegs[VM_MEMSEG_LOW].size); in vm_get_lowmem_size()
599 return (ctx->memsegs[VM_MEMSEG_HIGH].size); in vm_get_highmem_size()
610 fd = -1; in vm_create_devmem()
612 if (name == NULL || strlen(name) == 0) { in vm_create_devmem()
622 strlcat(pathname, ctx->name, sizeof(pathname)); in vm_create_devmem()
627 if (fd < 0) in vm_create_devmem()
635 base = mmap(NULL, len2, PROT_NONE, MAP_GUARD | MAP_ALIGNED_SUPER, -1, in vm_create_devmem()
636 0); in vm_create_devmem()
641 if ((ctx->memflags & VM_MEM_F_INCORE) == 0) in vm_create_devmem()
645 ptr = mmap(base + VM_MMAP_GUARD_SIZE, len, PROT_RW, flags, fd, 0); in vm_create_devmem()
647 if (fd >= 0) in vm_create_devmem()
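
vm_create_devmem() combines segment allocation with a host mapping of the VM's devmem node, returning a pointer into the segment (MAP_FAILED on error). A sketch modeled on bhyve's boot-ROM handling; the size, guest address, and segment choice are illustrative:

#include <sys/param.h>
#include <sys/mman.h>

#include <assert.h>
#include <err.h>
#include <string.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#define ROM_LEN (1024 * 1024)      /* illustrative size */
#define ROM_GPA 0xfff00000UL       /* illustrative guest address */

static char *
setup_rom(struct vmctx *ctx, const void *image, size_t imagelen)
{
    char *ptr;

    assert(imagelen <= ROM_LEN);

    /* Allocate the segment and map it into this process. */
    ptr = vm_create_devmem(ctx, VM_BOOTROM, "bootrom", ROM_LEN);
    if (ptr == MAP_FAILED)
        err(1, "vm_create_devmem");

    /* Expose the same segment read/execute-only to the guest. */
    if (vm_mmap_memseg(ctx, ROM_GPA, VM_BOOTROM, 0, ROM_LEN,
        PROT_READ | PROT_EXEC) != 0)
        err(1, "vm_mmap_memseg");

    memcpy(ptr, image, imagelen);
    return (ptr);
}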
660 *(int *)arg = vcpu->vcpuid; in vcpu_ioctl()
661 return (ioctl(vcpu->ctx->fd, cmd, arg)); in vcpu_ioctl()
737 return (ioctl(ctx->fd, VM_SUSPEND, &vmsuspend)); in vm_suspend()
744 return (ioctl(ctx->fd, VM_REINIT, 0)); in vm_reinit()
752 for (i = 0; i < VM_CAP_MAX; i++) { in vm_capability_name2type()
754 strcmp(vm_capstrmap[i], capname) == 0) in vm_capability_name2type()
758 return (-1); in vm_capability_name2type()
764 if (type >= 0 && type < VM_CAP_MAX) in vm_capability_type2name()
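
The two lookups are symmetric over vm_capstrmap[]. A sketch that resolves a user-supplied capability name and enables it on a vCPU; vm_set_capability() is part of the same API, though it does not appear in the matches above:

#include <sys/param.h>

#include <err.h>

#include <machine/vmm.h>
#include <vmmapi.h>

static void
enable_cap_by_name(struct vcpu *vcpu, const char *capname)
{
    int type;

    type = vm_capability_name2type(capname);
    if (type < 0)
        errx(1, "unknown capability '%s'", capname);
    if (vm_set_capability(vcpu, type, 1) != 0)
        err(1, "vm_set_capability(%s)", capname);
}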
808 count = 0; in vm_get_stats()
809 for (index = 0;; index += nitems(vmstats.statbuf)) { in vm_get_stats()
811 if (vcpu_ioctl(vcpu, VM_STATS, &vmstats) != 0) in vm_get_stats()
847 if (ioctl(ctx->fd, VM_STAT_DESC, &statdesc) == 0) in vm_get_stat_desc()
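
vm_get_stats() pages through the kernel counters in statbuf-sized chunks (the loop above) and returns one flat array; vm_get_stat_desc() maps each index to its description. A bhyvectl-style dump:

#include <sys/param.h>
#include <sys/time.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

#include <machine/vmm.h>
#include <vmmapi.h>

/* Print all statistics for one vCPU. */
static void
print_stats(struct vmctx *ctx, struct vcpu *vcpu)
{
    struct timeval tv;
    uint64_t *stats;
    int i, num;

    stats = vm_get_stats(vcpu, &tv, &num);
    if (stats == NULL)
        err(1, "vm_get_stats");
    for (i = 0; i < num; i++)
        printf("%-48s %ju\n", vm_get_stat_desc(ctx, i),
            (uintmax_t)stats[i]);
}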
855 vm_get_gpa_pmap(struct vmctx *ctx, uint64_t gpa, uint64_t *pte, int *num) in vm_get_gpa_pmap() argument
861 gpapte.gpa = gpa; in vm_get_gpa_pmap()
863 error = ioctl(ctx->fd, VM_GET_GPA_PMAP, &gpapte); in vm_get_gpa_pmap()
865 if (error == 0) { in vm_get_gpa_pmap()
867 for (i = 0; i < gpapte.ptenum; i++) in vm_get_gpa_pmap()
876 uint64_t gla, int prot, uint64_t *gpa, int *fault) in vm_gla2gpa() argument
887 if (error == 0) { in vm_gla2gpa()
889 *gpa = gg.gpa; in vm_gla2gpa()
897 uint64_t gla, int prot, uint64_t *gpa, int *fault) in vm_gla2gpa_nofault() argument
908 if (error == 0) { in vm_gla2gpa_nofault()
910 *gpa = gg.gpa; in vm_gla2gpa_nofault()
926 uint64_t gpa, off; in vm_copy_setup() local
929 for (i = 0; i < iovcnt; i++) { in vm_copy_setup()
930 iov[i].iov_base = 0; in vm_copy_setup()
931 iov[i].iov_len = 0; in vm_copy_setup()
935 assert(iovcnt > 0); in vm_copy_setup()
936 error = vm_gla2gpa(vcpu, paging, gla, prot, &gpa, fault); in vm_copy_setup()
940 off = gpa & PAGE_MASK; in vm_copy_setup()
941 n = MIN(len, PAGE_SIZE - off); in vm_copy_setup()
943 va = vm_map_gpa(vcpu->ctx, gpa, n); in vm_copy_setup()
947 iov->iov_base = va; in vm_copy_setup()
948 iov->iov_len = n; in vm_copy_setup()
950 iovcnt--; in vm_copy_setup()
953 len -= n; in vm_copy_setup()
955 return (0); in vm_copy_setup()
963 * Intentionally empty. This is used by the instruction in vm_copy_teardown()
964 * emulation code shared with the kernel. The in-kernel in vm_copy_teardown()
965 * version of this is non-empty. in vm_copy_teardown()
978 assert(iov->iov_len); in vm_copyin()
979 n = min(len, iov->iov_len); in vm_copyin()
980 src = iov->iov_base; in vm_copyin()
985 len -= n; in vm_copyin()
998 assert(iov->iov_len); in vm_copyout()
999 n = min(len, iov->iov_len); in vm_copyout()
1000 dst = iov->iov_base; in vm_copyout()
1005 len -= n; in vm_copyout()
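
vm_copy_setup() translates a guest linear address range into an iovec array with at most one entry per page touched; vm_copyin() and vm_copyout() then move the bytes between that array and a host buffer. A read sketch, assuming the paging state was captured from the current VM exit and that 16 iovec entries cover the lengths used:

#include <sys/param.h>
#include <sys/mman.h>
#include <sys/uio.h>

#include <stdint.h>

#include <machine/vmm.h>
#include <vmmapi.h>

/* Read 'len' bytes at guest linear address 'gla' into 'buf'. */
static int
read_gla(struct vcpu *vcpu, struct vm_guest_paging *paging,
    uint64_t gla, void *buf, size_t len)
{
    struct iovec iov[16];
    int error, fault;

    error = vm_copy_setup(vcpu, paging, gla, len, PROT_READ,
        iov, nitems(iov), &fault);
    if (error != 0 || fault)
        return (-1);    /* on 'fault' the exception went to the guest */
    vm_copyin(iov, buf, len);
    return (0);
}

Writing guest memory is symmetric: set up with PROT_WRITE and finish with vm_copyout(buf, iov, len).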
1020 error = ioctl(ctx->fd, VM_GET_CPUS, &vm_cpuset); in vm_get_cpus()
1063 ac.vcpuid = -1; in vm_suspend_all_cpus()
1064 error = ioctl(ctx->fd, VM_SUSPEND_CPU, &ac); in vm_suspend_all_cpus()
1097 ac.vcpuid = -1; in vm_resume_all_cpus()
1098 error = ioctl(ctx->fd, VM_RESUME_CPU, &ac); in vm_resume_all_cpus()
1111 if (error == 0) { in vm_get_intinfo()
1144 if (ioctl(ctx->fd, VM_SNAPSHOT_REQ, meta) == -1) { in vm_snapshot_req()
1147 __func__, meta->dev_name, errno); in vm_snapshot_req()
1149 return (-1); in vm_snapshot_req()
1151 return (0); in vm_snapshot_req()
1159 dummy = 0; in vm_restore_time()
1160 return (ioctl(ctx->fd, VM_RESTORE_TIME, &dummy)); in vm_restore_time()
1175 return (ioctl(ctx->fd, VM_SET_TOPOLOGY, &topology)); in vm_set_topology()
1186 error = ioctl(ctx->fd, VM_GET_TOPOLOGY, &topology); in vm_get_topology()
1187 if (error == 0) { in vm_get_topology()
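
vm_set_topology() and vm_get_topology() round-trip the socket/core/thread shape. A sketch; per bhyve's usage, maxcpus is passed as 0 to keep the kernel default:

#include <sys/param.h>

#include <err.h>
#include <stdint.h>
#include <stdio.h>

#include <machine/vmm.h>
#include <vmmapi.h>

static void
show_topology(struct vmctx *ctx)
{
    uint16_t sockets, cores, threads, maxcpus;

    /* 1 socket x 2 cores x 2 threads; 0 keeps the maxcpus default. */
    if (vm_set_topology(ctx, 1, 2, 2, 0) != 0)
        err(1, "vm_set_topology");
    if (vm_get_topology(ctx, &sockets, &cores, &threads, &maxcpus) != 0)
        err(1, "vm_get_topology");
    printf("%u sockets, %u cores, %u threads (maxcpus %u)\n",
        sockets, cores, threads, maxcpus);
}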
1202 if (caph_rights_limit(ctx->fd, &rights) != 0) in vm_limit_rights()
1203 return (-1); in vm_limit_rights()
1204 if (caph_ioctls_limit(ctx->fd, vm_ioctl_cmds, vm_ioctl_ncmds) != 0) in vm_limit_rights()
1205 return (-1); in vm_limit_rights()
1206 return (0); in vm_limit_rights()
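
vm_limit_rights() narrows ctx->fd to the rights and the ioctl list in vm_ioctl_cmds[], and is meant to run just before the process enters capability mode. A sketch using capsicum_helpers(3):

#include <sys/param.h>
#include <sys/capsicum.h>

#include <capsicum_helpers.h>
#include <err.h>

#include <machine/vmm.h>
#include <vmmapi.h>

static void
sandbox(struct vmctx *ctx)
{
    if (vm_limit_rights(ctx) != 0)
        err(1, "vm_limit_rights");
    if (caph_enter() != 0)
        err(1, "caph_enter");
    /* Subsequent vmmapi calls on ctx keep working inside the sandbox. */
}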
1217 return (ctx->fd); in vm_get_device_fd()
1228 sz = vm_ioctl_ncmds * sizeof(vm_ioctl_cmds[0]); in vm_get_ioctls()