Lines matching full:vm (identifier-search hits in the hwt_vm buffer-management code; each hit keeps its file line number and, where the tool reports it, the enclosing function)
43 #include <vm/vm.h>
44 #include <vm/pmap.h>
45 #include <vm/vm_extern.h>
46 #include <vm/vm_param.h>
47 #include <vm/vm_kern.h>
48 #include <vm/vm_page.h>
49 #include <vm/vm_object.h>
50 #include <vm/vm_pager.h>
51 #include <vm/vm_pageout.h>
52 #include <vm/vm_phys.h>
108 hwt_vm_alloc_pages(struct hwt_vm *vm, int kva_req) in hwt_vm_alloc_pages() argument
129 vm->kvaddr = kva_alloc(vm->npages * PAGE_SIZE); in hwt_vm_alloc_pages()
130 if (!vm->kvaddr) in hwt_vm_alloc_pages()
134 vm->obj = cdev_pager_allocate(vm, OBJT_MGTDEVICE, in hwt_vm_alloc_pages()
135 &hwt_vm_pager_ops, vm->npages * PAGE_SIZE, PROT_READ, 0, in hwt_vm_alloc_pages()
138 for (i = 0; i < vm->npages; i++) { in hwt_vm_alloc_pages()
169 vm->pages[i] = m; in hwt_vm_alloc_pages()
171 VM_OBJECT_WLOCK(vm->obj); in hwt_vm_alloc_pages()
172 vm_page_insert(m, vm->obj, i); in hwt_vm_alloc_pages()
174 pmap_qenter(vm->kvaddr + i * PAGE_SIZE, &m, 1); in hwt_vm_alloc_pages()
175 VM_OBJECT_WUNLOCK(vm->obj); in hwt_vm_alloc_pages()
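The hwt_vm_alloc_pages() hits above show the buffer-construction pattern: optionally reserve a kernel virtual address (KVA) window with kva_alloc(), create an OBJT_MGTDEVICE pager object with cdev_pager_allocate(), then insert every page into that object and, when a KVA mapping was requested, wire it in with pmap_qenter(). A minimal sketch of that pattern follows; the example_* name, the trimmed-down struct definitions, and the vm_page_alloc_noobj() call are assumptions, since the listing only shows the lines containing "vm" and omits the page-allocation and error-unwinding code.

```c
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/proc.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>

/* Assumed subsets of the driver-private structures; the real definitions
 * live in the hwt headers, which are not part of the matched lines. */
struct hwt_backend { int kva_req; };
struct hwt_context { struct hwt_backend *hwt_backend; };
struct hwt_thread;
struct hwt_vm {
	struct hwt_context	*ctx;
	struct hwt_thread	*thr;
	struct cdev		*cdev;
	vm_object_t		obj;
	vm_offset_t		kvaddr;
	vm_page_t		*pages;
	int			npages;
};

extern struct cdev_pager_ops hwt_vm_pager_ops;	/* referenced at file line 135 */

static int
example_alloc_pages(struct hwt_vm *vm, int kva_req)
{
	vm_page_t m;
	int i;

	if (kva_req) {
		/* Reserve a contiguous KVA window covering the buffer. */
		vm->kvaddr = kva_alloc(vm->npages * PAGE_SIZE);
		if (vm->kvaddr == 0)
			return (ENOMEM);
	}

	/* Managed-device pager object handed out later by mmap_single(). */
	vm->obj = cdev_pager_allocate(vm, OBJT_MGTDEVICE, &hwt_vm_pager_ops,
	    vm->npages * PAGE_SIZE, PROT_READ, 0, curthread->td_ucred);

	for (i = 0; i < vm->npages; i++) {
		/* The page allocation is not among the matched lines; a plain
		 * wired, zeroed page is assumed here.  The real driver also
		 * prepares each page for the managed-device pager. */
		m = vm_page_alloc_noobj(VM_ALLOC_WIRED | VM_ALLOC_ZERO);
		if (m == NULL)
			return (ENOMEM);
		vm->pages[i] = m;

		VM_OBJECT_WLOCK(vm->obj);
		vm_page_insert(m, vm->obj, i);
		if (kva_req)
			pmap_qenter(vm->kvaddr + i * PAGE_SIZE, &m, 1);
		VM_OBJECT_WUNLOCK(vm->obj);
	}

	return (0);
}
```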
194 struct hwt_vm *vm; in hwt_vm_mmap_single() local
196 vm = cdev->si_drv1; in hwt_vm_mmap_single()
201 vm_object_reference(vm->obj); in hwt_vm_mmap_single()
202 *objp = vm->obj; in hwt_vm_mmap_single()
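hwt_vm_mmap_single() is the d_mmap_single handler for the trace-buffer cdev: it recovers the per-buffer softc from si_drv1 and hands back the pager object built above, so an mmap(2) of the device maps the same pages. A sketch of that shape; the validation of nprot and the mapping size is an assumption, since those lines do not contain "vm" and are absent from the listing.

```c
/* Headers and the assumed struct hwt_vm definition as in the first sketch. */
static int
example_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
    vm_size_t mapsize, struct vm_object **objp, int nprot)
{
	struct hwt_vm *vm;

	vm = cdev->si_drv1;
	if (vm == NULL)
		return (ENXIO);

	/* The object was created with PROT_READ; reject anything else and
	 * any mapping larger than the buffer (assumed checks). */
	if (nprot != PROT_READ || mapsize > (vm_size_t)vm->npages * PAGE_SIZE)
		return (ENXIO);

	/* The reference pairs with the release when the mapping goes away. */
	vm_object_reference(vm->obj);
	*objp = vm->obj;

	return (0);
}
```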
246 struct hwt_vm *vm; in hwt_vm_ioctl() local
257 vm = dev->si_drv1; in hwt_vm_ioctl()
258 KASSERT(vm != NULL, ("si_drv1 is NULL")); in hwt_vm_ioctl()
260 ctx = vm->ctx; in hwt_vm_ioctl()
324 KASSERT(vm->thr != NULL, ("thr is NULL")); in hwt_vm_ioctl()
326 wakeup(vm->thr); in hwt_vm_ioctl()
333 error = hwt_backend_read(ctx, vm, &ident, &offset, &data); in hwt_vm_ioctl()
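The hwt_vm_ioctl() hits show the handler recovering the softc the same way and two of its operations: waking the thread that owns the buffer, and pulling the current buffer position out of the tracing backend via hwt_backend_read(). The sketch below only illustrates that shape; the ioctl command names, the hwt_backend_read() prototype, the local-variable types, and the copyout step are all assumptions.

```c
#include <sys/ioccom.h>

/* Hypothetical command numbers, for illustration only. */
#define	EXAMPLE_IOC_WAKEUP	_IO('H', 1)
#define	EXAMPLE_IOC_BUFPTR_GET	_IO('H', 2)

/* Prototype assumed from the call at file line 333; the real declaration
 * lives in the driver's backend header. */
int hwt_backend_read(struct hwt_context *ctx, struct hwt_vm *vm, int *ident,
    vm_offset_t *offset, uint64_t *data);

static int
example_vm_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags,
    struct thread *td)
{
	struct hwt_context *ctx;
	struct hwt_vm *vm;
	vm_offset_t offset;
	uint64_t data;
	int ident;
	int error;

	vm = dev->si_drv1;
	KASSERT(vm != NULL, ("si_drv1 is NULL"));
	ctx = vm->ctx;

	switch (cmd) {
	case EXAMPLE_IOC_WAKEUP:
		/* Kick the thread sleeping on this buffer. */
		KASSERT(vm->thr != NULL, ("thr is NULL"));
		wakeup(vm->thr);
		error = 0;
		break;
	case EXAMPLE_IOC_BUFPTR_GET:
		/* Ask the backend for the current buffer pointer; copying
		 * the result back through 'addr' is omitted here. */
		error = hwt_backend_read(ctx, vm, &ident, &offset, &data);
		break;
	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
```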
399 hwt_vm_create_cdev(struct hwt_vm *vm, char *path) in hwt_vm_create_cdev() argument
412 args.mda_si_drv1 = vm; in hwt_vm_create_cdev()
414 error = make_dev_s(&args, &vm->cdev, "%s", path); in hwt_vm_create_cdev()
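hwt_vm_create_cdev() publishes the buffer as a character device through the make_dev_s(9) KPI, stashing the softc in mda_si_drv1 so the ioctl and mmap_single handlers above can find it. A sketch; the cdevsw name, permissions, and flags are assumptions, since only the lines containing "vm" appear in the listing.

```c
/* Headers and assumed struct definitions as in the first sketch. */
extern struct cdevsw hwt_vm_cdevsw;	/* assumed name for the devsw */

static int
example_create_cdev(struct hwt_vm *vm, char *path)
{
	struct make_dev_args args;
	int error;

	make_dev_args_init(&args);
	args.mda_devsw = &hwt_vm_cdevsw;
	args.mda_flags = MAKEDEV_CHECKNAME | MAKEDEV_WAITOK;	/* assumed */
	args.mda_uid = UID_ROOT;				/* assumed */
	args.mda_gid = GID_WHEEL;				/* assumed */
	args.mda_mode = 0660;					/* assumed */
	args.mda_si_drv1 = vm;	/* recovered via si_drv1 in the handlers */

	error = make_dev_s(&args, &vm->cdev, "%s", path);
	if (error != 0)
		return (error);

	return (0);
}
```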
422 hwt_vm_alloc_buffers(struct hwt_vm *vm, int kva_req) in hwt_vm_alloc_buffers() argument
426 vm->pages = malloc(sizeof(struct vm_page *) * vm->npages, in hwt_vm_alloc_buffers()
429 error = hwt_vm_alloc_pages(vm, kva_req); in hwt_vm_alloc_buffers()
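hwt_vm_alloc_buffers() simply sizes the page-pointer array before handing off to the page allocator. A short sketch; the unwind on failure is an assumption, and M_HWT_VM is the driver's malloc(9) type, defined elsewhere in the file.

```c
#include <sys/malloc.h>

MALLOC_DECLARE(M_HWT_VM);	/* defined elsewhere in the driver */

static int
example_alloc_buffers(struct hwt_vm *vm, int kva_req)
{
	int error;

	/* One slot per buffer page, filled in by the allocator above. */
	vm->pages = malloc(sizeof(struct vm_page *) * vm->npages,
	    M_HWT_VM, M_WAITOK | M_ZERO);

	error = example_alloc_pages(vm, kva_req);
	if (error != 0) {
		free(vm->pages, M_HWT_VM);	/* assumed unwind */
		return (error);
	}

	return (0);
}
```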
439 hwt_vm_destroy_buffers(struct hwt_vm *vm) in hwt_vm_destroy_buffers() argument
444 if (vm->ctx->hwt_backend->kva_req && vm->kvaddr != 0) { in hwt_vm_destroy_buffers()
445 pmap_qremove(vm->kvaddr, vm->npages); in hwt_vm_destroy_buffers()
446 kva_free(vm->kvaddr, vm->npages * PAGE_SIZE); in hwt_vm_destroy_buffers()
448 VM_OBJECT_WLOCK(vm->obj); in hwt_vm_destroy_buffers()
449 for (i = 0; i < vm->npages; i++) { in hwt_vm_destroy_buffers()
450 m = vm->pages[i]; in hwt_vm_destroy_buffers()
455 cdev_pager_free_page(vm->obj, m); in hwt_vm_destroy_buffers()
461 vm_pager_deallocate(vm->obj); in hwt_vm_destroy_buffers()
462 VM_OBJECT_WUNLOCK(vm->obj); in hwt_vm_destroy_buffers()
464 free(vm->pages, M_HWT_VM); in hwt_vm_destroy_buffers()
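hwt_vm_destroy_buffers() undoes the construction in reverse: drop the KVA mapping when one was created, release every page from the pager object, then drop the object and the page array. The matched lines skip the per-page release calls, so those are assumptions in this sketch.

```c
/* Headers and assumed struct definitions as in the first sketch. */
static void
example_destroy_buffers(struct hwt_vm *vm)
{
	vm_page_t m;
	int i;

	/* Unmap and release the KVA window if the backend asked for one. */
	if (vm->ctx->hwt_backend->kva_req && vm->kvaddr != 0) {
		pmap_qremove(vm->kvaddr, vm->npages);
		kva_free(vm->kvaddr, vm->npages * PAGE_SIZE);
	}

	VM_OBJECT_WLOCK(vm->obj);
	for (i = 0; i < vm->npages; i++) {
		m = vm->pages[i];
		if (m == NULL)
			break;

		/* Detach the page from the pager, then unwire and free it;
		 * the busy/unwire/free calls are assumed (they do not match "vm"). */
		vm_page_busy_acquire(m, 0);
		cdev_pager_free_page(vm->obj, m);
		vm_page_unwire_noq(m);
		vm_page_free(m);
	}
	vm_pager_deallocate(vm->obj);
	VM_OBJECT_WUNLOCK(vm->obj);

	free(vm->pages, M_HWT_VM);
}
```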
468 hwt_vm_free(struct hwt_vm *vm) in hwt_vm_free() argument
473 if (vm->cdev) in hwt_vm_free()
474 destroy_dev_sched(vm->cdev); in hwt_vm_free()
475 hwt_vm_destroy_buffers(vm); in hwt_vm_free()
476 free(vm, M_HWT_VM); in hwt_vm_free()
482 struct hwt_vm *vm; in hwt_vm_alloc() local
485 vm = malloc(sizeof(struct hwt_vm), M_HWT_VM, M_WAITOK | M_ZERO); in hwt_vm_alloc()
486 vm->npages = bufsize / PAGE_SIZE; in hwt_vm_alloc()
488 error = hwt_vm_alloc_buffers(vm, kva_req); in hwt_vm_alloc()
490 free(vm, M_HWT_VM); in hwt_vm_alloc()
494 error = hwt_vm_create_cdev(vm, path); in hwt_vm_alloc()
496 hwt_vm_free(vm); in hwt_vm_alloc()
500 *vm0 = vm; in hwt_vm_alloc()
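Finally, hwt_vm_free() and hwt_vm_alloc() tie the pieces together: the constructor allocates and zeroes the softc, sizes the buffer in pages, builds the buffers, publishes the cdev, and returns the result through vm0; the destructor tears down in the order shown at file lines 473-476. Reassembled as a sketch from the matches above; the error-handling structure and the bufsize/path parameter types are assumptions.

```c
/* Headers and assumed struct definitions as in the earlier sketches. */
static void
example_vm_free(struct hwt_vm *vm)
{
	if (vm->cdev)
		destroy_dev_sched(vm->cdev);
	example_destroy_buffers(vm);
	free(vm, M_HWT_VM);
}

static int
example_vm_alloc(size_t bufsize, int kva_req, char *path, struct hwt_vm **vm0)
{
	struct hwt_vm *vm;
	int error;

	vm = malloc(sizeof(struct hwt_vm), M_HWT_VM, M_WAITOK | M_ZERO);
	vm->npages = bufsize / PAGE_SIZE;	/* bufsize assumed page-aligned */

	error = example_alloc_buffers(vm, kva_req);
	if (error != 0) {
		free(vm, M_HWT_VM);
		return (error);
	}

	error = example_create_cdev(vm, path);
	if (error != 0) {
		example_vm_free(vm);	/* mirrors the call at file line 496 */
		return (error);
	}

	*vm0 = vm;

	return (0);
}
```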