/linux/samples/nitro_enclaves/
  ne_ioctl_sample.c
      149  void *userspace_addr;    [member]
      261  ne_user_mem_region->userspace_addr = mmap(NULL, ne_user_mem_region->memory_size,    [in ne_alloc_user_mem_region()]
      265  if (ne_user_mem_region->userspace_addr == MAP_FAILED) {    [in ne_alloc_user_mem_region()]
      366  void *userspace_addr = ne_user_mem_regions[i].userspace_addr;    [in ne_load_enclave_image(), local]
      382  memcpy(userspace_addr + memory_offset,    [in ne_load_enclave_image()]
      413  .userspace_addr = (__u64)ne_user_mem_region.userspace_addr,    [in ne_set_user_mem_region()]
      502  munmap(ne_user_mem_regions[i].userspace_addr,    [in ne_free_mem_regions()]
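The sample's flow visible above: mmap() a hugepage-backed buffer (line 261), store its address as userspace_addr, and hand the region to the enclave fd (line 413). A minimal sketch of that step follows, assuming the uapi in include/uapi/linux/nitro_enclaves.h (struct ne_user_memory_region, NE_DEFAULT_MEMORY_REGION, NE_SET_USER_MEMORY_REGION); the helper name is hypothetical and error handling is trimmed.

```c
/*
 * Hypothetical helper mirroring the sample: back a region with a 2 MiB
 * hugepage and attach it to the enclave via NE_SET_USER_MEMORY_REGION.
 */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/nitro_enclaves.h>

#define REGION_SIZE (2UL * 1024 * 1024)	/* NE_MIN_MEM_REGION_SIZE */

static int set_one_mem_region(int enclave_fd)
{
	struct ne_user_memory_region region = {
		.flags = NE_DEFAULT_MEMORY_REGION,
		.memory_size = REGION_SIZE,
	};
	void *addr;

	/* Hugepage-backed anonymous mapping, as at line 261 of the sample. */
	addr = mmap(NULL, region.memory_size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (addr == MAP_FAILED)
		return -1;

	region.userspace_addr = (__u64)(unsigned long)addr;

	/* Attach the region to the enclave, as at line 413 of the sample. */
	if (ioctl(enclave_fd, NE_SET_USER_MEMORY_REGION, &region) < 0) {
		munmap(addr, region.memory_size);
		return -1;
	}

	return 0;
}
```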
|
/linux/drivers/virt/nitro_enclaves/
  ne_misc_dev.c
      763  if (!IS_ALIGNED(mem_region.userspace_addr, NE_MIN_MEM_REGION_SIZE)) {    [in ne_sanity_check_user_mem_region()]
      770  if ((mem_region.userspace_addr & (NE_MIN_MEM_REGION_SIZE - 1)) ||    [in ne_sanity_check_user_mem_region()]
      771  !access_ok((void __user *)(unsigned long)mem_region.userspace_addr,    [in ne_sanity_check_user_mem_region()]
      782  u64 userspace_addr = ne_mem_region->userspace_addr;    [in ne_sanity_check_user_mem_region(), local]
      784  if ((userspace_addr <= mem_region.userspace_addr &&    [in ne_sanity_check_user_mem_region()]
      785  mem_region.userspace_addr < (userspace_addr + memory_size)) ||    [in ne_sanity_check_user_mem_region()]
      786  (mem_region.userspace_addr <= userspace_addr &&    [in ne_sanity_check_user_mem_region()]
      787  (mem_region.userspace_addr + mem_region.memory_size) > userspace_addr)) {    [in ne_sanity_check_user_mem_region()]
      966  gup_rc = get_user_pages_unlocked(mem_region.userspace_addr + memory_size, 1,    [in ne_set_user_memory_region_ioctl()]
     1014  ne_mem_region->userspace_addr = mem_region.userspace_addr;    [in ne_set_user_memory_region_ioctl()]
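The driver's sanity check rejects a region that is not aligned to NE_MIN_MEM_REGION_SIZE, fails access_ok(), or overlaps a region already attached to the enclave. The overlap test at lines 784-787 is a plain interval-intersection predicate; a self-contained restatement (illustrative only, not the driver code):

```c
/* Illustrative restatement of the overlap test at lines 784-787. */
#include <stdbool.h>
#include <stdint.h>

static bool regions_overlap(uint64_t existing_addr, uint64_t existing_size,
			    uint64_t new_addr, uint64_t new_size)
{
	/* The new region starts inside the existing one ... */
	if (existing_addr <= new_addr && new_addr < existing_addr + existing_size)
		return true;

	/* ... or the existing region starts inside (or at the start of) the new one. */
	if (new_addr <= existing_addr && new_addr + new_size > existing_addr)
		return true;

	return false;
}
```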
|
  ne_misc_dev.h
       32  u64 userspace_addr;    [member]
|
/linux/include/uapi/linux/
  nitro_enclaves.h
      329  __u64 userspace_addr;    [member]
|
  vhost_types.h
      121  __u64 userspace_addr;    [member]
|
  mshv.h
      132  __u64 userspace_addr;    [member]
|
  kvm.h
       30  __u64 userspace_addr; /* start of the userspace allocated memory */    [member]
       39  __u64 userspace_addr;    [member]
|
/linux/drivers/hv/
  mshv_regions.c
      264  __u64 userspace_addr;    [in mshv_region_pin(), local]
      269  userspace_addr = region->start_uaddr +    [in mshv_region_pin()]
      282  ret = pin_user_pages_fast(userspace_addr, nr_pages,    [in mshv_region_pin()]
|
  mshv_root_main.c
     1173  mem->userspace_addr, mem->flags);    [in mshv_partition_create_region()]
     1283  !access_ok((const void *)mem.userspace_addr, mem.size))    [in mshv_map_user_memory()]
     1287  vma = vma_lookup(current->mm, mem.userspace_addr);    [in mshv_map_user_memory()]
     1356  if (region->start_uaddr != mem.userspace_addr ||    [in mshv_unmap_user_memory()]
     1383  !PAGE_ALIGNED(mem.userspace_addr) ||    [in mshv_partition_ioctl_set_memory()]
|
/linux/tools/testing/selftests/kvm/arm64/
  page_fault_test.c
      355  args->hva = (void *)region->region.userspace_addr;    [in setup_uffd_args()]
      409  void *hva = (void *)region->region.userspace_addr;    [in punch_hole_in_backing_store()]
      431  hva = (void *)region->region.userspace_addr;    [in mmio_on_test_gpa_handler()]
      533  hva = (void *)region->region.userspace_addr;    [in load_exec_code_for_test()]
|
/linux/virt/kvm/
  kvm_main.c
      597  hva_start = max_t(unsigned long, range->start, slot->userspace_addr);    [in kvm_handle_hva_range()]
      599  slot->userspace_addr + (slot->npages << PAGE_SHIFT));    [in kvm_handle_hva_range()]
     1560  new->hva_node[idx].start = new->userspace_addr;    [in kvm_replace_memslot()]
     1561  new->hva_node[idx].last = new->userspace_addr +    [in kvm_replace_memslot()]
     1815  dest->userspace_addr = src->userspace_addr;    [in kvm_copy_memslot()]
     2044  if ((mem->userspace_addr & (PAGE_SIZE - 1)) ||    [in kvm_set_memory_region()]
     2045  (mem->userspace_addr != untagged_addr(mem->userspace_addr)) ||    [in kvm_set_memory_region()]
     2046  !access_ok((void __user *)(unsigned long)mem->userspace_addr,    [in kvm_set_memory_region()]
     2102  if ((mem->userspace_addr != old->userspace_addr) ||    [in kvm_set_memory_region()]
     2129  new->userspace_addr = mem->userspace_addr;    [in kvm_set_memory_region()]
     [all …]
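kvm_handle_hva_range() (lines 597-599) clamps an MMU-notifier address range to each memslot's host-virtual window, slot->userspace_addr through slot->userspace_addr + (npages << PAGE_SHIFT), before acting on it. A self-contained restatement of that clamp (illustrative only; names are hypothetical):

```c
/* Illustrative restatement of the per-slot clamp at lines 597-599. */
#include <stdint.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed for the sketch */

struct slot_window {
	uint64_t userspace_addr;	/* slot->userspace_addr */
	uint64_t npages;		/* slot->npages */
};

/* Returns 1 and fills [*hva_start, *hva_end) if the range touches the slot. */
static int clamp_range_to_slot(const struct slot_window *slot,
			       uint64_t range_start, uint64_t range_end,
			       uint64_t *hva_start, uint64_t *hva_end)
{
	uint64_t slot_end = slot->userspace_addr + (slot->npages << PAGE_SHIFT);

	*hva_start = range_start > slot->userspace_addr ?
		     range_start : slot->userspace_addr;
	*hva_end = range_end < slot_end ? range_end : slot_end;

	return *hva_start < *hva_end;
}
```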
|
/linux/arch/loongarch/kvm/
  mmu.c
      398  hva_start = new->userspace_addr;    [in kvm_arch_prepare_memory_region()]
      623  start = memslot->userspace_addr;    [in fault_supports_huge_mapping()]
|
/linux/tools/testing/selftests/kvm/s390/
  ucontrol_test.c
      446  .userspace_addr = (uintptr_t)self->code_hva,    [in TEST_F()]
      452  .userspace_addr = (uintptr_t)self->code_hva,    [in TEST_F()]
|
/linux/arch/arm64/kvm/
  mmu.c
     1030  hva_t hva = memslot->userspace_addr;    [in stage2_unmap_memslot()]
     1062  gpa_t gpa = addr + (vm_start - memslot->userspace_addr);    [in stage2_unmap_memslot()]
     1347  uaddr_start = memslot->userspace_addr;    [in fault_supports_stage2_huge_mapping()]
     2439  hva = new->userspace_addr;    [in kvm_arch_prepare_memory_region()]
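fault_supports_stage2_huge_mapping() (line 1347 and its surroundings) allows a stage-2 block mapping only when userspace_addr and the guest IPA share the same offset within the block and the candidate block fits inside the memslot; this is the kernel-side counterpart of the api.rst recommendation (further down) that the low 21 bits of guest_phys_addr and userspace_addr match. A sketch of that condition (illustrative, reconstructed from the logic visible here, not a copy of the kernel function):

```c
/* Illustrative restatement of the block-mapping conditions around line 1347. */
#include <stdbool.h>
#include <stdint.h>

static bool supports_block_mapping(uint64_t uaddr_start, uint64_t uaddr_end,
				   uint64_t gpa_start, uint64_t fault_hva,
				   uint64_t block_size)
{
	/* HVA and IPA must share the same offset within a block. */
	if ((uaddr_start & (block_size - 1)) != (gpa_start & (block_size - 1)))
		return false;

	/* The block containing the faulting HVA must lie wholly inside the memslot. */
	return (fault_hva & ~(block_size - 1)) >= uaddr_start &&
	       (fault_hva & ~(block_size - 1)) + block_size <= uaddr_end;
}
```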
|
/linux/arch/s390/kvm/
  pv.c
      323  s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);    [in kvm_s390_destroy_lower_2g()]
|
  kvm-s390.c
     3425  .userspace_addr = 0,    [in kvm_arch_init_vm()]
     5956  if (new->userspace_addr & 0xffffful)    [in kvm_arch_prepare_memory_region()]
     6008  rc = gmap_map_segment(kvm->arch.gmap, new->userspace_addr,    [in kvm_arch_commit_memory_region()]
|
/linux/tools/virtio/
  virtio_test.c
      148  dev->mem->regions[0].userspace_addr = (long)dev->buf;    [in vdev_info_init()]
|
  vhost_net_test.c
      245  dev->mem->regions[0].userspace_addr = (long)dev->buf;    [in vdev_info_init()]
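Both tests build a one-region vhost memory table whose userspace_addr points at a buffer in the test process (virtio_test.c line 148, vhost_net_test.c line 245) and pass it to the vhost fd, where vhost_set_memory() (drivers/vhost/vhost.c, below) consumes it. A minimal sketch of that step, assuming struct vhost_memory and VHOST_SET_MEM_TABLE from <linux/vhost.h>; the helper is hypothetical and error handling is trimmed:

```c
/* Hypothetical helper mirroring the tests' vhost memory-table setup. */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

static int set_mem_table(int vhost_fd, void *buf, size_t len)
{
	struct vhost_memory *mem;
	int ret;

	mem = calloc(1, sizeof(*mem) + sizeof(struct vhost_memory_region));
	if (!mem)
		return -1;

	mem->nregions = 1;
	/*
	 * Like the tests, expose the buffer at a "guest physical" address
	 * equal to its own process address, so pointers can be used directly.
	 */
	mem->regions[0].guest_phys_addr = (unsigned long)buf;
	mem->regions[0].memory_size = len;
	mem->regions[0].userspace_addr = (unsigned long)buf;

	ret = ioctl(vhost_fd, VHOST_SET_MEM_TABLE, mem);
	free(mem);
	return ret;
}
```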
|
/linux/tools/include/uapi/linux/
  kvm.h
       30  __u64 userspace_addr; /* start of the userspace allocated memory */    [member]
       39  __u64 userspace_addr;    [member]
|
/linux/tools/testing/selftests/kvm/lib/
  kvm_util.c
      896  .userspace_addr = (uintptr_t)hva,    [in __vm_set_user_memory_region()]
      924  .userspace_addr = (uintptr_t)hva,    [in __vm_set_user_memory_region2()]
     1090  region->region.userspace_addr = (uintptr_t) region->host_mem;    [in vm_mem_add()]
|
/linux/arch/riscv/kvm/
  mmu.c
      192  hva = new->userspace_addr;    [in kvm_arch_prepare_memory_region()]
|
/linux/include/linux/
  kvm_host.h
      602  unsigned long userspace_addr;    [member]
     1875  return slot->userspace_addr + offset * PAGE_SIZE;    [in __gfn_to_hva_memslot()]
     1886  gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;    [in hva_to_gfn_memslot()]
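The inline helpers at lines 1875 and 1886 are the linear mapping between a memslot's guest frame numbers and host virtual addresses: hva = userspace_addr + (gfn - base_gfn) * PAGE_SIZE, and its inverse. A self-contained restatement of the arithmetic (the struct below is a hypothetical stand-in for struct kvm_memory_slot):

```c
/* Illustrative restatement of the gfn <-> hva arithmetic at lines 1875/1886. */
#include <stdint.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

struct memslot {			/* hypothetical stand-in for struct kvm_memory_slot */
	uint64_t base_gfn;		/* first guest frame number covered by the slot */
	uint64_t npages;		/* slot length in pages */
	uint64_t userspace_addr;	/* host virtual address backing base_gfn */
};

/* gfn -> host virtual address, cf. __gfn_to_hva_memslot() */
static uint64_t gfn_to_hva(const struct memslot *slot, uint64_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

/* host virtual address -> gfn, cf. hva_to_gfn_memslot() */
static uint64_t hva_to_gfn(const struct memslot *slot, uint64_t hva)
{
	return slot->base_gfn + ((hva - slot->userspace_addr) >> PAGE_SHIFT);
}
```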
|
/linux/arch/powerpc/kvm/
  book3s_hv_uvmem.c
      622  addr = slot->userspace_addr;    [in kvmppc_uvmem_drop_pages()]
|
/linux/Documentation/virt/kvm/
  api.rst
     1371  __u64 userspace_addr; /* start of the userspace allocated memory */
     1396  field userspace_addr, which must point at user addressable memory for
     1400  On architectures that support a form of address tagging, userspace_addr must
     1403  It is recommended that the lower 21 bits of guest_phys_addr and userspace_addr
     6337  __u64 userspace_addr; /* start of the userspace allocated memory */
     6345  userspace_addr (shared memory). However, "valid" for userspace_addr simply
     6347  mapping for userspace_addr is not required to be valid/populated at the time of
     6352  userspace_addr vs. guest_memfd, based on the gfn's KVM_MEMORY_ATTRIBUTE_PRIVATE
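The excerpt documents KVM_SET_USER_MEMORY_REGION: userspace mmap()s backing memory, fills struct kvm_userspace_memory_region, and issues the ioctl on the VM fd; userspace_addr must be page aligned, untagged, and user addressable, and keeping its low 21 bits equal to guest_phys_addr's lets the host back the slot with huge pages. A minimal sketch of that flow, assuming <linux/kvm.h> and a vm_fd obtained via KVM_CREATE_VM; error handling trimmed:

```c
/* Minimal sketch: register anonymous memory as memslot 0 of a VM. */
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kvm.h>

static int add_memslot(int vm_fd, __u64 guest_phys_addr, __u64 size)
{
	struct kvm_userspace_memory_region region;
	void *backing;

	backing = mmap(NULL, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (backing == MAP_FAILED)
		return -1;

	region.slot = 0;
	region.flags = 0;
	region.guest_phys_addr = guest_phys_addr;
	region.memory_size = size;
	/* Per the recommendation above, align the low 21 bits of this address
	 * with guest_phys_addr when huge-page backing is desired. */
	region.userspace_addr = (__u64)(unsigned long)backing;

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
}
```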
|
/linux/drivers/vhost/
  vhost.c
     2010  region->userspace_addr,    [in vhost_set_memory()]
|