Lines matching symbol `data` (definitions and uses)

128 static void check_mmio_access(struct vm_data *data, struct kvm_run *run)
130 TEST_ASSERT(data->mmio_ok, "Unexpected mmio exit");
134 TEST_ASSERT(run->mmio.phys_addr >= data->mmio_gpa_min &&
135 run->mmio.phys_addr <= data->mmio_gpa_max,
142 struct vm_data *data = __data;
143 struct kvm_vcpu *vcpu = data->vcpu;
159 check_mmio_access(data, run);
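
The fragments at lines 142-159 show the vCPU worker receiving its struct vm_data through the pthread void * argument (it is started that way at line 368) and forwarding MMIO exits to check_mmio_access(), which at lines 130-135 insists that MMIO is expected and that the faulting address lies inside [mmio_gpa_min, mmio_gpa_max]. A minimal, self-contained sketch of that pattern; struct worker_data, mmio_access_ok() and worker() are illustrative stand-ins, not the test's code.

#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the fields the worker consults on an MMIO exit. */
struct worker_data {
	bool mmio_ok;
	uint64_t mmio_gpa_min;
	uint64_t mmio_gpa_max;
};

/* Mirrors check_mmio_access(): an MMIO exit is only acceptable while the
 * driver thread has opened a window, and only within [min, max]. */
static bool mmio_access_ok(const struct worker_data *data, uint64_t phys_addr)
{
	return data->mmio_ok &&
	       phys_addr >= data->mmio_gpa_min &&
	       phys_addr <= data->mmio_gpa_max;
}

/* Mirrors vcpu_worker(): the per-VM state arrives as the pthread argument. */
static void *worker(void *__data)
{
	struct worker_data *data = __data;

	printf("exit at 0x1000 ok? %d\n", mmio_access_ok(data, 0x1000));
	return NULL;
}

int main(void)
{
	struct worker_data data = {
		.mmio_ok = true,
		.mmio_gpa_min = 0x1000,
		.mmio_gpa_max = 0x1fff,
	};
	pthread_t thread;

	pthread_create(&thread, NULL, worker, &data);
	pthread_join(thread, NULL);
	return 0;
}
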
189 static void *vm_gpa2hva(struct vm_data *data, uint64_t gpa, uint64_t *rempages)
194 uint32_t guest_page_size = data->vm->page_size;
197 TEST_ASSERT(gpa < MEM_GPA + data->npages * guest_page_size,
203 slot = min(gpage / data->pages_per_slot, (uint64_t)data->nslots - 1);
204 slotoffs = gpage - (slot * data->pages_per_slot);
209 if (slot == data->nslots - 1)
210 slotpages = data->npages - slot * data->pages_per_slot;
212 slotpages = data->pages_per_slot;
219 base = data->hva_slots[slot];
223 static uint64_t vm_slot2gpa(struct vm_data *data, uint32_t slot)
225 uint32_t guest_page_size = data->vm->page_size;
227 TEST_ASSERT(slot < data->nslots, "Too high slot number");
229 return MEM_GPA + slot * data->pages_per_slot * guest_page_size;
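
vm_gpa2hva() (lines 189-219) and vm_slot2gpa() (lines 223-229) encode the memory layout: the range starting at MEM_GPA is carved into nslots slots of pages_per_slot pages each, with the division remainder folded into the last slot, which is why line 203 clamps the computed slot index and lines 209-212 size the last slot differently. Below is a self-contained sketch of that arithmetic; MEM_GPA, GUEST_PAGE_SIZE, struct layout and the two helper names are placeholders, and the example values are arbitrary.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative placeholders; the real test defines its own MEM_GPA and
 * takes the page size from data->vm->page_size. */
#define MEM_GPA          0x10000000ULL
#define GUEST_PAGE_SIZE  4096ULL

struct layout {
	uint64_t npages;         /* total pages backing the test range */
	uint32_t nslots;         /* memslots the range is split across */
	uint64_t pages_per_slot; /* npages / nslots                    */
};

/* Mirrors vm_slot2gpa(): slots cover consecutive pages_per_slot-sized runs. */
static uint64_t slot_to_gpa(const struct layout *l, uint32_t slot)
{
	return MEM_GPA + (uint64_t)slot * l->pages_per_slot * GUEST_PAGE_SIZE;
}

/* Mirrors the vm_gpa2hva() arithmetic: clamp to the last slot, which also
 * absorbs the npages % nslots remainder. */
static void gpa_to_slot(const struct layout *l, uint64_t gpa,
			uint32_t *slot, uint64_t *slotoffs, uint64_t *slotpages)
{
	uint64_t gpage = (gpa - MEM_GPA) / GUEST_PAGE_SIZE;
	uint64_t s = gpage / l->pages_per_slot;

	if (s > l->nslots - 1)
		s = l->nslots - 1;

	*slot = (uint32_t)s;
	*slotoffs = gpage - s * l->pages_per_slot;
	*slotpages = (s == l->nslots - 1) ?
		     l->npages - s * l->pages_per_slot : l->pages_per_slot;
}

int main(void)
{
	struct layout l = { .npages = 1037, .nslots = 10, .pages_per_slot = 103 };
	uint32_t slot;
	uint64_t offs, pages;

	/* A page past nslots * pages_per_slot still lands in the last slot. */
	gpa_to_slot(&l, MEM_GPA + 1030 * GUEST_PAGE_SIZE, &slot, &offs, &pages);
	printf("slot=%u offs=%" PRIu64 " slotpages=%" PRIu64 "\n", slot, offs, pages);
	printf("slot %u starts at gpa 0x%" PRIx64 "\n", slot, slot_to_gpa(&l, slot));
	return 0;
}

With npages = 1037 and nslots = 10, pages 927-1036 all resolve to slot 9, whose slotpages comes out as 110 rather than 103; the same split is why prepare_vm() treats its last slot specially in both setup loops below.
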
234 struct vm_data *data;
236 data = malloc(sizeof(*data));
237 TEST_ASSERT(data, "malloc(vmdata) failed");
239 data->vm = NULL;
240 data->vcpu = NULL;
241 data->hva_slots = NULL;
243 return data;
262 static uint64_t get_max_slots(struct vm_data *data, uint32_t host_page_size)
264 uint32_t guest_page_size = data->vm->page_size;
268 mempages = data->npages;
269 slots = data->nslots;
284 static bool prepare_vm(struct vm_data *data, int nslots, uint64_t *maxslots,
298 data->vm = __vm_create_with_one_vcpu(&data->vcpu, mempages, guest_code);
299 TEST_ASSERT(data->vm->page_size == guest_page_size, "Invalid VM page size");
301 data->npages = mempages;
302 TEST_ASSERT(data->npages > 1, "Can't test without any memory");
303 data->nslots = nslots;
304 data->pages_per_slot = data->npages / data->nslots;
305 rempages = data->npages % data->nslots;
307 data->pages_per_slot, rempages)) {
308 *maxslots = get_max_slots(data, host_page_size);
312 data->hva_slots = malloc(sizeof(*data->hva_slots) * data->nslots);
313 TEST_ASSERT(data->hva_slots, "malloc() fail");
316 data->nslots, data->pages_per_slot, rempages);
319 for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
322 npages = data->pages_per_slot;
323 if (slot == data->nslots)
326 vm_userspace_mem_region_add(data->vm, VM_MEM_SRC_ANONYMOUS,
333 for (slot = 1, guest_addr = MEM_GPA; slot <= data->nslots; slot++) {
337 npages = data->pages_per_slot;
338 if (slot == data->nslots)
341 gpa = vm_phy_pages_alloc(data->vm, npages, guest_addr, slot);
345 data->hva_slots[slot - 1] = addr_gpa2hva(data->vm, guest_addr);
346 memset(data->hva_slots[slot - 1], 0, npages * guest_page_size);
351 virt_map(data->vm, MEM_GPA, MEM_GPA, data->npages);
353 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
354 sync->guest_page_size = data->vm->page_size;
359 data->mmio_ok = false;
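
prepare_vm() (lines 284-359) derives pages_per_slot and rempages at lines 304-305 and then sets the slots up in two passes: register the regions (line 326), then allocate, translate and zero them (lines 341-346) before identity-mapping the range (line 351). The listing does not show where the remainder pages go, but the vm_gpa2hva() sizing above implies the last slot absorbs them; the short standalone check below illustrates that split with arbitrary example values.

#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative inputs; the test computes these from the requested
	 * memory size and slot count. */
	uint64_t npages = 1037;
	uint32_t nslots = 10;

	uint64_t pages_per_slot = npages / nslots;   /* line 304 */
	uint64_t rempages = npages % nslots;         /* line 305 */
	uint64_t total = 0;

	for (uint32_t slot = 1; slot <= nslots; slot++) {
		uint64_t slot_pages = pages_per_slot;

		if (slot == nslots)
			slot_pages += rempages;  /* last slot takes the leftover pages (implied) */

		total += slot_pages;
	}

	assert(total == npages);
	printf("%u slots x %" PRIu64 " pages, last slot +%" PRIu64 "\n",
	       nslots, pages_per_slot, rempages);
	return 0;
}
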
364 static void launch_vm(struct vm_data *data)
368 pthread_create(&data->vcpu_thread, NULL, vcpu_worker, data);
374 static void free_vm(struct vm_data *data)
376 kvm_vm_free(data->vm);
377 free(data->hva_slots);
378 free(data);
381 static void wait_guest_exit(struct vm_data *data)
383 pthread_join(data->vcpu_thread, NULL);
578 static bool test_memslot_move_prepare(struct vm_data *data,
582 uint32_t guest_page_size = data->vm->page_size;
587 vm_enable_cap(data->vm, KVM_CAP_DISABLE_QUIRKS2, KVM_X86_QUIRK_SLOT_ZAP_ALL);
590 movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
595 vm_gpa2hva(data, movesrcgpa, &lastpages);
606 data->mmio_ok = true;
607 data->mmio_gpa_min = movesrcgpa;
608 data->mmio_gpa_max = movesrcgpa + MEM_TEST_MOVE_SIZE / 2 - 1;
614 static bool test_memslot_move_prepare_active(struct vm_data *data,
618 return test_memslot_move_prepare(data, sync, maxslots, true);
621 static bool test_memslot_move_prepare_inactive(struct vm_data *data,
625 return test_memslot_move_prepare(data, sync, maxslots, false);
628 static void test_memslot_move_loop(struct vm_data *data, struct sync_area *sync)
632 movesrcgpa = vm_slot2gpa(data, data->nslots - 1);
633 vm_mem_region_move(data->vm, data->nslots - 1 + 1,
635 vm_mem_region_move(data->vm, data->nslots - 1 + 1, movesrcgpa);
638 static void test_memslot_do_unmap(struct vm_data *data,
642 uint32_t guest_page_size = data->vm->page_size;
649 hva = vm_gpa2hva(data, gpa, &npages);
663 static void test_memslot_map_unmap_check(struct vm_data *data,
668 uint32_t guest_page_size = data->vm->page_size;
674 val = (typeof(val))vm_gpa2hva(data, gpa, NULL);
681 static void test_memslot_map_loop(struct vm_data *data, struct sync_area *sync)
683 uint32_t guest_page_size = data->vm->page_size;
690 test_memslot_do_unmap(data, guest_pages / 2, guest_pages / 2);
700 test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
701 test_memslot_map_unmap_check(data, guest_pages / 2 - 1, MEM_TEST_VAL_1);
702 test_memslot_do_unmap(data, 0, guest_pages / 2);
715 test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
716 test_memslot_map_unmap_check(data, guest_pages - 1, MEM_TEST_VAL_2);
719 static void test_memslot_unmap_loop_common(struct vm_data *data,
723 uint32_t guest_page_size = data->vm->page_size;
735 test_memslot_map_unmap_check(data, 0, MEM_TEST_VAL_1);
737 test_memslot_do_unmap(data, ctr, chunk);
741 test_memslot_map_unmap_check(data, guest_pages / 2, MEM_TEST_VAL_2);
743 test_memslot_do_unmap(data, ctr, chunk);
746 static void test_memslot_unmap_loop(struct vm_data *data,
750 uint32_t guest_page_size = data->vm->page_size;
754 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
757 static void test_memslot_unmap_loop_chunked(struct vm_data *data,
760 uint32_t guest_page_size = data->vm->page_size;
763 test_memslot_unmap_loop_common(data, sync, guest_chunk_pages);
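
The unmap loops tear the test range down in chunks: test_memslot_unmap_loop_common() repeatedly calls test_memslot_do_unmap(data, ctr, chunk) for each half of the range (lines 737 and 743), and the chunked variant differs only in the chunk size it derives at line 760. The listing does not show how test_memslot_do_unmap() actually drops the pages; the standalone sketch below uses madvise(MADV_DONTNEED) on an anonymous mapping purely as an assumed host-side mechanism, to illustrate the chunk-wise iteration.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	/* Illustrative stand-in for one half of the test region. */
	long page = sysconf(_SC_PAGESIZE);
	uint64_t pages = 256, chunk = 32;
	size_t len = (size_t)pages * page;
	uint8_t *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (mem == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	memset(mem, 0xaa, len);  /* fault the pages in */

	/* Chunk-wise teardown, analogous to the ctr += chunk loop driving
	 * test_memslot_do_unmap(data, ctr, chunk). MADV_DONTNEED is an
	 * assumption here, not something the listing shows. */
	for (uint64_t ctr = 0; ctr < pages; ctr += chunk) {
		if (madvise(mem + ctr * page, (size_t)chunk * page, MADV_DONTNEED))
			perror("madvise");
	}

	munmap(mem, len);
	return 0;
}
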
766 static void test_memslot_rw_loop(struct vm_data *data, struct sync_area *sync)
769 uint32_t guest_page_size = data->vm->page_size;
773 *(uint64_t *)vm_gpa2hva(data, gptr, NULL) = MEM_TEST_VAL_2;
779 uint64_t *vptr = (typeof(vptr))vm_gpa2hva(data, gptr, NULL);
795 bool (*prepare)(struct vm_data *data, struct sync_area *sync,
797 void (*loop)(struct vm_data *data, struct sync_area *sync);
808 struct vm_data *data;
813 data = alloc_vm();
814 if (!prepare_vm(data, nslots, maxslots, tdata->guest_code,
820 sync = (typeof(sync))vm_gpa2hva(data, MEM_SYNC_GPA, NULL);
822 !tdata->prepare(data, sync, maxslots)) {
827 launch_vm(data);
837 tdata->loop(data, sync);
843 wait_guest_exit(data);
846 free_vm(data);
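
Lines 795-846 give the driver's shape: each test supplies prepare() and loop() callbacks (lines 795-797), and the executor allocates the VM, runs prepare_vm() and the per-test prepare(), launches the vCPU thread (line 827), drives loop() (line 837), then joins the vCPU and frees the VM (lines 843-846). Only a single loop() call appears in the listing, but since a run time in seconds is passed down (line 1055) the call is presumably repeated until that time expires. A self-contained mock of that callback-driven structure; every mock_* name and the timing policy are illustrative assumptions.

#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-ins for the test's vm_data / sync_area plumbing. */
struct mock_vm { uint64_t loops; };
struct mock_sync { int unused; };

struct mock_test {
	const char *name;
	bool (*prepare)(struct mock_vm *vm, struct mock_sync *sync);
	void (*loop)(struct mock_vm *vm, struct mock_sync *sync);
};

static bool demo_prepare(struct mock_vm *vm, struct mock_sync *sync)
{
	(void)sync;
	vm->loops = 0;
	return true;
}

static void demo_loop(struct mock_vm *vm, struct mock_sync *sync)
{
	(void)sync;
	vm->loops++;  /* stand-in for one move/map/unmap/rw iteration */
}

/* Shape of the executor: set up, run prepare(), drive loop() until the
 * requested number of seconds has elapsed, then tear down. */
static void run_test(const struct mock_test *t, int seconds)
{
	struct mock_vm vm;
	struct mock_sync sync = { 0 };
	time_t end = time(NULL) + seconds;

	if (t->prepare && !t->prepare(&vm, &sync))
		return;

	while (time(NULL) < end)
		t->loop(&vm, &sync);

	printf("%s: %" PRIu64 " loops\n", t->name, vm.loops);
}

int main(void)
{
	struct mock_test t = {
		.name = "demo",
		.prepare = demo_prepare,
		.loop = demo_loop,
	};

	run_test(&t, 1);
	return 0;
}
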
1047 static bool test_loop(const struct test_data *data,
1055 if (!test_execute(targs->nslots, &maxslots, targs->seconds, data,
1088 if (!data->mem_size &&
1118 const struct test_data *data = &tests[tctr];
1126 data->name, targs.runs, targs.seconds);
1129 if (!test_loop(data, &targs,