// SPDX-License-Identifier: GPL-2.0-only
/*
 * Test for s390x CMMA migration
 *
 * Copyright IBM Corp. 2023
 *
 * Authors:
 *  Nico Boehr <nrb@linux.ibm.com>
 */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
#include "processor.h"

#define MAIN_PAGE_COUNT 512

#define TEST_DATA_PAGE_COUNT 512
#define TEST_DATA_MEMSLOT 1
#define TEST_DATA_START_GFN PAGE_SIZE

#define TEST_DATA_TWO_PAGE_COUNT 256
#define TEST_DATA_TWO_MEMSLOT 2
#define TEST_DATA_TWO_START_GFN (2 * PAGE_SIZE)

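/*
 * Buffer for KVM_S390_GET_CMMA_BITS: KVM hands back one CMMA value byte per
 * page, so this must be large enough to cover every page in both memslots.
 */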
static char cmma_value_buf[MAIN_PAGE_COUNT + TEST_DATA_PAGE_COUNT];

/**
 * Dirty the CMMA attributes of exactly one page in the TEST_DATA memslot,
 * so that use_cmma is turned on and the CMMA-related ioctls do something.
 */
static void guest_do_one_essa(void)
{
	asm volatile(
		/* load TEST_DATA_START_GFN into r1 */
		"	llilf 1,%[start_gfn]\n"
		/* calculate the address from the gfn */
		"	sllg 1,1,12(0)\n"
		/* set the first page in TEST_DATA memslot to STABLE */
		"	.insn rrf,0xb9ab0000,2,1,1,0\n"
		/* hypercall */
		"	diag 0,0,0x501\n"
		"0:	j 0b"
		:
		: [start_gfn] "L"(TEST_DATA_START_GFN)
		: "r1", "r2", "memory", "cc"
	);
}
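
/*
 * A note on the magic numbers in guest_do_one_essa(): 0xb9ab is the opcode
 * of ESSA (extract and set storage attributes), so the
 * ".insn rrf,0xb9ab0000,2,1,1,0" above assembles to "essa %r2,%r1,1":
 * extract the usage state of the page addressed by r1 into r2 and set it to
 * stable (order code 1, ESSA_SET_STABLE in the kernel's asm/page.h).
 * diag 0x501 is the hypercall number the selftest ucall framework uses on
 * s390.
 */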

/**
 * Touch the CMMA attributes of all pages in the TEST_DATA memslot, setting
 * them to the stable state.
 */
static void guest_dirty_test_data(void)
{
	asm volatile(
		/* r1 = TEST_DATA_START_GFN */
		"	xgr 1,1\n"
		"	llilf 1,%[start_gfn]\n"
		/* r5 = TEST_DATA_PAGE_COUNT */
		"	lghi 5,%[page_count]\n"
		/* r5 += r1 */
		"2:	agfr 5,1\n"
		/* r2 = r1 << PAGE_SHIFT */
		"1:	sllg 2,1,12(0)\n"
		/* essa(r4, r2, SET_STABLE) */
		"	.insn rrf,0xb9ab0000,4,2,1,0\n"
		/* i++ */
		"	agfi 1,1\n"
		/* if r1 < r5 goto 1 */
		"	cgrjl 1,5,1b\n"
		/* hypercall */
		"	diag 0,0,0x501\n"
		"0:	j 0b"
		:
		: [start_gfn] "L"(TEST_DATA_START_GFN),
		  [page_count] "L"(TEST_DATA_PAGE_COUNT)
		:
			/* the counter in our loop over the pages */
			"r1",
			/* the calculated page physical address */
			"r2",
			/* ESSA output register */
			"r4",
			/* last page */
			"r5",
			"cc", "memory"
	);
}
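
/*
 * For readability, the loop above is roughly equivalent to the following C
 * sketch (essa() is illustrative, not a real helper in this file):
 *
 *	for (gfn = TEST_DATA_START_GFN;
 *	     gfn < TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT; gfn++)
 *		essa(ESSA_SET_STABLE, gfn << 12);
 *
 * followed by the ucall hypercall and an endless loop.
 */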

static void create_main_memslot(struct kvm_vm *vm)
{
	int i;

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, MAIN_PAGE_COUNT, 0);
	/* set the array of memslots to zero like __vm_create does */
	for (i = 0; i < NR_MEM_REGIONS; i++)
		vm->memslots[i] = 0;
}

static void create_test_memslot(struct kvm_vm *vm)
{
	vm_userspace_mem_region_add(vm,
				    VM_MEM_SRC_ANONYMOUS,
				    TEST_DATA_START_GFN << vm->page_shift,
				    TEST_DATA_MEMSLOT,
				    TEST_DATA_PAGE_COUNT,
				    0
				   );
	vm->memslots[MEM_REGION_TEST_DATA] = TEST_DATA_MEMSLOT;
}

static void create_memslots(struct kvm_vm *vm)
{
	/*
	 * Our VM has the following memory layout:
	 * +------+---------------------------+
	 * | GFN  | Memslot                   |
	 * +------+---------------------------+
	 * | 0    |                           |
	 * | ...  | MAIN (Code, Stack, ...)   |
	 * | 511  |                           |
	 * +------+---------------------------+
	 * | 4096 |                           |
	 * | ...  | TEST_DATA                 |
	 * | 4607 |                           |
	 * +------+---------------------------+
	 */
	create_main_memslot(vm);
	create_test_memslot(vm);
}
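
/*
 * Note the deliberate hole in the layout above: GFNs
 * [MAIN_PAGE_COUNT, TEST_DATA_START_GFN) are not backed by any memslot.
 * assert_all_slots_cmma_dirty() relies on GET_CMMA_BITS skipping this hole.
 */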

static void finish_vm_setup(struct kvm_vm *vm)
{
	struct userspace_mem_region *slot0;

	kvm_vm_elf_load(vm, program_invocation_name);

	slot0 = memslot2region(vm, 0);
	ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size);

	kvm_arch_vm_post_create(vm);
}

static struct kvm_vm *create_vm_two_memslots(void)
{
	struct kvm_vm *vm;

	vm = vm_create_barebones();

	create_memslots(vm);

	finish_vm_setup(vm);

	return vm;
}

static void enable_cmma(struct kvm_vm *vm)
{
	int r;

	r = __kvm_device_attr_set(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA, NULL);
	TEST_ASSERT(!r, "enabling cmma failed r=%d errno=%d", r, errno);
}

static void enable_dirty_tracking(struct kvm_vm *vm)
{
	vm_mem_region_set_flags(vm, 0, KVM_MEM_LOG_DIRTY_PAGES);
	vm_mem_region_set_flags(vm, TEST_DATA_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
}

static int __enable_migration_mode(struct kvm_vm *vm)
{
	return __kvm_device_attr_set(vm->fd,
				     KVM_S390_VM_MIGRATION,
				     KVM_S390_VM_MIGRATION_START,
				     NULL
				    );
}

static void enable_migration_mode(struct kvm_vm *vm)
{
	int r = __enable_migration_mode(vm);

	TEST_ASSERT(!r, "enabling migration mode failed r=%d errno=%d", r, errno);
}

static bool is_migration_mode_on(struct kvm_vm *vm)
{
	u64 out;
	int r;

	r = __kvm_device_attr_get(vm->fd,
				  KVM_S390_VM_MIGRATION,
				  KVM_S390_VM_MIGRATION_STATUS,
				  &out
				 );
	TEST_ASSERT(!r, "getting migration mode status failed r=%d errno=%d", r, errno);
	return out;
}
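
/*
 * Migration mode (KVM_S390_VM_MIGRATION) can only be started while every
 * memslot has dirty tracking enabled; adding a memslot without
 * KVM_MEM_LOG_DIRTY_PAGES, or clearing that flag on an existing one, turns
 * migration mode off again. test_migration_mode() exercises exactly these
 * rules.
 */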

static int vm_get_cmma_bits(struct kvm_vm *vm, u64 flags, int *errno_out)
{
	struct kvm_s390_cmma_log args;
	int rc;

	errno = 0;

	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = flags,
		.values = (__u64)&cmma_value_buf[0]
	};
	rc = __vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);

	*errno_out = errno;
	return rc;
}
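
/*
 * For reference (KVM_S390_GET_CMMA_BITS, Documentation/virt/kvm/api.rst):
 * on input, start_gfn is the first gfn to consider and count the size of the
 * values buffer; on output, KVM sets start_gfn to the gfn of the first value
 * written, count to the number of values written and remaining to the number
 * of dirty values still left to retrieve.
 */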

static void test_get_cmma_basic(void)
{
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_vcpu *vcpu;
	int rc, errno_out;

	/* GET_CMMA_BITS without CMMA enabled should fail */
	rc = vm_get_cmma_bits(vm, 0, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, ENXIO);

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);

	vcpu_run(vcpu);

	/* GET_CMMA_BITS without migration mode and without peeking should fail */
	rc = vm_get_cmma_bits(vm, 0, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, EINVAL);

	/* GET_CMMA_BITS without migration mode and with peeking should work */
	rc = vm_get_cmma_bits(vm, KVM_S390_CMMA_PEEK, &errno_out);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT_EQ(errno_out, 0);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	/* GET_CMMA_BITS with invalid flags */
	rc = vm_get_cmma_bits(vm, 0xfeedc0fe, &errno_out);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno_out, EINVAL);

	kvm_vm_free(vm);
}

static void assert_exit_was_hypercall(struct kvm_vcpu *vcpu)
{
	TEST_ASSERT_EQ(vcpu->run->exit_reason, 13);		/* KVM_EXIT_S390_SIEIC */
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.icptcode, 4);	/* instruction interception */
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipa, 0x8300);	/* DIAG */
	TEST_ASSERT_EQ(vcpu->run->s390_sieic.ipb, 0x5010000);	/* function code 0x501 */
}

static void test_migration_mode(void)
{
	struct kvm_vm *vm = vm_create_barebones();
	struct kvm_vcpu *vcpu;
	u64 orig_psw;
	int rc;

	/* enabling migration mode on a VM without memory should fail */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno, EINVAL);
	TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
	errno = 0;

	create_memslots(vm);
	finish_vm_setup(vm);

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);
	orig_psw = vcpu->run->psw_addr;

	/*
	 * Execute one essa instruction in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/* migration mode when memslots have dirty tracking off should fail */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, -1);
	TEST_ASSERT_EQ(errno, EINVAL);
	TEST_ASSERT(!is_migration_mode_on(vm), "migration mode should still be off");
	errno = 0;

	/* enable dirty tracking */
	enable_dirty_tracking(vm);

	/* enabling migration mode should work now */
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	errno = 0;

	/* execute another ESSA instruction to verify this still works fine */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/*
	 * With migration mode on, create a new memslot with dirty tracking off.
	 * This should turn off migration mode.
	 */
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	vm_userspace_mem_region_add(vm,
				    VM_MEM_SRC_ANONYMOUS,
				    TEST_DATA_TWO_START_GFN << vm->page_shift,
				    TEST_DATA_TWO_MEMSLOT,
				    TEST_DATA_TWO_PAGE_COUNT,
				    0
				   );
	TEST_ASSERT(!is_migration_mode_on(vm),
		    "creating memslot without dirty tracking turns off migration mode"
		   );

	/* ESSA instructions should still execute fine */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	/*
	 * Turn on dirty tracking on the new memslot.
	 * It should be possible to turn migration mode back on again.
	 */
	vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, KVM_MEM_LOG_DIRTY_PAGES);
	rc = __enable_migration_mode(vm);
	TEST_ASSERT_EQ(rc, 0);
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	errno = 0;

	/*
	 * Turn off dirty tracking again, this time with just a flag change.
	 * Again, migration mode should turn off.
	 */
	TEST_ASSERT(is_migration_mode_on(vm), "migration mode should be on");
	vm_mem_region_set_flags(vm, TEST_DATA_TWO_MEMSLOT, 0);
	TEST_ASSERT(!is_migration_mode_on(vm),
		    "disabling dirty tracking should turn off migration mode"
		   );

	/* ESSA instructions should still execute fine */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	kvm_vm_free(vm);
}

/**
 * Given a VM with the MAIN and TEST_DATA memslots, assert that the CMMA
 * attributes of every page in both memslots are reported dirty, and that
 * nothing else is. Since reading the values also clears them, this has the
 * useful side effect that nothing is CMMA dirty after this function.
 */
static void assert_all_slots_cmma_dirty(struct kvm_vm *vm)
{
	struct kvm_s390_cmma_log args;

	/*
	 * First iteration - everything should be dirty.
	 * Start at the main memslot...
	 */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, MAIN_PAGE_COUNT);
	TEST_ASSERT_EQ(args.remaining, TEST_DATA_PAGE_COUNT);
	TEST_ASSERT_EQ(args.start_gfn, 0);

	/* ...and then - after a hole - the TEST_DATA memslot should follow */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = MAIN_PAGE_COUNT,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, TEST_DATA_PAGE_COUNT);
	TEST_ASSERT_EQ(args.start_gfn, TEST_DATA_START_GFN);
	TEST_ASSERT_EQ(args.remaining, 0);

	/* ...and nothing else should be there */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	TEST_ASSERT_EQ(args.count, 0);
	TEST_ASSERT_EQ(args.start_gfn, 0);
	TEST_ASSERT_EQ(args.remaining, 0);
}
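
/*
 * Note that retrieving the values without KVM_S390_CMMA_PEEK also clears the
 * per-page dirty state, which is why the function above doubles as the
 * "un-dirty all pages" step in test_get_skip_holes().
 */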

/**
 * Given a VM, assert no pages are CMMA dirty.
 */
static void assert_no_pages_cmma_dirty(struct kvm_vm *vm)
{
	struct kvm_s390_cmma_log args;

	/* If we start from GFN 0 again, nothing should be dirty. */
	args = (struct kvm_s390_cmma_log){
		.start_gfn = 0,
		.count = sizeof(cmma_value_buf),
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, &args);
	if (args.count || args.remaining || args.start_gfn)
		TEST_FAIL("pages are still dirty start_gfn=0x%llx count=%u remaining=%llu",
			  args.start_gfn,
			  args.count,
			  args.remaining
			 );
}

static void test_get_initial_dirty(void)
{
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_vcpu *vcpu;

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_do_one_essa);

	/*
	 * Execute one essa instruction in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	assert_all_slots_cmma_dirty(vm);

	/* Start from the beginning again and make sure nothing else is dirty */
	assert_no_pages_cmma_dirty(vm);

	kvm_vm_free(vm);
}

static void query_cmma_range(struct kvm_vm *vm,
			     u64 start_gfn, u64 gfn_count,
			     struct kvm_s390_cmma_log *res_out)
{
	*res_out = (struct kvm_s390_cmma_log){
		.start_gfn = start_gfn,
		.count = gfn_count,
		.flags = 0,
		.values = (__u64)&cmma_value_buf[0]
	};
	memset(cmma_value_buf, 0xff, sizeof(cmma_value_buf));
	vm_ioctl(vm, KVM_S390_GET_CMMA_BITS, res_out);
}
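
/*
 * query_cmma_range() pre-fills cmma_value_buf with 0xff, so a caller can tell
 * entries KVM actually wrote (0x00, the stable state) apart from entries it
 * never touched (still 0xff); assert_cmma_dirty() below relies on this.
 */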

/**
 * Assert that the given cmma_log struct, as filled in by query_cmma_range(),
 * indicates that the first dirty gfn is first_dirty_gfn and that exactly
 * dirty_gfn_count CMMA values were written.
 */
static void assert_cmma_dirty(u64 first_dirty_gfn,
			      u64 dirty_gfn_count,
			      const struct kvm_s390_cmma_log *res)
{
	TEST_ASSERT_EQ(res->start_gfn, first_dirty_gfn);
	TEST_ASSERT_EQ(res->count, dirty_gfn_count);
	for (size_t i = 0; i < dirty_gfn_count; i++)
		TEST_ASSERT_EQ(cmma_value_buf[i], 0x0); /* stable state */
	TEST_ASSERT_EQ(cmma_value_buf[dirty_gfn_count], 0xff); /* not touched */
}

static void test_get_skip_holes(void)
{
	size_t gfn_offset;
	struct kvm_vm *vm = create_vm_two_memslots();
	struct kvm_s390_cmma_log log;
	struct kvm_vcpu *vcpu;
	u64 orig_psw;

	enable_cmma(vm);
	vcpu = vm_vcpu_add(vm, 1, guest_dirty_test_data);

	orig_psw = vcpu->run->psw_addr;

	/*
	 * Execute some essa instructions in the guest. Otherwise the guest will
	 * not have use_cmm enabled and GET_CMMA_BITS will return no pages.
	 */
	vcpu_run(vcpu);
	assert_exit_was_hypercall(vcpu);

	enable_dirty_tracking(vm);
	enable_migration_mode(vm);

	/* un-dirty all pages */
	assert_all_slots_cmma_dirty(vm);

	/* Then, dirty just the TEST_DATA memslot */
	vcpu->run->psw_addr = orig_psw;
	vcpu_run(vcpu);

	gfn_offset = TEST_DATA_START_GFN;
	/**
	 * Query CMMA attributes of one page, starting at page 0. Since the
	 * main memslot was not touched by the VM, this should yield the first
	 * page of the TEST_DATA memslot.
	 * The dirty bitmap should now look like this:
	 * 0: not dirty
	 * [0x1, 0x200): dirty
	 */
	query_cmma_range(vm, 0, 1, &log);
	assert_cmma_dirty(gfn_offset, 1, &log);
	gfn_offset++;

	/**
	 * Query CMMA attributes of 32 (0x20) pages past the end of the TEST_DATA
	 * memslot. This should wrap back to the beginning of the TEST_DATA
	 * memslot, page 1.
	 * The dirty bitmap should now look like this:
	 * [0, 0x21): not dirty
	 * [0x21, 0x200): dirty
	 */
	query_cmma_range(vm, TEST_DATA_START_GFN + TEST_DATA_PAGE_COUNT, 0x20, &log);
	assert_cmma_dirty(gfn_offset, 0x20, &log);
	gfn_offset += 0x20;

	/* Skip 32 pages */
	gfn_offset += 0x20;

	/**
	 * After skipping 32 pages, query the next 32 (0x20) pages.
	 * The dirty bitmap should now look like this:
	 * [0, 0x21): not dirty
	 * [0x21, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	query_cmma_range(vm, gfn_offset, 0x20, &log);
	assert_cmma_dirty(gfn_offset, 0x20, &log);
	gfn_offset += 0x20;

	/**
	 * Query 1 page from the beginning of the TEST_DATA memslot. This should
	 * yield page 0x21.
	 * The dirty bitmap should now look like this:
	 * [0, 0x22): not dirty
	 * [0x22, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	query_cmma_range(vm, TEST_DATA_START_GFN, 1, &log);
	assert_cmma_dirty(TEST_DATA_START_GFN + 0x21, 1, &log);
	gfn_offset++;

	/**
	 * Query 15 (0xF) pages from page 0x23 in TEST_DATA memslot.
	 * This should yield pages [0x23, 0x33).
	 * The dirty bitmap should now look like this:
	 * [0, 0x22): not dirty
	 * 0x22: dirty
	 * [0x23, 0x33): not dirty
	 * [0x33, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x23;
	query_cmma_range(vm, gfn_offset, 15, &log);
	assert_cmma_dirty(gfn_offset, 15, &log);

	/**
	 * Query 17 (0x11) pages from page 0x22 in TEST_DATA memslot.
	 * This should yield pages [0x22, 0x33).
	 * The dirty bitmap should now look like this:
	 * [0, 0x33): not dirty
	 * [0x33, 0x41): dirty
	 * [0x41, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x22;
	query_cmma_range(vm, gfn_offset, 17, &log);
	assert_cmma_dirty(gfn_offset, 17, &log);

	/**
	 * Query 25 (0x19) pages from page 0x40 in TEST_DATA memslot.
	 * This should yield page 0x40 and nothing more, since there are more
	 * than 16 non-dirty pages after page 0x40.
	 * The dirty bitmap should now look like this:
	 * [0, 0x33): not dirty
	 * [0x33, 0x40): dirty
	 * [0x40, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x40;
	query_cmma_range(vm, gfn_offset, 25, &log);
	assert_cmma_dirty(gfn_offset, 1, &log);

	/**
	 * Query pages [0x33, 0x40).
	 * The dirty bitmap should now look like this:
	 * [0, 0x61): not dirty
	 * [0x61, 0x200): dirty
	 */
	gfn_offset = TEST_DATA_START_GFN + 0x33;
	query_cmma_range(vm, gfn_offset, 0x40 - 0x33, &log);
	assert_cmma_dirty(gfn_offset, 0x40 - 0x33, &log);

	/**
	 * Query the remaining pages [0x61, 0x200).
	 */
	gfn_offset = TEST_DATA_START_GFN;
	query_cmma_range(vm, gfn_offset, TEST_DATA_PAGE_COUNT - 0x61, &log);
	assert_cmma_dirty(TEST_DATA_START_GFN + 0x61, TEST_DATA_PAGE_COUNT - 0x61, &log);

	assert_no_pages_cmma_dirty(vm);

	kvm_vm_free(vm);
}

struct testdef {
	const char *name;
	void (*test)(void);
} testlist[] = {
	{ "migration mode and dirty tracking", test_migration_mode },
	{ "GET_CMMA_BITS: basic calls", test_get_cmma_basic },
	{ "GET_CMMA_BITS: all pages are dirty initially", test_get_initial_dirty },
	{ "GET_CMMA_BITS: holes are skipped", test_get_skip_holes },
};

/**
 * The kernel may support CMMA, but the machine may not (e.g. when running
 * as a guest-3).
 *
 * In this case, the CMMA capabilities are all there, but the CMMA-related
 * ioctls fail. To find out whether the machine supports CMMA, create a
 * temporary VM and then query the CMMA feature of the VM.
 */
static int machine_has_cmma(void)
{
	struct kvm_vm *vm = vm_create_barebones();
	int r;

	/* __kvm_has_device_attr() returns 0 if the attribute is supported */
	r = !__kvm_has_device_attr(vm->fd, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA);
	kvm_vm_free(vm);

	return r;
}

int main(int argc, char *argv[])
{
	int idx;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SYNC_REGS));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_CMMA_MIGRATION));
	TEST_REQUIRE(machine_has_cmma());

	ksft_print_header();

	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		testlist[idx].test();
		ksft_test_result_pass("%s\n", testlist[idx].name);
	}

	ksft_finished();	/* Print results and exit() accordingly */
}