// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Test for s390x KVM_S390_MEM_OP
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <pthread.h>

#include <linux/bits.h>

#include "test_util.h"
#include "kvm_util.h"
#include "kselftest.h"
#include "ucall_common.h"
#include "processor.h"

enum mop_target {
	LOGICAL,
	SIDA,
	ABSOLUTE,
	INVALID,
};

enum mop_access_mode {
	READ,
	WRITE,
	CMPXCHG,
};

struct mop_desc {
	uintptr_t gaddr;
	uintptr_t gaddr_v;
	uint64_t set_flags;
	unsigned int f_check : 1;
	unsigned int f_inject : 1;
	unsigned int f_key : 1;
	unsigned int _gaddr_v : 1;
	unsigned int _set_flags : 1;
	unsigned int _sida_offset : 1;
	unsigned int _ar : 1;
	uint32_t size;
	enum mop_target target;
	enum mop_access_mode mode;
	void *buf;
	uint32_t sida_offset;
	void *old;
	uint8_t old_value[16];
	bool *cmpxchg_success;
	uint8_t ar;
	uint8_t key;
};

const uint8_t NO_KEY = 0xff;

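/*
 * Convert the test-internal mop_desc into the kvm_s390_mem_op struct that the
 * KVM_S390_MEM_OP ioctl expects. The reserved field is deliberately filled
 * with a dummy string, presumably so the test also exercises the kernel's
 * promise to ignore reserved bytes.
 */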
static struct kvm_s390_mem_op ksmo_from_desc(struct mop_desc *desc)
{
	struct kvm_s390_mem_op ksmo = {
		.gaddr = (uintptr_t)desc->gaddr,
		.size = desc->size,
		.buf = ((uintptr_t)desc->buf),
		.reserved = "ignored_ignored_ignored_ignored"
	};

	switch (desc->target) {
	case LOGICAL:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_LOGICAL_WRITE;
		break;
	case SIDA:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_SIDA_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_SIDA_WRITE;
		break;
	case ABSOLUTE:
		if (desc->mode == READ)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_READ;
		if (desc->mode == WRITE)
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_WRITE;
		if (desc->mode == CMPXCHG) {
			ksmo.op = KVM_S390_MEMOP_ABSOLUTE_CMPXCHG;
			ksmo.old_addr = (uint64_t)desc->old;
			memcpy(desc->old_value, desc->old, desc->size);
		}
		break;
	case INVALID:
		ksmo.op = -1;
	}
	if (desc->f_check)
		ksmo.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
	if (desc->f_inject)
		ksmo.flags |= KVM_S390_MEMOP_F_INJECT_EXCEPTION;
	if (desc->_set_flags)
		ksmo.flags = desc->set_flags;
	if (desc->f_key && desc->key != NO_KEY) {
		ksmo.flags |= KVM_S390_MEMOP_F_SKEY_PROTECTION;
		ksmo.key = desc->key;
	}
	if (desc->_ar)
		ksmo.ar = desc->ar;
	else
		ksmo.ar = 0;
	if (desc->_sida_offset)
		ksmo.sida_offset = desc->sida_offset;

	return ksmo;
}

struct test_info {
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
};

#define PRINT_MEMOP false
static void print_memop(struct kvm_vcpu *vcpu, const struct kvm_s390_mem_op *ksmo)
{
	if (!PRINT_MEMOP)
		return;

	if (!vcpu)
		printf("vm memop(");
	else
		printf("vcpu memop(");
	switch (ksmo->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		printf("LOGICAL, READ, ");
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		printf("LOGICAL, WRITE, ");
		break;
	case KVM_S390_MEMOP_SIDA_READ:
		printf("SIDA, READ, ");
		break;
	case KVM_S390_MEMOP_SIDA_WRITE:
		printf("SIDA, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_READ:
		printf("ABSOLUTE, READ, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_WRITE:
		printf("ABSOLUTE, WRITE, ");
		break;
	case KVM_S390_MEMOP_ABSOLUTE_CMPXCHG:
		printf("ABSOLUTE, CMPXCHG, ");
		break;
	}
	printf("gaddr=%llu, size=%u, buf=%llu, ar=%u, key=%u, old_addr=%llx",
	       ksmo->gaddr, ksmo->size, ksmo->buf, ksmo->ar, ksmo->key,
	       ksmo->old_addr);
	if (ksmo->flags & KVM_S390_MEMOP_F_CHECK_ONLY)
		printf(", CHECK_ONLY");
	if (ksmo->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION)
		printf(", INJECT_EXCEPTION");
	if (ksmo->flags & KVM_S390_MEMOP_F_SKEY_PROTECTION)
		printf(", SKEY_PROTECTION");
	puts(")");
}

static int err_memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			   struct mop_desc *desc)
{
	struct kvm_vcpu *vcpu = info.vcpu;

	if (!vcpu)
		return __vm_ioctl(info.vm, KVM_S390_MEM_OP, ksmo);
	else
		return __vcpu_ioctl(vcpu, KVM_S390_MEM_OP, ksmo);
}

static void memop_ioctl(struct test_info info, struct kvm_s390_mem_op *ksmo,
			struct mop_desc *desc)
{
	int r;

	r = err_memop_ioctl(info, ksmo, desc);
	if (ksmo->op == KVM_S390_MEMOP_ABSOLUTE_CMPXCHG) {
		if (desc->cmpxchg_success) {
			int diff = memcmp(desc->old_value, desc->old, desc->size);
			*desc->cmpxchg_success = !diff;
		}
	}
	TEST_ASSERT(!r, __KVM_IOCTL_ERROR("KVM_S390_MEM_OP", r));
}

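/*
 * MEMOP() is the central helper of this test: it builds a mop_desc from the
 * designated initializers passed as varargs, translates a guest virtual
 * address (GADDR_V) to a guest absolute address when the target is ABSOLUTE,
 * and issues the ioctl against either the VM or the vCPU fd. The "err"
 * prefix selects err_memop_ioctl(), which returns the raw ioctl result
 * instead of asserting success.
 */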
#define MEMOP(err, info_p, mop_target_p, access_mode_p, buf_p, size_p, ...)	\
({										\
	struct test_info __info = (info_p);					\
	struct mop_desc __desc = {						\
		.target = (mop_target_p),					\
		.mode = (access_mode_p),					\
		.buf = (buf_p),							\
		.size = (size_p),						\
		__VA_ARGS__							\
	};									\
	struct kvm_s390_mem_op __ksmo;						\
										\
	if (__desc._gaddr_v) {							\
		if (__desc.target == ABSOLUTE)					\
			__desc.gaddr = addr_gva2gpa(__info.vm, __desc.gaddr_v);	\
		else								\
			__desc.gaddr = __desc.gaddr_v;				\
	}									\
	__ksmo = ksmo_from_desc(&__desc);					\
	print_memop(__info.vcpu, &__ksmo);					\
	err##memop_ioctl(__info, &__ksmo, &__desc);				\
})

#define MOP(...) MEMOP(, __VA_ARGS__)
#define ERR_MOP(...) MEMOP(err_, __VA_ARGS__)

#define GADDR(a) .gaddr = ((uintptr_t)a)
#define GADDR_V(v) ._gaddr_v = 1, .gaddr_v = ((uintptr_t)v)
#define CHECK_ONLY .f_check = 1
#define SET_FLAGS(f) ._set_flags = 1, .set_flags = (f)
#define SIDA_OFFSET(o) ._sida_offset = 1, .sida_offset = (o)
#define AR(a) ._ar = 1, .ar = (a)
#define KEY(a) .f_key = 1, .key = (a)
#define INJECT .f_inject = 1
#define CMPXCHG_OLD(o) .old = (o)
#define CMPXCHG_SUCCESS(s) .cmpxchg_success = (s)

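/*
 * Run the same memop twice: first with CHECK_ONLY set, so only the access
 * check is performed, then for real. Both invocations must succeed.
 */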
#define CHECK_N_DO(f, ...) ({ f(__VA_ARGS__, CHECK_ONLY); f(__VA_ARGS__); })

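/*
 * Control register 0 bits enabling the protection overrides, using the
 * z/Architecture convention of numbering bits from the MSB (bit 0) down.
 */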
#define CR0_FETCH_PROTECTION_OVERRIDE	(1UL << (63 - 38))
#define CR0_STORAGE_PROTECTION_OVERRIDE	(1UL << (63 - 39))

static uint8_t __aligned(PAGE_SIZE) mem1[65536];
static uint8_t __aligned(PAGE_SIZE) mem2[65536];

struct test_default {
	struct kvm_vm *kvm_vm;
	struct test_info vm;
	struct test_info vcpu;
	struct kvm_run *run;
	int size;
};

static struct test_default test_default_init(void *guest_code)
{
	struct kvm_vcpu *vcpu;
	struct test_default t;

	t.size = min((size_t)kvm_check_cap(KVM_CAP_S390_MEM_OP), sizeof(mem1));
	t.kvm_vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	t.vm = (struct test_info) { t.kvm_vm, NULL };
	t.vcpu = (struct test_info) { t.kvm_vm, vcpu };
	t.run = vcpu->run;
	return t;
}

enum stage {
	/* Synced state set by host, e.g. DAT */
	STAGE_INITED,
	/* Guest did nothing */
	STAGE_IDLED,
	/* Guest set storage keys (specifics up to test case) */
	STAGE_SKEYS_SET,
	/* Guest copied memory (locations up to test case) */
	STAGE_COPIED,
	/* End of guest code reached */
	STAGE_DONE,
};

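/*
 * Let the guest run until it issues the next GUEST_SYNC() and assert that it
 * reports the expected stage; any guest-side assertion failure is propagated
 * as a host test failure.
 */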
#define HOST_SYNC(info_p, stage)					\
({									\
	struct test_info __info = (info_p);				\
	struct kvm_vcpu *__vcpu = __info.vcpu;				\
	struct ucall uc;						\
	int __stage = (stage);						\
									\
	vcpu_run(__vcpu);						\
	get_ucall(__vcpu, &uc);						\
	if (uc.cmd == UCALL_ABORT) {					\
		REPORT_GUEST_ASSERT(uc);				\
	}								\
	TEST_ASSERT_EQ(uc.cmd, UCALL_SYNC);				\
	TEST_ASSERT_EQ(uc.args[1], __stage);				\
})									\

static void prepare_mem12(void)
{
	int i;

	for (i = 0; i < sizeof(mem1); i++)
		mem1[i] = rand();
	memset(mem2, 0xaa, sizeof(mem2));
}

#define ASSERT_MEM_EQ(p1, p2, size) \
	TEST_ASSERT(!memcmp(p1, p2, size), "Memory contents do not match!")

static void default_write_read(struct test_info copy_cpu, struct test_info mop_cpu,
			       enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size,
		   GADDR_V(mem1), KEY(key));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_read(struct test_info copy_cpu, struct test_info mop_cpu,
			 enum mop_target mop_target, uint32_t size, uint8_t key)
{
	prepare_mem12();
	CHECK_N_DO(MOP, mop_cpu, mop_target, WRITE, mem1, size, GADDR_V(mem1));
	HOST_SYNC(copy_cpu, STAGE_COPIED);
	CHECK_N_DO(MOP, mop_cpu, mop_target, READ, mem2, size,
		   GADDR_V(mem2), KEY(key));
	ASSERT_MEM_EQ(mem1, mem2, size);
}

static void default_cmpxchg(struct test_default *test, uint8_t key)
{
	for (int size = 1; size <= 16; size *= 2) {
		for (int offset = 0; offset < 16; offset += size) {
			uint8_t __aligned(16) new[16] = {};
			uint8_t __aligned(16) old[16];
			bool succ;

			prepare_mem12();
			default_write_read(test->vcpu, test->vcpu, LOGICAL, 16, NO_KEY);

			memcpy(&old, mem1, 16);
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(succ, "exchange of values should succeed");
			memcpy(mem1 + offset, new + offset, size);
			ASSERT_MEM_EQ(mem1, mem2, 16);

			memcpy(&old, mem1, 16);
			new[offset]++;
			old[offset]++;
			MOP(test->vm, ABSOLUTE, CMPXCHG, new + offset,
			    size, GADDR_V(mem1 + offset),
			    CMPXCHG_OLD(old + offset),
			    CMPXCHG_SUCCESS(&succ), KEY(key));
			HOST_SYNC(test->vcpu, STAGE_COPIED);
			MOP(test->vm, ABSOLUTE, READ, mem2, 16, GADDR_V(mem2));
			TEST_ASSERT(!succ, "exchange of values should not succeed");
			ASSERT_MEM_EQ(mem1, mem2, 16);
			ASSERT_MEM_EQ(&old, mem1, 16);
		}
	}
}

static void guest_copy(void)
{
	GUEST_SYNC(STAGE_INITED);
	memcpy(&mem2, &mem1, sizeof(mem2));
	GUEST_SYNC(STAGE_COPIED);
}

static void test_copy(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, NO_KEY);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_access_register(void)
{
	struct test_default t = test_default_init(guest_copy);

	HOST_SYNC(t.vcpu, STAGE_INITED);

	prepare_mem12();
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */

	/*
	 * The primary address space is used if an access register
	 * contains zero. The host makes use of AR[1], so it is a good
	 * candidate for ensuring the guest AR (of zero) is used.
	 */
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size,
		   GADDR_V(mem1), AR(1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);

	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, t.size,
		   GADDR_V(mem2), AR(1));
	ASSERT_MEM_EQ(mem1, mem2, t.size);

	kvm_vm_free(t.kvm_vm);
}

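/*
 * Set the storage key of every page in [addr, addr + len) from guest code.
 * LRA translates the virtual address to an absolute address (and sets the
 * condition code if the page is not mapped), then SSKE sets the key.
 */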
static void set_storage_key_range(void *addr, size_t len, uint8_t key)
{
	uintptr_t _addr, abs, i;
	int not_mapped = 0;

	_addr = (uintptr_t)addr;
	for (i = _addr & PAGE_MASK; i < _addr + len; i += PAGE_SIZE) {
		abs = i;
		asm volatile (
			       "lra	%[abs], 0(0,%[abs])\n"
			"	jz	0f\n"
			"	llill	%[not_mapped],1\n"
			"	j	1f\n"
			"0:	sske	%[key], %[abs]\n"
			"1:"
			: [abs] "+&a" (abs), [not_mapped] "+r" (not_mapped)
			: [key] "r" (key)
			: "cc"
		);
		GUEST_ASSERT_EQ(not_mapped, 0);
	}
}

static void guest_copy_key(void)
{
	set_storage_key_range(mem1, sizeof(mem1), 0x90);
	set_storage_key_range(mem2, sizeof(mem2), 0x90);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, no key */
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, NO_KEY);

	/* vm/vcpu, matching key or key 0 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);
	/*
	 * There used to be different code paths for key handling depending on
	 * whether the region crossed a page boundary.
	 * There currently are not, but the more tests the merrier.
	 */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 0);
	default_write_read(t.vcpu, t.vcpu, LOGICAL, 1, 9);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 0);
	default_write_read(t.vcpu, t.vm, ABSOLUTE, 1, 9);

	/* vm/vcpu, mismatching keys on read, but no fetch protection */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key);

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	default_cmpxchg(&t, NO_KEY);
	default_cmpxchg(&t, 0);
	default_cmpxchg(&t, 9);

	kvm_vm_free(t.kvm_vm);
}

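/*
 * Helpers for the concurrent cmpxchg test below: cut_to_size() truncates a
 * value to the operand width, rotate() performs a bitwise rotation within
 * that width, and popcount_eq() checks that two 128-bit values contain the
 * same number of set bits.
 */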
static __uint128_t cut_to_size(int size, __uint128_t val)
{
	switch (size) {
	case 1:
		return (uint8_t)val;
	case 2:
		return (uint16_t)val;
	case 4:
		return (uint32_t)val;
	case 8:
		return (uint64_t)val;
	case 16:
		return val;
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

static bool popcount_eq(__uint128_t a, __uint128_t b)
{
	unsigned int count_a, count_b;

	count_a = __builtin_popcountl((uint64_t)(a >> 64)) +
		  __builtin_popcountl((uint64_t)a);
	count_b = __builtin_popcountl((uint64_t)(b >> 64)) +
		  __builtin_popcountl((uint64_t)b);
	return count_a == count_b;
}

static __uint128_t rotate(int size, __uint128_t val, int amount)
{
	unsigned int bits = size * 8;

	amount = (amount + bits) % bits;
	val = cut_to_size(size, val);
	if (!amount)
		return val;
	return (val << (bits - amount)) | (val >> amount);
}

const unsigned int max_block = 16;

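/*
 * Deterministic pseudo-random choices, derived from the iteration count with
 * simple linear-congruential-style mixing; guest and host use different
 * multipliers, presumably so that they pick different block sizes and
 * offsets. Both kinds of permutation produced below (byte swap or rotation)
 * only move bits around, which is what the final popcount_eq() check in the
 * concurrent test relies on.
 */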
static void choose_block(bool guest, int i, int *size, int *offset)
{
	unsigned int rand;

	rand = i;
	if (guest) {
		rand = rand * 19 + 11;
		*size = 1 << ((rand % 3) + 2);
		rand = rand * 19 + 11;
		*offset = (rand % max_block) & ~(*size - 1);
	} else {
		rand = rand * 17 + 5;
		*size = 1 << (rand % 5);
		rand = rand * 17 + 5;
		*offset = (rand % max_block) & ~(*size - 1);
	}
}

static __uint128_t permutate_bits(bool guest, int i, int size, __uint128_t old)
{
	unsigned int rand;
	int amount;
	bool swap;

	rand = i;
	rand = rand * 3 + 1;
	if (guest)
		rand = rand * 3 + 1;
	swap = rand % 2 == 0;
	if (swap) {
		int i, j;
		__uint128_t new;
		uint8_t byte0, byte1;

		rand = rand * 3 + 1;
		i = rand % size;
		rand = rand * 3 + 1;
		j = rand % size;
		if (i == j)
			return old;
		new = rotate(16, old, i * 8);
		byte0 = new & 0xff;
		new &= ~0xff;
		new = rotate(16, new, -i * 8);
		new = rotate(16, new, j * 8);
		byte1 = new & 0xff;
		new = (new & ~0xff) | byte0;
		new = rotate(16, new, -j * 8);
		new = rotate(16, new, i * 8);
		new = new | byte1;
		new = rotate(16, new, -i * 8);
		return new;
	}
	rand = rand * 3 + 1;
	amount = rand % (size * 8);
	return rotate(size, old, amount);
}

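/*
 * Guest-side compare-and-swap using the native instructions: CS for 4 bytes,
 * CSG for 8, CDSG for 16. Returns true on successful exchange; in either
 * case *old_addr is updated to the value observed in memory, mirroring the
 * semantics of the KVM_S390_MEMOP_ABSOLUTE_CMPXCHG operation.
 */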
static bool _cmpxchg(int size, void *target, __uint128_t *old_addr, __uint128_t new)
{
	bool ret;

	switch (size) {
	case 4: {
			uint32_t old = *old_addr;

			asm volatile ("cs %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(uint32_t *)(target))
			    : [new] "d" ((uint32_t)new)
			    : "cc"
			);
			ret = old == (uint32_t)*old_addr;
			*old_addr = old;
			return ret;
		}
	case 8: {
			uint64_t old = *old_addr;

			asm volatile ("csg %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(uint64_t *)(target))
			    : [new] "d" ((uint64_t)new)
			    : "cc"
			);
			ret = old == (uint64_t)*old_addr;
			*old_addr = old;
			return ret;
		}
	case 16: {
			__uint128_t old = *old_addr;

			asm volatile ("cdsg %[old],%[new],%[address]"
			    : [old] "+d" (old),
			      [address] "+Q" (*(__uint128_t *)(target))
			    : [new] "d" (new)
			    : "cc"
			);
			ret = old == *old_addr;
			*old_addr = old;
			return ret;
		}
	}
	GUEST_FAIL("Invalid size = %u", size);
	return 0;
}

const unsigned int cmpxchg_iter_outer = 100, cmpxchg_iter_inner = 10000;

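/*
 * Guest half of the concurrency test. Guest and host ping-pong on a 16-byte
 * cmpxchg of mem1 (the guest flips 1 -> 0, the host flips 0 -> 1), then both
 * sides concurrently apply bit permutations to blocks of mem2, retrying on
 * cmpxchg failure. Because every permutation merely moves bits around, the
 * total number of set bits in mem2 must be preserved regardless of how the
 * updates interleave, which is what the host checks at the end.
 */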
static void guest_cmpxchg_key(void)
{
	int size, offset;
	__uint128_t old, new;

	set_storage_key_range(mem1, max_block, 0x10);
	set_storage_key_range(mem2, max_block, 0x10);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 1;
		} while (!_cmpxchg(16, mem1, &old, 0));
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(true, i + j, &size, &offset);
			do {
				new = permutate_bits(true, i + j, size, old);
			} while (!_cmpxchg(size, mem2 + offset, &old, new));
		}
	}

	GUEST_SYNC(STAGE_DONE);
}

static void *run_guest(void *data)
{
	struct test_info *info = data;

	HOST_SYNC(*info, STAGE_DONE);
	return NULL;
}

static char *quad_to_char(__uint128_t *quad, int size)
{
	return ((char *)quad) + (sizeof(*quad) - size);
}

static void test_cmpxchg_key_concurrent(void)
{
	struct test_default t = test_default_init(guest_cmpxchg_key);
	int size, offset;
	__uint128_t old, new;
	bool success;
	pthread_t thread;

	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, max_block, GADDR_V(mem2));
	pthread_create(&thread, NULL, run_guest, &t.vcpu);

	for (int i = 0; i < cmpxchg_iter_outer; i++) {
		do {
			old = 0;
			new = 1;
			MOP(t.vm, ABSOLUTE, CMPXCHG, &new,
			    sizeof(new), GADDR_V(mem1),
			    CMPXCHG_OLD(&old),
			    CMPXCHG_SUCCESS(&success), KEY(1));
		} while (!success);
		for (int j = 0; j < cmpxchg_iter_inner; j++) {
			choose_block(false, i + j, &size, &offset);
			do {
				new = permutate_bits(false, i + j, size, old);
				MOP(t.vm, ABSOLUTE, CMPXCHG, quad_to_char(&new, size),
				    size, GADDR_V(mem2 + offset),
				    CMPXCHG_OLD(quad_to_char(&old, size)),
				    CMPXCHG_SUCCESS(&success), KEY(1));
			} while (!success);
		}
	}

	pthread_join(thread, NULL);

	MOP(t.vcpu, LOGICAL, READ, mem2, max_block, GADDR_V(mem2));
	TEST_ASSERT(popcount_eq(*(__uint128_t *)mem1, *(__uint128_t *)mem2),
		    "Must retain number of set bits");

	kvm_vm_free(t.kvm_vm);
}

static void guest_copy_key_fetch_prot(void)
{
	/*
	 * For some reason combining the first sync with override enablement
	 * results in an exception when calling HOST_SYNC.
	 */
	GUEST_SYNC(STAGE_INITED);
	/* Storage protection override applies to both store and fetch. */
	set_storage_key_range(mem1, sizeof(mem1), 0x98);
	set_storage_key_range(mem2, sizeof(mem2), 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		memcpy(&mem2, &mem1, sizeof(mem2));
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys, storage protection override in effect */
	default_write_read(t.vcpu, t.vcpu, LOGICAL, t.size, 2);

	kvm_vm_free(t.kvm_vm);
}

static void test_copy_key_fetch_prot(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, matching key, fetch protection in effect */
	default_read(t.vcpu, t.vcpu, LOGICAL, t.size, 9);
	default_read(t.vcpu, t.vm, ABSOLUTE, t.size, 9);

	kvm_vm_free(t.kvm_vm);
}

/* Program interruption code 4 is a protection exception. */
#define ERR_PROT_MOP(...)							\
({										\
	int rv;									\
										\
	rv = ERR_MOP(__VA_ARGS__);						\
	TEST_ASSERT(rv == 4, "Should result in protection exception");		\
})

static void guest_error_key(void)
{
	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(mem1, PAGE_SIZE, 0x18);
	set_storage_key_range(mem1 + PAGE_SIZE, sizeof(mem1) - PAGE_SIZE, 0x98);
	GUEST_SYNC(STAGE_SKEYS_SET);
	GUEST_SYNC(STAGE_IDLED);
}

static void test_errors_key(void)
{
	struct test_default t = test_default_init(guest_error_key);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm/vcpu, mismatching keys, fetch protection in effect */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem1), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg_key(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);
	int i;

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	for (i = 1; i <= 16; i *= 2) {
		__uint128_t old = 0;

		ERR_PROT_MOP(t.vm, ABSOLUTE, CMPXCHG, mem2, i, GADDR_V(mem2),
			     CMPXCHG_OLD(&old), KEY(2));
	}

	kvm_vm_free(t.kvm_vm);
}

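/*
 * Check the TEID reported for a protection exception that terminates the
 * memop. The offsets into the prefix (lowcore) area used below are assumed
 * to follow the z/Architecture lowcore layout: 168 (0xa8) holds the
 * translation-exception identification, 464 (0x1d0) the program-check new
 * PSW.
 */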
static void test_termination(void)
{
	struct test_default t = test_default_init(guest_error_key);
	uint64_t prefix;
	uint64_t teid;
	uint64_t teid_mask = BIT(63 - 56) | BIT(63 - 60) | BIT(63 - 61);
	uint64_t psw[2];

	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys after first page */
	ERR_PROT_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), KEY(1), INJECT);
	/*
	 * The memop injected a program exception and the test needs to check the
	 * Translation-Exception Identification (TEID). It is necessary to run
	 * the guest in order to be able to read the TEID from guest memory.
	 * Set the guest program new PSW, so the guest state is not clobbered.
	 */
	prefix = t.run->s.regs.prefix;
	psw[0] = t.run->psw_mask;
	psw[1] = t.run->psw_addr;
	MOP(t.vm, ABSOLUTE, WRITE, psw, sizeof(psw), GADDR(prefix + 464));
	HOST_SYNC(t.vcpu, STAGE_IDLED);
	MOP(t.vm, ABSOLUTE, READ, &teid, sizeof(teid), GADDR(prefix + 168));
	/* Bits 56, 60, 61 form a code, 0 being the only one allowing for termination */
	TEST_ASSERT_EQ(teid & teid_mask, 0);

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_storage_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot);

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_STORAGE_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vm, mismatching keys, storage protection override not applicable to vm */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, WRITE, mem1, t.size, GADDR_V(mem1), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, t.size, GADDR_V(mem2), KEY(2));

	kvm_vm_free(t.kvm_vm);
}

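/*
 * Fetch protection override exempts the first 2048 bytes of the address
 * space from key-controlled fetch protection, which is why the tests below
 * operate on 2048-byte ranges at address 0 and across the wraparound from
 * the last page.
 */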
const uint64_t last_page_addr = -PAGE_SIZE;

static void guest_copy_key_fetch_prot_override(void)
{
	int i;
	char *page_0 = 0;

	GUEST_SYNC(STAGE_INITED);
	set_storage_key_range(0, PAGE_SIZE, 0x18);
	set_storage_key_range((void *)last_page_addr, PAGE_SIZE, 0x0);
	asm volatile ("sske %[key],%[addr]\n" :: [addr] "r"(0L), [key] "r"(0x18) : "cc");
	GUEST_SYNC(STAGE_SKEYS_SET);

	for (;;) {
		for (i = 0; i < PAGE_SIZE; i++)
			page_0[i] = mem1[i];
		GUEST_SYNC(STAGE_COPIED);
	}
}

static void test_copy_key_fetch_prot_override(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}

	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override applies */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, PAGE_SIZE, GADDR_V(mem1));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

	/*
	 * vcpu, mismatching keys on fetch, fetch protection override applies,
	 * wraparound
	 */
	prepare_mem12();
	MOP(t.vcpu, LOGICAL, WRITE, mem1, 2 * PAGE_SIZE, GADDR_V(guest_last_page));
	HOST_SYNC(t.vcpu, STAGE_COPIED);
	CHECK_N_DO(MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048,
		   GADDR_V(guest_last_page), KEY(2));
	ASSERT_MEM_EQ(mem1, mem2, 2048);

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_not_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/* vcpu, mismatching keys on fetch, fetch protection override not enabled */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048, GADDR_V(0), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void test_errors_key_fetch_prot_override_enabled(void)
{
	struct test_default t = test_default_init(guest_copy_key_fetch_prot_override);
	vm_vaddr_t guest_0_page, guest_last_page;

	guest_0_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, 0);
	guest_last_page = vm_vaddr_alloc(t.kvm_vm, PAGE_SIZE, last_page_addr);
	if (guest_0_page != 0 || guest_last_page != last_page_addr) {
		print_skip("did not allocate guest pages at required positions");
		goto out;
	}
	HOST_SYNC(t.vcpu, STAGE_INITED);
	t.run->s.regs.crs[0] |= CR0_FETCH_PROTECTION_OVERRIDE;
	t.run->kvm_dirty_regs = KVM_SYNC_CRS;
	HOST_SYNC(t.vcpu, STAGE_SKEYS_SET);

	/*
	 * vcpu, mismatching keys on fetch,
	 * fetch protection override does not apply because the memory range is
	 * exceeded
	 */
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, 2048 + 1, GADDR_V(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vcpu, LOGICAL, READ, mem2, PAGE_SIZE + 2048 + 1,
		   GADDR_V(guest_last_page), KEY(2));
	/* vm, fetch protection override does not apply */
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR(0), KEY(2));
	CHECK_N_DO(ERR_PROT_MOP, t.vm, ABSOLUTE, READ, mem2, 2048, GADDR_V(guest_0_page), KEY(2));

out:
	kvm_vm_free(t.kvm_vm);
}

static void guest_idle(void)
{
	GUEST_SYNC(STAGE_INITED); /* for consistency's sake */
	for (;;)
		GUEST_SYNC(STAGE_IDLED);
}

static void _test_errors_common(struct test_info info, enum mop_target target, int size)
{
	int rv;

	/* Bad size: */
	rv = ERR_MOP(info, target, WRITE, mem1, -1, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == E2BIG, "ioctl allows insane sizes");

	/* Zero size: */
	rv = ERR_MOP(info, target, WRITE, mem1, 0, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && (errno == EINVAL || errno == ENOMEM),
		    "ioctl allows 0 as size");

	/* Bad flags: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), SET_FLAGS(-1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows all flags");

	/* Bad guest address: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL), CHECK_ONLY);
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address with CHECK_ONLY");
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR((void *)~0xfffUL));
	TEST_ASSERT(rv > 0, "ioctl does not report bad guest memory address on write");

	/* Bad host address: */
	rv = ERR_MOP(info, target, WRITE, 0, size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EFAULT,
		    "ioctl does not report bad host memory address");

	/* Bad key: */
	rv = ERR_MOP(info, target, WRITE, mem1, size, GADDR_V(mem1), KEY(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows invalid key");
}

static void test_errors(void)
{
	struct test_default t = test_default_init(guest_idle);
	int rv;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	_test_errors_common(t.vcpu, LOGICAL, t.size);
	_test_errors_common(t.vm, ABSOLUTE, t.size);

	/* Bad operation: */
	rv = ERR_MOP(t.vcpu, INVALID, WRITE, mem1, t.size, GADDR_V(mem1));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");
	/* virtual addresses are not translated when passing INVALID */
	rv = ERR_MOP(t.vm, INVALID, WRITE, mem1, PAGE_SIZE, GADDR(0));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows bad operations");

	/* Bad access register: */
	t.run->psw_mask &= ~(3UL << (63 - 17));
	t.run->psw_mask |= 1UL << (63 - 17);  /* Enable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* To sync new state to SIE block */
	rv = ERR_MOP(t.vcpu, LOGICAL, WRITE, mem1, t.size, GADDR_V(mem1), AR(17));
	TEST_ASSERT(rv == -1 && errno == EINVAL, "ioctl allows ARs > 15");
	t.run->psw_mask &= ~(3UL << (63 - 17));   /* Disable AR mode */
	HOST_SYNC(t.vcpu, STAGE_IDLED); /* Run to sync new state */

	/* Check that the SIDA calls are rejected for non-protected guests */
	rv = ERR_MOP(t.vcpu, SIDA, READ, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_READ in non-protected mode");
	rv = ERR_MOP(t.vcpu, SIDA, WRITE, mem1, 8, GADDR(0), SIDA_OFFSET(0x1c0));
	TEST_ASSERT(rv == -1 && errno == EINVAL,
		    "ioctl does not reject SIDA_WRITE in non-protected mode");

	kvm_vm_free(t.kvm_vm);
}

static void test_errors_cmpxchg(void)
{
	struct test_default t = test_default_init(guest_idle);
	__uint128_t old;
	int rv, i, power = 1;

	HOST_SYNC(t.vcpu, STAGE_INITED);

	/* Any size that is not a power of two between 1 and 16 must be rejected. */
	for (i = 0; i < 32; i++) {
		if (i == power) {
			power *= 2;
			continue;
		}
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad size for cmpxchg");
	}
	for (i = 1; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR((void *)~0xfffUL),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv > 0, "ioctl allows bad guest address for cmpxchg");
	}
	for (i = 2; i <= 16; i *= 2) {
		rv = ERR_MOP(t.vm, ABSOLUTE, CMPXCHG, mem1, i, GADDR_V(mem1 + 1),
			     CMPXCHG_OLD(&old));
		TEST_ASSERT(rv == -1 && errno == EINVAL,
			    "ioctl allows bad alignment for cmpxchg");
	}

	kvm_vm_free(t.kvm_vm);
}

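/*
 * The value returned for KVM_CAP_S390_MEM_OP_EXTENSION is treated as a bit
 * mask below: a non-zero value indicates storage key support, and bit 0x2
 * indicates support for the cmpxchg operation.
 */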
int main(int argc, char *argv[])
{
	int extension_cap, idx;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_S390_MEM_OP));
	extension_cap = kvm_check_cap(KVM_CAP_S390_MEM_OP_EXTENSION);

	struct testdef {
		const char *name;
		void (*test)(void);
		bool requirements_met;
	} testlist[] = {
		{
			.name = "simple copy",
			.test = test_copy,
			.requirements_met = true,
		},
		{
			.name = "generic error checks",
			.test = test_errors,
			.requirements_met = true,
		},
		{
			.name = "copy with storage keys",
			.test = test_copy_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "cmpxchg with storage keys",
			.test = test_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "concurrently cmpxchg with storage keys",
			.test = test_cmpxchg_key_concurrent,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "copy with key storage protection override",
			.test = test_copy_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection",
			.test = test_copy_key_fetch_prot,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with key fetch protection override",
			.test = test_copy_key_fetch_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "copy with access register mode",
			.test = test_copy_access_register,
			.requirements_met = true,
		},
		{
			.name = "error checks with key",
			.test = test_errors_key,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks for cmpxchg with key",
			.test = test_errors_cmpxchg_key,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "error checks for cmpxchg",
			.test = test_errors_cmpxchg,
			.requirements_met = extension_cap & 0x2,
		},
		{
			.name = "termination",
			.test = test_termination,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key storage protection override",
			.test = test_errors_key_storage_prot_override,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks without key fetch prot override",
			.test = test_errors_key_fetch_prot_override_not_enabled,
			.requirements_met = extension_cap > 0,
		},
		{
			.name = "error checks with key fetch prot override",
			.test = test_errors_key_fetch_prot_override_enabled,
			.requirements_met = extension_cap > 0,
		},
	};

	ksft_print_header();
	ksft_set_plan(ARRAY_SIZE(testlist));

	for (idx = 0; idx < ARRAY_SIZE(testlist); idx++) {
		if (testlist[idx].requirements_met) {
			testlist[idx].test();
			ksft_test_result_pass("%s\n", testlist[idx].name);
		} else {
			ksft_test_result_skip("%s - requirements not met (kernel has extension cap %#x)\n",
					      testlist[idx].name, extension_cap);
		}
	}

	ksft_finished();	/* Print results and exit() accordingly */
}
1188