xref: /linux/arch/s390/kvm/pv.c (revision 2174181019e4273e583a0f0a9795e9db38984784)
// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"

/**
 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
 * be destroyed
 *
 * @list: list head for the list of leftover VMs
 * @old_gmap_table: the gmap table of the leftover protected VM
 * @handle: the handle of the leftover protected VM
 * @stor_var: pointer to the variable storage of the leftover protected VM
 * @stor_base: address of the base storage of the leftover protected VM
 *
 * Represents a protected VM that is still registered with the Ultravisor,
 * but no longer corresponds to an active KVM VM. It should
 * be destroyed at some point later, either asynchronously or when the
 * process terminates.
 */
struct pv_vm_to_be_destroyed {
	struct list_head list;
	unsigned long old_gmap_table;
	u64 handle;
	void *stor_var;
	unsigned long stor_base;
};
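
/*
 * Note: the set_aside pointer holding an instance of this struct is stored
 * as a void * and handed directly to list_add() in
 * kvm_s390_pv_deinit_cleanup_all(), which only works because @list is the
 * first member of this struct.
 */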

static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
	kvm->arch.pv.handle = 0;
	kvm->arch.pv.guest_len = 0;
	kvm->arch.pv.stor_base = 0;
	kvm->arch.pv.stor_var = NULL;
}

int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	int cc;

	if (!kvm_s390_pv_cpu_get_handle(vcpu))
		return 0;

	cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
		     vcpu->vcpu_id, *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

	/*
	 * Only free the donated memory on success. If the UVC failed, the
	 * memory is intentionally leaked, since it may still be in use by
	 * the Ultravisor; this should never happen in practice.
	 */
	if (!cc)
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));

	free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
	vcpu->arch.sie_block->pv_handle_cpu = 0;
	vcpu->arch.sie_block->pv_handle_config = 0;
	memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
	vcpu->arch.sie_block->sdf = 0;
	/*
	 * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
	 * Use the reset value of gbea to avoid leaking the kernel pointer of
	 * the just freed sida.
	 */
	vcpu->arch.sie_block->gbea = 1;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

	return cc ? -EIO : 0;
}

int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
	struct uv_cb_csc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CPU,
		.header.len = sizeof(uvcb),
	};
	void *sida_addr;
	int cc;

	if (kvm_s390_pv_cpu_get_handle(vcpu))
		return -EINVAL;

	vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
						   get_order(uv_info.guest_cpu_stor_len));
	if (!vcpu->arch.pv.stor_base)
		return -ENOMEM;

	/* Input */
	uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
	uvcb.num = vcpu->arch.sie_block->icpua;
	uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
	uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

	/* Alloc Secure Instruction Data Area Designation */
	sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	if (!sida_addr) {
		free_pages(vcpu->arch.pv.stor_base,
			   get_order(uv_info.guest_cpu_stor_len));
		return -ENOMEM;
	}
	vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);

	cc = uv_call(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(vcpu->kvm, 3,
		     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
		     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
		     uvcb.header.rrc);

	if (cc) {
		u16 dummy;

		kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
		return -EIO;
	}

	/* Output */
	vcpu->arch.pv.handle = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
	vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
	vcpu->arch.sie_block->sdf = 2;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
	vfree(kvm->arch.pv.stor_var);
	free_pages(kvm->arch.pv.stor_base,
		   get_order(uv_info.guest_base_stor_len));
	kvm_s390_clear_pv_state(kvm);
}

static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
	unsigned long base = uv_info.guest_base_stor_len;
	unsigned long virt = uv_info.guest_virt_var_stor_len;
	unsigned long npages = 0, vlen = 0;

	kvm->arch.pv.stor_var = NULL;
	kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
	if (!kvm->arch.pv.stor_base)
		return -ENOMEM;

	/*
	 * Calculate the current guest storage size to allocate the
	 * variable storage, whose size depends on the guest size in MB.
	 *
	 * Slots are sorted by GFN, so the end of the last slot is the
	 * highest guest frame number.
	 */
	mutex_lock(&kvm->slots_lock);
	npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
	mutex_unlock(&kvm->slots_lock);

	kvm->arch.pv.guest_len = npages * PAGE_SIZE;

	/* Allocate variable storage */
	vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
	vlen += uv_info.guest_virt_base_stor_len;
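	/*
	 * Worked example with made-up numbers: if guest_virt_var_stor_len
	 * were 0x200 bytes per 1MB block (HPAGE_SIZE), a 4 GiB guest
	 * (4096 blocks) would need 4096 * 0x200 = 2 MiB of variable
	 * storage, page-aligned, plus guest_virt_base_stor_len on top.
	 */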
	kvm->arch.pv.stor_var = vzalloc(vlen);
	if (!kvm->arch.pv.stor_var)
		goto out_err;
	return 0;

out_err:
	kvm_s390_pv_dealloc_vm(kvm);
	return -ENOMEM;
}

/**
 * kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
 * @kvm: the KVM that was associated with this leftover protected VM
 * @leftover: details about the leftover protected VM that needs to be cleaned up
 * @rc: the RC code of the Destroy Secure Configuration UVC
 * @rrc: the RRC code of the Destroy Secure Configuration UVC
 *
 * Destroy one leftover protected VM.
 * On success, kvm->mm->context.protected_count will be decremented atomically
 * and all other resources used by the VM will be freed.
 *
 * Return: 0 in case of success, otherwise 1
 */
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
					    struct pv_vm_to_be_destroyed *leftover,
					    u16 *rc, u16 *rrc)
{
	int cc;

	/* The VM was already destroyed with the destroy-fast UVC, nothing left to do here */
	if (!leftover->handle)
		goto done_fast;
	cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
	/*
	 * If the UVC fails, the memory used for the VM and its metadata
	 * becomes permanently unusable and is intentionally leaked. This
	 * can only happen in case of a serious KVM or hardware bug; it is
	 * not expected to happen in normal operation.
	 */
	if (cc)
		return cc;
	free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
	free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
	vfree(leftover->stor_var);
done_fast:
	atomic_dec(&kvm->mm->context.protected_count);
	return 0;
}

/**
 * kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory.
 * @kvm: the VM whose memory is to be cleared.
 *
 * Destroy the first 2GB of guest memory, to avoid prefix issues after reboot.
 * The CPUs of the protected VM need to be destroyed beforehand.
 */
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
{
	const unsigned long pages_2g = SZ_2G / PAGE_SIZE;
	struct kvm_memory_slot *slot;
	unsigned long len;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* Take the memslot containing guest absolute address 0 */
	slot = gfn_to_memslot(kvm, 0);
	/* Clear all slots or parts thereof that are below 2GB */
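	/*
	 * Note: gfn_to_memslot() returns NULL for a GFN without a slot, so
	 * the walk below ends at the first hole in the memslots; guest
	 * memory below 2GB is presumably expected to be contiguous here.
	 */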
	while (slot && slot->base_gfn < pages_2g) {
		len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
		s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
		/* Take the next memslot */
		slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_destroy_fast uvcb = {
		.header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
		.header.len = sizeof(uvcb),
		.handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	if (rc)
		*rc = uvcb.header.rc;
	if (rrc)
		*rrc = uvcb.header.rrc;
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
		     uvcb.header.rc, uvcb.header.rrc);
	WARN_ONCE(cc, "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
		  kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
	/* Intended memory leak on "impossible" error */
	if (!cc)
		kvm_s390_pv_dealloc_vm(kvm);
	return cc ? -EIO : 0;
}

static inline bool is_destroy_fast_available(void)
{
	return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list);
}

/**
 * kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
 * @kvm: the VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Set aside the protected VM for a subsequent teardown. The VM will be able
 * to continue immediately as a non-secure VM, and the information needed to
 * properly tear down the protected VM is set aside. If another protected VM
 * was already set aside without starting its teardown, this function will
 * fail.
 * The CPUs of the protected VM need to be destroyed beforehand.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, -EINVAL if another protected VM was already set
 * aside, -ENOMEM if the system ran out of memory.
 */
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *priv;
	int res = 0;

	lockdep_assert_held(&kvm->lock);
	/*
	 * If another protected VM was already prepared for teardown, refuse.
	 * A normal deinitialization has to be performed instead.
	 */
	if (kvm->arch.pv.set_aside)
		return -EINVAL;

	/* Guest with segment type ASCE, refuse to destroy asynchronously */
	if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
		return -EINVAL;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	if (is_destroy_fast_available()) {
		res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
	} else {
		priv->stor_var = kvm->arch.pv.stor_var;
		priv->stor_base = kvm->arch.pv.stor_base;
		priv->handle = kvm_s390_pv_get_handle(kvm);
		priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
		WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
		if (s390_replace_asce(kvm->arch.gmap))
			res = -ENOMEM;
	}

	if (res) {
		kfree(priv);
		return res;
	}

	kvm_s390_destroy_lower_2g(kvm);
	kvm_s390_clear_pv_state(kvm);
	kvm->arch.pv.set_aside = priv;

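	/* Report success; 42 appears to be an arbitrary marker RRC, cf. kvm_s390_pv_deinit_aside_vm() */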
	*rc = UVC_RC_EXECUTED;
	*rrc = 42;
	return 0;
}
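
/*
 * Sketch of the intended call flow, assuming the userspace-facing
 * KVM_S390_PV_COMMAND subcommands (KVM_PV_ASYNC_CLEANUP_PREPARE and
 * KVM_PV_ASYNC_CLEANUP_PERFORM) map onto these helpers:
 *
 *   kvm_s390_cpus_from_pv(kvm, &rc, &rrc);  // destroy all secure CPUs first
 *   kvm_s390_pv_set_aside(kvm, &rc, &rrc);  // under kvm->lock; VM keeps running
 *   ...
 *   kvm_s390_pv_deinit_aside_vm(kvm, &rc, &rrc);  // later, without kvm->lock held
 */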

/**
 * kvm_s390_pv_deinit_vm - Deinitialize the current protected VM
 * @kvm: the KVM whose protected VM needs to be deinitialized
 * @rc: the RC code of the UVC
 * @rrc: the RRC code of the UVC
 *
 * Deinitialize the current protected VM. This function will destroy and
 * clean up the current protected VM, but it will not clean up the guest
 * memory. This function should only be called when the protected VM has
 * just been created and therefore does not have any guest memory, or when
 * the caller cleans up the guest memory separately.
 *
 * This function should not fail, but if it does, the donated memory must
 * not be freed.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	int cc;

	cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
			   UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
	WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
	if (!cc) {
		atomic_dec(&kvm->mm->context.protected_count);
		kvm_s390_pv_dealloc_vm(kvm);
	} else {
		/* Intended memory leak on "impossible" error */
		s390_replace_asce(kvm->arch.gmap);
	}
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
	WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);

	return cc ? -EIO : 0;
}

/**
 * kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated with a specific KVM.
 * @kvm: the KVM to be cleaned up
 * @rc: the RC code of the first failing UVC
 * @rrc: the RRC code of the first failing UVC
 *
 * This function will clean up all protected VMs associated with a KVM.
 * This includes the active one, the one prepared for deinitialization with
 * kvm_s390_pv_set_aside, and any still pending in the need_cleanup list.
 *
 * Context: kvm->lock needs to be held unless being called from
 * kvm_arch_destroy_vm.
 *
 * Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
 */
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *cur;
	bool need_zap = false;
	u16 _rc, _rrc;
	int cc = 0;

	/*
	 * Nothing to do if the counter was already 0. Otherwise make sure
	 * the counter does not reach 0 before calling s390_uv_destroy_range.
	 */
	if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
		return 0;

	*rc = 1;
	/* If the current VM is protected, destroy it */
	if (kvm_s390_pv_get_handle(kvm)) {
		cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
		need_zap = true;
	}

	/* If a previous protected VM was set aside, put it in the need_cleanup list */
	if (kvm->arch.pv.set_aside) {
		list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
		kvm->arch.pv.set_aside = NULL;
	}

	/* Cleanup all protected VMs in the need_cleanup list */
	while (!list_empty(&kvm->arch.pv.need_cleanup)) {
		cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
		need_zap = true;
		if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
			cc = 1;
			/*
			 * Only return the first error rc and rrc, so make
			 * sure it is not overwritten. All destroys will
			 * additionally be reported via KVM_UV_EVENT().
			 */
			if (*rc == UVC_RC_EXECUTED) {
				*rc = _rc;
				*rrc = _rrc;
			}
		}
		list_del(&cur->list);
		kfree(cur);
	}

	/*
	 * If the mm still has a mapping, try to mark all its pages as
	 * accessible. The counter should not reach zero before this
	 * cleanup has been performed.
	 */
	if (need_zap && mmget_not_zero(kvm->mm)) {
		s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
		mmput(kvm->mm);
	}

	/* Now the counter can safely reach 0 */
	atomic_dec(&kvm->mm->context.protected_count);
	return cc ? -EIO : 0;
}

/**
 * kvm_s390_pv_deinit_aside_vm - Tear down a previously set aside protected VM.
 * @kvm: the VM previously associated with the protected VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Tear down the protected VM that had been previously prepared for teardown
 * using kvm_s390_pv_set_aside(). Ideally this should be called by
 * userspace asynchronously from a separate thread.
 *
 * Context: kvm->lock must not be held.
 *
 * Return: 0 in case of success, -EINVAL if no protected VM had been
 * prepared for asynchronous teardown, -EIO in case of other errors.
 */
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct pv_vm_to_be_destroyed *p;
	int ret = 0;

	lockdep_assert_not_held(&kvm->lock);
	mutex_lock(&kvm->lock);
	p = kvm->arch.pv.set_aside;
	kvm->arch.pv.set_aside = NULL;
	mutex_unlock(&kvm->lock);
	if (!p)
		return -EINVAL;

	/* When a fatal signal is received, stop immediately */
	if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
		goto done;
	if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
		ret = -EIO;
	kfree(p);
	p = NULL;
done:
	/*
	 * p is not NULL if we aborted because of a fatal signal, in which
	 * case queue the leftover for later cleanup.
	 */
	if (p) {
		mutex_lock(&kvm->lock);
		list_add(&p->list, &kvm->arch.pv.need_cleanup);
		mutex_unlock(&kvm->lock);
		/* Did not finish, but pretend things went well */
		*rc = UVC_RC_EXECUTED;
		*rrc = 42;
	}
	return ret;
}

static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
					     struct mm_struct *mm)
{
	struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
	u16 dummy;
	int r;

	/*
	 * No locking is needed since this is the last thread of the last user of this
	 * struct mm.
	 * When the struct kvm gets deinitialized, this notifier is also
	 * unregistered. This means that if this notifier runs, then the
	 * struct kvm is still valid.
	 */
	r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
	if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
		kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
	.release = kvm_s390_pv_mmu_notifier_release,
};

int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
	struct uv_cb_cgc uvcb = {
		.header.cmd = UVC_CMD_CREATE_SEC_CONF,
		.header.len = sizeof(uvcb)
	};
	int cc, ret;
	u16 dummy;

	ret = kvm_s390_pv_alloc_vm(kvm);
	if (ret)
		return ret;

	/* Inputs */
	uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
	uvcb.guest_stor_len = kvm->arch.pv.guest_len;
	uvcb.guest_asce = kvm->arch.gmap->asce;
	uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
	uvcb.conf_base_stor_origin =
		virt_to_phys((void *)kvm->arch.pv.stor_base);
	uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x",
		     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc);

	/* Outputs */
	kvm->arch.pv.handle = uvcb.guest_handle;

	atomic_inc(&kvm->mm->context.protected_count);
	if (cc) {
		if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
			kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
		} else {
			atomic_dec(&kvm->mm->context.protected_count);
			kvm_s390_pv_dealloc_vm(kvm);
		}
		return -EIO;
	}
	kvm->arch.gmap->guest_handle = uvcb.guest_handle;
	/* Add the notifier only once. No races because we hold kvm->lock */
	if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
		kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
		mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
	}
	return 0;
}
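
/*
 * Sketch of the overall secure-guest bring-up, assuming the usual
 * KVM_S390_PV_COMMAND sequence driven by userspace (e.g. QEMU):
 *
 *   kvm_s390_pv_init_vm(kvm, &rc, &rrc);              // KVM_PV_ENABLE
 *   kvm_s390_pv_create_cpu(vcpu, &rc, &rrc);          // for each vCPU
 *   kvm_s390_pv_set_sec_parms(kvm, hdr, len, ...);    // SE header from the image
 *   kvm_s390_pv_unpack(kvm, addr, size, tweak, ...);  // per image component
 *   // followed by the Verify UVC before the guest is started
 */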

int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
			      u16 *rrc)
{
	struct uv_cb_ssc uvcb = {
		.header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
		.header.len = sizeof(uvcb),
		.sec_header_origin = (u64)hdr,
		.sec_header_len = length,
		.guest_handle = kvm_s390_pv_get_handle(kvm),
	};
	int cc = uv_call(0, (u64)&uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
		     *rc, *rrc);
	return cc ? -EINVAL : 0;
}

static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
		      u64 offset, u16 *rc, u16 *rrc)
{
	struct uv_cb_unp uvcb = {
		.header.cmd = UVC_CMD_UNPACK_IMG,
		.header.len = sizeof(uvcb),
		.guest_handle = kvm_s390_pv_get_handle(kvm),
		.gaddr = addr,
		.tweak[0] = tweak,
		.tweak[1] = offset,
	};
	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);

	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;

	if (ret && ret != -EAGAIN)
		KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
			     uvcb.gaddr, *rc, *rrc);
	return ret;
}
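
/*
 * Note: each page is unpacked with a two-part tweak: the caller-supplied
 * tweak value in tweak[0] and the page's offset within the image in
 * tweak[1]. This presumably mirrors how the image was encrypted, so a page
 * only decrypts correctly at its intended position.
 */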

int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
		       unsigned long tweak, u16 *rc, u16 *rrc)
{
	u64 offset = 0;
	int ret = 0;

	if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
		return -EINVAL;

	KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
		     addr, size);

	while (offset < size) {
		ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
		if (ret == -EAGAIN) {
			cond_resched();
			if (fatal_signal_pending(current))
				break;
			continue;
		}
		if (ret)
			break;
		addr += PAGE_SIZE;
		offset += PAGE_SIZE;
	}
	if (!ret)
		KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
	return ret;
}
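
/*
 * Minimal sketch of how a caller drives the unpack loop above, assuming an
 * image component described by (hypothetical) comp_gaddr/comp_len/comp_tweak
 * values taken from the secure execution header:
 *
 *   u16 rc, rrc;
 *   int r = kvm_s390_pv_unpack(kvm, comp_gaddr, comp_len, comp_tweak,
 *                              &rc, &rrc);
 *   if (r == -EAGAIN)
 *       // a fatal signal interrupted the loop; the unpack is incomplete
 *   else if (r)
 *       // rc and rrc hold the UVC codes of the failing page
 */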

int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
	struct uv_cb_cpu_set_state uvcb = {
		.header.cmd	= UVC_CMD_CPU_SET_STATE,
		.header.len	= sizeof(uvcb),
		.cpu_handle	= kvm_s390_pv_cpu_get_handle(vcpu),
		.state		= state,
	};
	int cc;

	cc = uv_call(0, (u64)&uvcb);
	KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
		     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
	if (cc)
		return -EINVAL;
	return 0;
}

int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_cpu uvcb = {
		.header.cmd = UVC_CMD_DUMP_CPU,
		.header.len = sizeof(uvcb),
		.cpu_handle = vcpu->arch.pv.handle,
		.dump_area_origin = (u64)buff,
	};
	int cc;

	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc;
}

/* Size of the cache for the storage state dump data. 1MB for now */
#define DUMP_BUFF_LEN HPAGE_SIZE

/**
 * kvm_s390_pv_dump_stor_state - Dump the storage state of guest memory.
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @gaddr: Starting absolute guest address for which the storage state
 *	   is requested.
 * @buff_user_len: Length of the buff_user buffer
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Stores buff_user_len bytes of tweak component values to buff_user,
 * starting with the 1MB block specified by the absolute guest address
 * (gaddr). The gaddr pointer will be updated to the first address for
 * which no data was written when returning to userspace. buff_user
 * might be written to even if an error rc is returned, for instance
 * if we encounter a fault after writing the first page of data.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the cache fails
 *  -EINVAL if gaddr is not aligned to 1MB
 *  -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
				u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_stor_state uvcb = {
		.header.cmd = UVC_CMD_DUMP_CONF_STOR_STATE,
		.header.len = sizeof(uvcb),
		.config_handle = kvm->arch.pv.handle,
		.gaddr = *gaddr,
		.dump_area_origin = 0,
	};
	const u64 increment_len = uv_info.conf_dump_storage_state_len;
	size_t buff_kvm_size;
	size_t size_done = 0;
	u8 *buff_kvm = NULL;
	int cc, ret;

	ret = -EINVAL;
	/* UV call processes 1MB guest storage chunks at a time */
	if (!IS_ALIGNED(*gaddr, HPAGE_SIZE))
		goto out;

	/*
	 * We provide the storage state for 1MB chunks of guest
	 * storage. The buffer will need to be aligned to
	 * conf_dump_storage_state_len so we don't end on a partial
	 * chunk.
	 */
	if (!buff_user_len ||
	    !IS_ALIGNED(buff_user_len, increment_len))
		goto out;

	/*
	 * Allocate a buffer from which we will later copy to the user
	 * process. We don't want userspace to dictate our buffer size
	 * so we limit it to DUMP_BUFF_LEN.
	 */
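	/*
	 * Worked example with a made-up increment: if
	 * conf_dump_storage_state_len were 64 bytes, dumping 1 GiB of guest
	 * storage (1024 blocks of 1MB) would produce 64 KiB of data, copied
	 * to userspace in a single batch since it fits in DUMP_BUFF_LEN.
	 */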
	ret = -ENOMEM;
	buff_kvm_size = min_t(u64, buff_user_len, DUMP_BUFF_LEN);
	buff_kvm = vzalloc(buff_kvm_size);
	if (!buff_kvm)
		goto out;

	ret = 0;
	uvcb.dump_area_origin = (u64)buff_kvm;
	/* We will loop until the user buffer is filled or an error occurs */
	do {
		/* Get 1MB worth of guest storage state data */
		cc = uv_call_sched(0, (u64)&uvcb);

		/* All or nothing */
		if (cc) {
			ret = -EINVAL;
			break;
		}

		size_done += increment_len;
		uvcb.dump_area_origin += increment_len;
		buff_user_len -= increment_len;
		uvcb.gaddr += HPAGE_SIZE;

		/* KVM Buffer full, time to copy to the process */
		if (!buff_user_len || size_done == DUMP_BUFF_LEN) {
			if (copy_to_user(buff_user, buff_kvm, size_done)) {
				ret = -EFAULT;
				break;
			}

			buff_user += size_done;
			size_done = 0;
			uvcb.dump_area_origin = (u64)buff_kvm;
		}
	} while (buff_user_len);

	/* Report back where we ended dumping */
	*gaddr = uvcb.gaddr;

	/* Let's only log errors; we don't want to spam */
out:
	if (ret)
		KVM_UV_EVENT(kvm, 3,
			     "PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
			     uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	vfree(buff_kvm);

	return ret;
}

/**
 * kvm_s390_pv_dump_complete - Complete a previously started dump operation.
 *
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Completes the dumping operation and writes the completion data to
 * user space.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the completion buffer fails
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
			      u16 *rc, u16 *rrc)
{
	struct uv_cb_dump_complete complete = {
		.header.len = sizeof(complete),
		.header.cmd = UVC_CMD_DUMP_COMPLETE,
		.config_handle = kvm_s390_pv_get_handle(kvm),
	};
	u64 *compl_data;
	int ret;

	/* Allocate dump area */
	compl_data = vzalloc(uv_info.conf_dump_finalize_len);
	if (!compl_data)
		return -ENOMEM;
	complete.dump_area_origin = (u64)compl_data;

	ret = uv_call_sched(0, (u64)&complete);
	*rc = complete.header.rc;
	*rrc = complete.header.rrc;
	KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
		     complete.header.rc, complete.header.rrc);

	if (!ret) {
		/*
		 * kvm_s390_pv_dealloc_vm() will also (mem)set
		 * this to false on a reboot or other destroy
		 * operation for this vm.
		 */
		kvm->arch.pv.dumping = false;
		kvm_s390_vcpu_unblock_all(kvm);
		ret = copy_to_user(buff_user, compl_data, uv_info.conf_dump_finalize_len);
		if (ret)
			ret = -EFAULT;
	}
	vfree(compl_data);
	/* If the UVC returned an error, translate it to -EINVAL */
	if (ret > 0)
		ret = -EINVAL;
	return ret;
}
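
/*
 * Sketch of a complete dump sequence, assuming the userspace-facing
 * KVM_PV_DUMP subcommands map onto these helpers roughly as follows:
 *
 *   // KVM_PV_DUMP_INIT sets kvm->arch.pv.dumping (handled in kvm-s390.c)
 *   kvm_s390_pv_dump_cpu(vcpu, buff, &rc, &rrc);       // per-vCPU state
 *   kvm_s390_pv_dump_stor_state(kvm, ubuf, &gaddr,
 *                               ulen, &rc, &rrc);      // repeat until done
 *   kvm_s390_pv_dump_complete(kvm, ubuf, &rc, &rrc);   // finalize, clears dumping
 */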
878