// SPDX-License-Identifier: GPL-2.0
/*
 * Hosting Protected Virtual Machines
 *
 * Copyright IBM Corp. 2019, 2020
 *    Author(s): Janosch Frank <frankja@linux.ibm.com>
 */
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/minmax.h>
#include <linux/pagemap.h>
#include <linux/sched/signal.h>
#include <asm/gmap.h>
#include <asm/uv.h>
#include <asm/mman.h>
#include <linux/pagewalk.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>
#include "kvm-s390.h"
#include "gmap.h"

bool kvm_s390_pv_is_protected(struct kvm *kvm)
{
        lockdep_assert_held(&kvm->lock);
        return !!kvm_s390_pv_get_handle(kvm);
}
EXPORT_SYMBOL_GPL(kvm_s390_pv_is_protected);

bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
{
        lockdep_assert_held(&vcpu->mutex);
        return !!kvm_s390_pv_cpu_get_handle(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected);

/**
 * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
 * be destroyed
 *
 * @list: list head for the list of leftover VMs
 * @old_gmap_table: the gmap table of the leftover protected VM
 * @handle: the handle of the leftover protected VM
 * @stor_var: pointer to the variable storage of the leftover protected VM
 * @stor_base: address of the base storage of the leftover protected VM
 *
 * Represents a protected VM that is still registered with the Ultravisor,
 * but which no longer corresponds to an active KVM VM. It should be
 * destroyed at some point later, either asynchronously or when the
 * process terminates.
 */
struct pv_vm_to_be_destroyed {
        struct list_head list;
        unsigned long old_gmap_table;
        u64 handle;
        void *stor_var;
        unsigned long stor_base;
};

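/*
 * Life cycle of a leftover VM (summarized from the functions below):
 * kvm_s390_pv_set_aside() creates one and stores it in
 * kvm->arch.pv.set_aside. From there it is either torn down directly by
 * kvm_s390_pv_deinit_aside_vm(), or moved to the kvm->arch.pv.need_cleanup
 * list (when a fatal signal interrupts the teardown, or by
 * kvm_s390_pv_deinit_cleanup_all()) and disposed of via
 * kvm_s390_pv_dispose_one_leftover().
 */
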
static void kvm_s390_clear_pv_state(struct kvm *kvm)
{
        kvm->arch.pv.handle = 0;
        kvm->arch.pv.guest_len = 0;
        kvm->arch.pv.stor_base = 0;
        kvm->arch.pv.stor_var = NULL;
}

int kvm_s390_pv_destroy_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
        int cc;

        if (!kvm_s390_pv_cpu_get_handle(vcpu))
                return 0;

        cc = uv_cmd_nodata(kvm_s390_pv_cpu_get_handle(vcpu), UVC_CMD_DESTROY_SEC_CPU, rc, rrc);

        KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT DESTROY VCPU %d: rc %x rrc %x",
                     vcpu->vcpu_id, *rc, *rrc);
        WARN_ONCE(cc, "protvirt destroy cpu failed rc %x rrc %x", *rc, *rrc);

        /*
         * Intended memory leak for something that should never happen: if
         * the UVC fails, the CPU base storage is still donated to the
         * Ultravisor and must not be reused.
         */
        if (!cc)
                free_pages(vcpu->arch.pv.stor_base,
                           get_order(uv_info.guest_cpu_stor_len));

        free_page((unsigned long)sida_addr(vcpu->arch.sie_block));
        vcpu->arch.sie_block->pv_handle_cpu = 0;
        vcpu->arch.sie_block->pv_handle_config = 0;
        memset(&vcpu->arch.pv, 0, sizeof(vcpu->arch.pv));
        vcpu->arch.sie_block->sdf = 0;
        /*
         * The sidad field (for sdf == 2) is now the gbea field (for sdf == 0).
         * Use the reset value of gbea to avoid leaking the kernel pointer of
         * the just freed sida.
         */
        vcpu->arch.sie_block->gbea = 1;
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);

        return cc ? -EIO : 0;
}

int kvm_s390_pv_create_cpu(struct kvm_vcpu *vcpu, u16 *rc, u16 *rrc)
{
        struct uv_cb_csc uvcb = {
                .header.cmd = UVC_CMD_CREATE_SEC_CPU,
                .header.len = sizeof(uvcb),
        };
        void *sida_addr;
        int cc;

        if (kvm_s390_pv_cpu_get_handle(vcpu))
                return -EINVAL;

        vcpu->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT,
                                                   get_order(uv_info.guest_cpu_stor_len));
        if (!vcpu->arch.pv.stor_base)
                return -ENOMEM;

        /* Input */
        uvcb.guest_handle = kvm_s390_pv_get_handle(vcpu->kvm);
        uvcb.num = vcpu->arch.sie_block->icpua;
        uvcb.state_origin = virt_to_phys(vcpu->arch.sie_block);
        uvcb.stor_origin = virt_to_phys((void *)vcpu->arch.pv.stor_base);

        /* Alloc Secure Instruction Data Area Designation */
        sida_addr = (void *)__get_free_page(GFP_KERNEL_ACCOUNT | __GFP_ZERO);
        if (!sida_addr) {
                free_pages(vcpu->arch.pv.stor_base,
                           get_order(uv_info.guest_cpu_stor_len));
                return -ENOMEM;
        }
        vcpu->arch.sie_block->sidad = virt_to_phys(sida_addr);

        cc = uv_call(0, (u64)&uvcb);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        KVM_UV_EVENT(vcpu->kvm, 3,
                     "PROTVIRT CREATE VCPU: cpu %d handle %llx rc %x rrc %x",
                     vcpu->vcpu_id, uvcb.cpu_handle, uvcb.header.rc,
                     uvcb.header.rrc);

        if (cc) {
                u16 dummy;

                kvm_s390_pv_destroy_cpu(vcpu, &dummy, &dummy);
                return -EIO;
        }

        /* Output */
        vcpu->arch.pv.handle = uvcb.cpu_handle;
        vcpu->arch.sie_block->pv_handle_cpu = uvcb.cpu_handle;
        vcpu->arch.sie_block->pv_handle_config = kvm_s390_pv_get_handle(vcpu->kvm);
        vcpu->arch.sie_block->sdf = 2;
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

/* only free resources when the destroy was successful */
static void kvm_s390_pv_dealloc_vm(struct kvm *kvm)
{
        vfree(kvm->arch.pv.stor_var);
        free_pages(kvm->arch.pv.stor_base,
                   get_order(uv_info.guest_base_stor_len));
        kvm_s390_clear_pv_state(kvm);
}

static int kvm_s390_pv_alloc_vm(struct kvm *kvm)
{
        unsigned long base = uv_info.guest_base_stor_len;
        unsigned long virt = uv_info.guest_virt_var_stor_len;
        unsigned long npages = 0, vlen = 0;

        kvm->arch.pv.stor_var = NULL;
        kvm->arch.pv.stor_base = __get_free_pages(GFP_KERNEL_ACCOUNT, get_order(base));
        if (!kvm->arch.pv.stor_base)
                return -ENOMEM;

        /*
         * Calculate current guest storage for allocation of the
         * variable storage, which is based on the length in MB.
         *
         * Slots are sorted by GFN
         */
        mutex_lock(&kvm->slots_lock);
        npages = kvm_s390_get_gfn_end(kvm_memslots(kvm));
        mutex_unlock(&kvm->slots_lock);

        kvm->arch.pv.guest_len = npages * PAGE_SIZE;

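        /*
         * Illustrative example (the actual lengths come from uv_info and
         * vary by machine): for a 4 GiB guest, vlen below works out to
         * guest_virt_var_stor_len * 4096 (one increment per 1 MiB of guest
         * storage, since HPAGE_SIZE is 1 MiB) plus guest_virt_base_stor_len,
         * rounded up to a multiple of PAGE_SIZE.
         */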
        /* Allocate variable storage */
        vlen = ALIGN(virt * ((npages * PAGE_SIZE) / HPAGE_SIZE), PAGE_SIZE);
        vlen += uv_info.guest_virt_base_stor_len;
        kvm->arch.pv.stor_var = vzalloc(vlen);
        if (!kvm->arch.pv.stor_var)
                goto out_err;
        return 0;

out_err:
        kvm_s390_pv_dealloc_vm(kvm);
        return -ENOMEM;
}

/**
 * kvm_s390_pv_dispose_one_leftover - Clean up one leftover protected VM.
 * @kvm: the KVM that was associated with this leftover protected VM
 * @leftover: details about the leftover protected VM that needs a clean up
 * @rc: the RC code of the Destroy Secure Configuration UVC
 * @rrc: the RRC code of the Destroy Secure Configuration UVC
 *
 * Destroy one leftover protected VM.
 * On success, kvm->mm->context.protected_count will be decremented atomically
 * and all other resources used by the VM will be freed.
 *
 * Return: 0 in case of success, otherwise 1
 */
static int kvm_s390_pv_dispose_one_leftover(struct kvm *kvm,
                                            struct pv_vm_to_be_destroyed *leftover,
                                            u16 *rc, u16 *rrc)
{
        int cc;

        /* It used the destroy-fast UVC, nothing left to do here */
        if (!leftover->handle)
                goto done_fast;
        cc = uv_cmd_nodata(leftover->handle, UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
        KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY LEFTOVER VM: rc %x rrc %x", *rc, *rrc);
        WARN_ONCE(cc, "protvirt destroy leftover vm failed rc %x rrc %x", *rc, *rrc);
        /*
         * Intentionally leak unusable memory. If the UVC fails, the memory
         * used for the VM and its metadata is permanently unusable.
         * This can only happen in case of a serious KVM or hardware bug; it
         * is not expected to happen in normal operation.
         */
        if (cc)
                return cc;
        free_pages(leftover->stor_base, get_order(uv_info.guest_base_stor_len));
        free_pages(leftover->old_gmap_table, CRST_ALLOC_ORDER);
        vfree(leftover->stor_var);
done_fast:
        atomic_dec(&kvm->mm->context.protected_count);
        return 0;
}

/**
 * kvm_s390_destroy_lower_2g - Destroy the first 2GB of protected guest memory.
 * @kvm: the VM whose memory is to be cleared.
 *
 * Destroy the first 2GB of guest memory, to avoid prefix issues after reboot.
 * Since the prefix (lowcore) area of a CPU can lie anywhere in the first 2GB
 * of guest absolute memory, that whole range has to be made non-secure.
 * The CPUs of the protected VM need to be destroyed beforehand.
 */
static void kvm_s390_destroy_lower_2g(struct kvm *kvm)
{
        const unsigned long pages_2g = SZ_2G / PAGE_SIZE;
        struct kvm_memory_slot *slot;
        unsigned long len;
        int srcu_idx;

        srcu_idx = srcu_read_lock(&kvm->srcu);

        /* Take the memslot containing guest absolute address 0 */
        slot = gfn_to_memslot(kvm, 0);
        /* Clear all slots or parts thereof that are below 2GB */
        while (slot && slot->base_gfn < pages_2g) {
                len = min_t(u64, slot->npages, pages_2g - slot->base_gfn) * PAGE_SIZE;
                s390_uv_destroy_range(kvm->mm, slot->userspace_addr, slot->userspace_addr + len);
                /* Take the next memslot */
                slot = gfn_to_memslot(kvm, slot->base_gfn + slot->npages);
        }

        srcu_read_unlock(&kvm->srcu, srcu_idx);
}

static int kvm_s390_pv_deinit_vm_fast(struct kvm *kvm, u16 *rc, u16 *rrc)
{
        struct uv_cb_destroy_fast uvcb = {
                .header.cmd = UVC_CMD_DESTROY_SEC_CONF_FAST,
                .header.len = sizeof(uvcb),
                .handle = kvm_s390_pv_get_handle(kvm),
        };
        int cc;

        cc = uv_call_sched(0, (u64)&uvcb);
        if (rc)
                *rc = uvcb.header.rc;
        if (rrc)
                *rrc = uvcb.header.rrc;
        WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
        KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM FAST: rc %x rrc %x",
                     uvcb.header.rc, uvcb.header.rrc);
        WARN_ONCE(cc && uvcb.header.rc != 0x104,
                  "protvirt destroy vm fast failed handle %llx rc %x rrc %x",
                  kvm_s390_pv_get_handle(kvm), uvcb.header.rc, uvcb.header.rrc);
        /* Intended memory leak on "impossible" error */
        if (!cc)
                kvm_s390_pv_dealloc_vm(kvm);
        return cc ? -EIO : 0;
}

static inline bool is_destroy_fast_available(void)
{
        return test_bit_inv(BIT_UVC_CMD_DESTROY_SEC_CONF_FAST, uv_info.inst_calls_list);
}

/**
 * kvm_s390_pv_set_aside - Set aside a protected VM for later teardown.
 * @kvm: the VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Set aside the protected VM for a subsequent teardown. The VM will be able
 * to continue immediately as a non-secure VM, and the information needed to
 * properly tear down the protected VM is set aside. If another protected VM
 * was already set aside without starting its teardown, this function will
 * fail.
 * The CPUs of the protected VM need to be destroyed beforehand.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, -EINVAL if another protected VM was already
 * set aside or if the VM uses a segment type ASCE, -ENOMEM if the system ran
 * out of memory, -EIO if the Destroy Secure Configuration Fast UVC failed.
 */
int kvm_s390_pv_set_aside(struct kvm *kvm, u16 *rc, u16 *rrc)
{
        struct pv_vm_to_be_destroyed *priv;
        int res = 0;

        lockdep_assert_held(&kvm->lock);
        /*
         * If another protected VM was already prepared for teardown, refuse.
         * A normal deinitialization has to be performed instead.
         */
        if (kvm->arch.pv.set_aside)
                return -EINVAL;

        /* Guest with segment type ASCE, refuse to destroy asynchronously */
        if ((kvm->arch.gmap->asce & _ASCE_TYPE_MASK) == _ASCE_TYPE_SEGMENT)
                return -EINVAL;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        if (is_destroy_fast_available()) {
                res = kvm_s390_pv_deinit_vm_fast(kvm, rc, rrc);
        } else {
                priv->stor_var = kvm->arch.pv.stor_var;
                priv->stor_base = kvm->arch.pv.stor_base;
                priv->handle = kvm_s390_pv_get_handle(kvm);
                priv->old_gmap_table = (unsigned long)kvm->arch.gmap->table;
                WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
                if (s390_replace_asce(kvm->arch.gmap))
                        res = -ENOMEM;
        }

        if (res) {
                kfree(priv);
                return res;
        }

        kvm_s390_destroy_lower_2g(kvm);
        kvm_s390_clear_pv_state(kvm);
        kvm->arch.pv.set_aside = priv;

        *rc = UVC_RC_EXECUTED;
        *rrc = 42;
        return 0;
}

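/*
 * Typical usage sketch, for orientation only (command names as defined in
 * the KVM API documentation): userspace issues the KVM_S390_PV_COMMAND
 * ioctl with KVM_PV_ASYNC_CLEANUP_PREPARE, which ends up in
 * kvm_s390_pv_set_aside() above, and later, ideally from a separate thread,
 * KVM_PV_ASYNC_CLEANUP_PERFORM, which ends up in
 * kvm_s390_pv_deinit_aside_vm() below.
 */
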
/**
 * kvm_s390_pv_deinit_vm - Deinitialize the current protected VM
 * @kvm: the KVM whose protected VM needs to be deinitialized
 * @rc: the RC code of the UVC
 * @rrc: the RRC code of the UVC
 *
 * Deinitialize the current protected VM. This function will destroy and
 * cleanup the current protected VM, but it will not cleanup the guest
 * memory. This function should only be called when the protected VM has
 * just been created and therefore does not have any guest memory, or when
 * the caller cleans up the guest memory separately.
 *
 * This function should not fail, but if it does, the donated memory must
 * not be freed.
 *
 * Context: kvm->lock needs to be held
 *
 * Return: 0 in case of success, otherwise -EIO
 */
int kvm_s390_pv_deinit_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
        int cc;

        cc = uv_cmd_nodata(kvm_s390_pv_get_handle(kvm),
                           UVC_CMD_DESTROY_SEC_CONF, rc, rrc);
        WRITE_ONCE(kvm->arch.gmap->guest_handle, 0);
        if (!cc) {
                atomic_dec(&kvm->mm->context.protected_count);
                kvm_s390_pv_dealloc_vm(kvm);
        } else {
                /* Intended memory leak on "impossible" error */
                s390_replace_asce(kvm->arch.gmap);
        }
        KVM_UV_EVENT(kvm, 3, "PROTVIRT DESTROY VM: rc %x rrc %x", *rc, *rrc);
        WARN_ONCE(cc, "protvirt destroy vm failed rc %x rrc %x", *rc, *rrc);

        return cc ? -EIO : 0;
}

/**
 * kvm_s390_pv_deinit_cleanup_all - Clean up all protected VMs associated with a specific KVM.
 * @kvm: the KVM to be cleaned up
 * @rc: the RC code of the first failing UVC
 * @rrc: the RRC code of the first failing UVC
 *
 * This function will clean up all protected VMs associated with a KVM.
 * This includes the active one, the one prepared for deinitialization with
 * kvm_s390_pv_set_aside, and any still pending in the need_cleanup list.
 *
 * Context: kvm->lock needs to be held unless being called from
 * kvm_arch_destroy_vm.
 *
 * Return: 0 if all VMs are successfully cleaned up, otherwise -EIO
 */
int kvm_s390_pv_deinit_cleanup_all(struct kvm *kvm, u16 *rc, u16 *rrc)
{
        struct pv_vm_to_be_destroyed *cur;
        bool need_zap = false;
        u16 _rc, _rrc;
        int cc = 0;

        /*
         * Nothing to do if the counter was already 0. Otherwise make sure
         * the counter does not reach 0 before calling s390_uv_destroy_range.
         */
        if (!atomic_inc_not_zero(&kvm->mm->context.protected_count))
                return 0;

        *rc = UVC_RC_EXECUTED;
        /* If the current VM is protected, destroy it */
        if (kvm_s390_pv_get_handle(kvm)) {
                cc = kvm_s390_pv_deinit_vm(kvm, rc, rrc);
                need_zap = true;
        }

        /* If a previous protected VM was set aside, put it in the need_cleanup list */
        if (kvm->arch.pv.set_aside) {
                list_add(kvm->arch.pv.set_aside, &kvm->arch.pv.need_cleanup);
                kvm->arch.pv.set_aside = NULL;
        }

        /* Cleanup all protected VMs in the need_cleanup list */
        while (!list_empty(&kvm->arch.pv.need_cleanup)) {
                cur = list_first_entry(&kvm->arch.pv.need_cleanup, typeof(*cur), list);
                need_zap = true;
                if (kvm_s390_pv_dispose_one_leftover(kvm, cur, &_rc, &_rrc)) {
                        cc = 1;
                        /*
                         * Only return the first error rc and rrc, so make
                         * sure it is not overwritten. All destroys will
                         * additionally be reported via KVM_UV_EVENT().
                         */
                        if (*rc == UVC_RC_EXECUTED) {
                                *rc = _rc;
                                *rrc = _rrc;
                        }
                }
                list_del(&cur->list);
                kfree(cur);
        }

        /*
         * If the mm still has a mapping, try to mark all its pages as
         * accessible. The counter should not reach zero before this
         * cleanup has been performed.
         */
        if (need_zap && mmget_not_zero(kvm->mm)) {
                s390_uv_destroy_range(kvm->mm, 0, TASK_SIZE);
                mmput(kvm->mm);
        }

        /* Now the counter can safely reach 0 */
        atomic_dec(&kvm->mm->context.protected_count);
        return cc ? -EIO : 0;
}

/**
 * kvm_s390_pv_deinit_aside_vm - Teardown a previously set aside protected VM.
 * @kvm: the VM previously associated with the protected VM
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Tear down the protected VM that had been previously prepared for teardown
 * using kvm_s390_pv_set_aside. Ideally this should be called by
 * userspace asynchronously from a separate thread.
 *
 * Context: kvm->lock must not be held.
 *
 * Return: 0 in case of success, -EINVAL if no protected VM had been
 * prepared for asynchronous teardown, -EIO in case of other errors.
 */
int kvm_s390_pv_deinit_aside_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
        struct pv_vm_to_be_destroyed *p;
        int ret = 0;

        lockdep_assert_not_held(&kvm->lock);
        mutex_lock(&kvm->lock);
        p = kvm->arch.pv.set_aside;
        kvm->arch.pv.set_aside = NULL;
        mutex_unlock(&kvm->lock);
        if (!p)
                return -EINVAL;

        /* When a fatal signal is received, stop immediately */
        if (s390_uv_destroy_range_interruptible(kvm->mm, 0, TASK_SIZE_MAX))
                goto done;
        if (kvm_s390_pv_dispose_one_leftover(kvm, p, rc, rrc))
                ret = -EIO;
        kfree(p);
        p = NULL;
done:
        /*
         * p is not NULL if we aborted because of a fatal signal, in which
         * case queue the leftover for later cleanup.
         */
        if (p) {
                mutex_lock(&kvm->lock);
                list_add(&p->list, &kvm->arch.pv.need_cleanup);
                mutex_unlock(&kvm->lock);
                /* Did not finish, but pretend things went well */
                *rc = UVC_RC_EXECUTED;
                *rrc = 42;
        }
        return ret;
}

static void kvm_s390_pv_mmu_notifier_release(struct mmu_notifier *subscription,
                                             struct mm_struct *mm)
{
        struct kvm *kvm = container_of(subscription, struct kvm, arch.pv.mmu_notifier);
        u16 dummy;
        int r;

        /*
         * No locking is needed since this is the last thread of the last user of this
         * struct mm.
         * When the struct kvm gets deinitialized, this notifier is also
         * unregistered. This means that if this notifier runs, then the
         * struct kvm is still valid.
         */
        r = kvm_s390_cpus_from_pv(kvm, &dummy, &dummy);
        if (!r && is_destroy_fast_available() && kvm_s390_pv_get_handle(kvm))
                kvm_s390_pv_deinit_vm_fast(kvm, &dummy, &dummy);
}

static const struct mmu_notifier_ops kvm_s390_pv_mmu_notifier_ops = {
        .release = kvm_s390_pv_mmu_notifier_release,
};

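/**
 * kvm_s390_pv_init_vm - Turn the current VM into a protected VM.
 * @kvm: the KVM whose protected VM needs to be initialized
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Allocate the base and variable storage and donate it to the Ultravisor
 * with the Create Secure Configuration UVC.
 *
 * Context: kvm->lock needs to be held (the mmu notifier registration below
 * relies on it)
 *
 * Return: 0 in case of success, -ENOMEM if allocating the donated storage
 * fails, -EIO if the Create Secure Configuration UVC fails.
 */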
int kvm_s390_pv_init_vm(struct kvm *kvm, u16 *rc, u16 *rrc)
{
        struct uv_cb_cgc uvcb = {
                .header.cmd = UVC_CMD_CREATE_SEC_CONF,
                .header.len = sizeof(uvcb)
        };
        int cc, ret;
        u16 dummy;

        ret = kvm_s390_pv_alloc_vm(kvm);
        if (ret)
                return ret;

        /* Inputs */
        uvcb.guest_stor_origin = 0; /* MSO is 0 for KVM */
        uvcb.guest_stor_len = kvm->arch.pv.guest_len;
        uvcb.guest_asce = kvm->arch.gmap->asce;
        uvcb.guest_sca = virt_to_phys(kvm->arch.sca);
        uvcb.conf_base_stor_origin =
                virt_to_phys((void *)kvm->arch.pv.stor_base);
        uvcb.conf_virt_stor_origin = (u64)kvm->arch.pv.stor_var;
        uvcb.flags.ap_allow_instr = kvm->arch.model.uv_feat_guest.ap;
        uvcb.flags.ap_instr_intr = kvm->arch.model.uv_feat_guest.ap_intr;

        cc = uv_call_sched(0, (u64)&uvcb);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        KVM_UV_EVENT(kvm, 3, "PROTVIRT CREATE VM: handle %llx len %llx rc %x rrc %x flags %04x",
                     uvcb.guest_handle, uvcb.guest_stor_len, *rc, *rrc, uvcb.flags.raw);

        /* Outputs */
        kvm->arch.pv.handle = uvcb.guest_handle;

        atomic_inc(&kvm->mm->context.protected_count);
        if (cc) {
                if (uvcb.header.rc & UVC_RC_NEED_DESTROY) {
                        kvm_s390_pv_deinit_vm(kvm, &dummy, &dummy);
                } else {
                        atomic_dec(&kvm->mm->context.protected_count);
                        kvm_s390_pv_dealloc_vm(kvm);
                }
                return -EIO;
        }
        kvm->arch.gmap->guest_handle = uvcb.guest_handle;
        /* Add the notifier only once. No races because we hold kvm->lock */
        if (kvm->arch.pv.mmu_notifier.ops != &kvm_s390_pv_mmu_notifier_ops) {
                kvm->arch.pv.mmu_notifier.ops = &kvm_s390_pv_mmu_notifier_ops;
                mmu_notifier_register(&kvm->arch.pv.mmu_notifier, kvm->mm);
        }
        return 0;
}

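/**
 * kvm_s390_pv_set_sec_parms - Pass the SE header of the guest image to the
 * Ultravisor.
 * @kvm: the KVM whose protected VM is being set up
 * @hdr: kernel buffer containing the SE header
 * @length: length of the SE header
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Return: 0 in case of success, -EINVAL if the Set Secure Configuration
 * Parameters UVC failed.
 */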
int kvm_s390_pv_set_sec_parms(struct kvm *kvm, void *hdr, u64 length, u16 *rc,
                              u16 *rrc)
{
        struct uv_cb_ssc uvcb = {
                .header.cmd = UVC_CMD_SET_SEC_CONF_PARAMS,
                .header.len = sizeof(uvcb),
                .sec_header_origin = (u64)hdr,
                .sec_header_len = length,
                .guest_handle = kvm_s390_pv_get_handle(kvm),
        };
        int cc = uv_call(0, (u64)&uvcb);

        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        KVM_UV_EVENT(kvm, 3, "PROTVIRT VM SET PARMS: rc %x rrc %x",
                     *rc, *rrc);
        return cc ? -EINVAL : 0;
}

static int unpack_one(struct kvm *kvm, unsigned long addr, u64 tweak,
                      u64 offset, u16 *rc, u16 *rrc)
{
        struct uv_cb_unp uvcb = {
                .header.cmd = UVC_CMD_UNPACK_IMG,
                .header.len = sizeof(uvcb),
                .guest_handle = kvm_s390_pv_get_handle(kvm),
                .gaddr = addr,
                .tweak[0] = tweak,
                .tweak[1] = offset,
        };
        int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
        unsigned long vmaddr;
        bool unlocked;

        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;

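        /*
         * -ENXIO indicates the gmap does not yet have a valid mapping for
         * the guest address: fault the page in and link it into the gmap
         * below, then report -EAGAIN so the caller retries the unpack.
         */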
        if (ret == -ENXIO) {
                mmap_read_lock(kvm->mm);
                vmaddr = gfn_to_hva(kvm, gpa_to_gfn(addr));
                if (kvm_is_error_hva(vmaddr)) {
                        ret = -EFAULT;
                } else {
                        ret = fixup_user_fault(kvm->mm, vmaddr, FAULT_FLAG_WRITE, &unlocked);
                        if (!ret)
                                ret = __gmap_link(kvm->arch.gmap, addr, vmaddr);
                }
                mmap_read_unlock(kvm->mm);
                if (!ret)
                        return -EAGAIN;
                return ret;
        }

        if (ret && ret != -EAGAIN)
                KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: failed addr %llx with rc %x rrc %x",
                             uvcb.gaddr, *rc, *rrc);
        return ret;
}

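/**
 * kvm_s390_pv_unpack - Unpack the encrypted guest image.
 * @kvm: the KVM whose protected VM is being set up
 * @addr: starting guest address of the image part to be unpacked
 * @size: size of the image part to be unpacked
 * @tweak: tweak value used for decryption
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Unpack the image page by page with the Unpack UVC; each page is passed
 * the tweak together with its offset within the image.
 *
 * Return: 0 in case of success, -EINVAL if @addr or @size are not page
 * aligned, otherwise an error code from a failing Unpack UVC.
 */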
int kvm_s390_pv_unpack(struct kvm *kvm, unsigned long addr, unsigned long size,
                       unsigned long tweak, u16 *rc, u16 *rrc)
{
        u64 offset = 0;
        int ret = 0;

        if (addr & ~PAGE_MASK || !size || size & ~PAGE_MASK)
                return -EINVAL;

        KVM_UV_EVENT(kvm, 3, "PROTVIRT VM UNPACK: start addr %lx size %lx",
                     addr, size);

        guard(srcu)(&kvm->srcu);

        while (offset < size) {
                ret = unpack_one(kvm, addr, tweak, offset, rc, rrc);
                if (ret == -EAGAIN) {
                        cond_resched();
                        if (fatal_signal_pending(current))
                                break;
                        continue;
                }
                if (ret)
                        break;
                addr += PAGE_SIZE;
                offset += PAGE_SIZE;
        }
        if (!ret)
                KVM_UV_EVENT(kvm, 3, "%s", "PROTVIRT VM UNPACK: successful");
        return ret;
}

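/**
 * kvm_s390_pv_set_cpu_state - Set the state of a protected CPU.
 * @vcpu: the vcpu whose protected state is to be changed
 * @state: the new state (presumably one of the PV_CPU_STATE_* values)
 *
 * Return: 0 in case of success, -EINVAL if the Set CPU State UVC failed.
 */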
int kvm_s390_pv_set_cpu_state(struct kvm_vcpu *vcpu, u8 state)
{
        struct uv_cb_cpu_set_state uvcb = {
                .header.cmd = UVC_CMD_CPU_SET_STATE,
                .header.len = sizeof(uvcb),
                .cpu_handle = kvm_s390_pv_cpu_get_handle(vcpu),
                .state = state,
        };
        int cc;

        cc = uv_call(0, (u64)&uvcb);
        KVM_UV_EVENT(vcpu->kvm, 3, "PROTVIRT SET CPU %d STATE %d rc %x rrc %x",
                     vcpu->vcpu_id, state, uvcb.header.rc, uvcb.header.rrc);
        if (cc)
                return -EINVAL;
        return 0;
}

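/**
 * kvm_s390_pv_dump_cpu - Dump the state of a protected CPU.
 * @vcpu: the vcpu whose state is to be dumped
 * @buff: kernel buffer receiving the dump data; the caller is responsible
 *	  for sizing it large enough for the Ultravisor's CPU dump data
 * @rc: return value for the RC field of the UVCB
 * @rrc: return value for the RRC field of the UVCB
 *
 * Return: the condition code of the Dump CPU UVC, i.e. 0 in case of success.
 */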
int kvm_s390_pv_dump_cpu(struct kvm_vcpu *vcpu, void *buff, u16 *rc, u16 *rrc)
{
        struct uv_cb_dump_cpu uvcb = {
                .header.cmd = UVC_CMD_DUMP_CPU,
                .header.len = sizeof(uvcb),
                .cpu_handle = vcpu->arch.pv.handle,
                .dump_area_origin = (u64)buff,
        };
        int cc;

        cc = uv_call_sched(0, (u64)&uvcb);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        return cc;
}

/* Size of the cache for the storage state dump data. 1MB for now */
#define DUMP_BUFF_LEN HPAGE_SIZE

/**
 * kvm_s390_pv_dump_stor_state - Dump the storage state of guest memory.
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @gaddr: Starting absolute guest address for which the storage state
 *	   is requested.
 * @buff_user_len: Length of the buff_user buffer
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Stores buff_user_len bytes of tweak component values to buff_user
 * starting with the 1MB block specified by the absolute guest address
 * (gaddr). The gaddr pointer will be updated with the last address
 * for which data was written when returning to userspace. buff_user
 * might be written to even if an error rc is returned, for instance
 * if we encounter a fault after writing the first page of data.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the cache fails
 *  -EINVAL if gaddr is not aligned to 1MB
 *  -EINVAL if buff_user_len is not aligned to uv_info.conf_dump_storage_state_len
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_stor_state(struct kvm *kvm, void __user *buff_user,
                                u64 *gaddr, u64 buff_user_len, u16 *rc, u16 *rrc)
{
        struct uv_cb_dump_stor_state uvcb = {
                .header.cmd = UVC_CMD_DUMP_CONF_STOR_STATE,
                .header.len = sizeof(uvcb),
                .config_handle = kvm->arch.pv.handle,
                .gaddr = *gaddr,
                .dump_area_origin = 0,
        };
        const u64 increment_len = uv_info.conf_dump_storage_state_len;
        size_t buff_kvm_size;
        size_t size_done = 0;
        u8 *buff_kvm = NULL;
        int cc, ret;

        ret = -EINVAL;
        /* UV call processes 1MB guest storage chunks at a time */
        if (!IS_ALIGNED(*gaddr, HPAGE_SIZE))
                goto out;

        /*
         * We provide the storage state for 1MB chunks of guest
         * storage. The buffer will need to be aligned to
         * conf_dump_storage_state_len so we don't end on a partial
         * chunk.
         */
        if (!buff_user_len ||
            !IS_ALIGNED(buff_user_len, increment_len))
                goto out;

        /*
         * Allocate a buffer from which we will later copy to the user
         * process. We don't want userspace to dictate our buffer size
         * so we limit it to DUMP_BUFF_LEN.
         */
        ret = -ENOMEM;
        buff_kvm_size = min_t(u64, buff_user_len, DUMP_BUFF_LEN);
        buff_kvm = vzalloc(buff_kvm_size);
        if (!buff_kvm)
                goto out;

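        /*
         * Illustrative numbers (the actual increment_len comes from the
         * Ultravisor): with buff_user_len == 4 * DUMP_BUFF_LEN, the loop
         * below fills the 1MB kernel buffer four times and copies it out
         * to buff_user each time it is full, i.e. four copy_to_user()
         * calls in total.
         */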
        ret = 0;
        uvcb.dump_area_origin = (u64)buff_kvm;
        /* We will loop until the user buffer is filled or an error occurs */
        do {
                /* Get 1MB worth of guest storage state data */
                cc = uv_call_sched(0, (u64)&uvcb);

                /* All or nothing */
                if (cc) {
                        ret = -EINVAL;
                        break;
                }

                size_done += increment_len;
                uvcb.dump_area_origin += increment_len;
                buff_user_len -= increment_len;
                uvcb.gaddr += HPAGE_SIZE;

                /* KVM Buffer full, time to copy to the process */
                if (!buff_user_len || size_done == DUMP_BUFF_LEN) {
                        if (copy_to_user(buff_user, buff_kvm, size_done)) {
                                ret = -EFAULT;
                                break;
                        }

                        buff_user += size_done;
                        size_done = 0;
                        uvcb.dump_area_origin = (u64)buff_kvm;
                }
        } while (buff_user_len);

        /* Report back where we ended dumping */
        *gaddr = uvcb.gaddr;

        /* Let's only log errors, we don't want to spam */
out:
        if (ret)
                KVM_UV_EVENT(kvm, 3,
                             "PROTVIRT DUMP STORAGE STATE: addr %llx ret %d, uvcb rc %x rrc %x",
                             uvcb.gaddr, ret, uvcb.header.rc, uvcb.header.rrc);
        *rc = uvcb.header.rc;
        *rrc = uvcb.header.rrc;
        vfree(buff_kvm);

        return ret;
}

/**
 * kvm_s390_pv_dump_complete - Complete a previously started dump.
 * @kvm: pointer to the guest's KVM struct
 * @buff_user: Userspace pointer where we will write the results to
 * @rc: Pointer to where the uvcb return code is stored
 * @rrc: Pointer to where the uvcb return reason code is stored
 *
 * Completes the dumping operation and writes the completion data to
 * user space.
 *
 * Context: kvm->lock needs to be held
 *
 * Return:
 *  0 on success
 *  -ENOMEM if allocating the completion buffer fails
 *  -EINVAL if the UV call fails, rc and rrc will be set in this case
 *  -EFAULT if copying the result to buff_user failed
 */
int kvm_s390_pv_dump_complete(struct kvm *kvm, void __user *buff_user,
                              u16 *rc, u16 *rrc)
{
        struct uv_cb_dump_complete complete = {
                .header.len = sizeof(complete),
                .header.cmd = UVC_CMD_DUMP_COMPLETE,
                .config_handle = kvm_s390_pv_get_handle(kvm),
        };
        u64 *compl_data;
        int ret;

        /* Allocate dump area */
        compl_data = vzalloc(uv_info.conf_dump_finalize_len);
        if (!compl_data)
                return -ENOMEM;
        complete.dump_area_origin = (u64)compl_data;

        ret = uv_call_sched(0, (u64)&complete);
        *rc = complete.header.rc;
        *rrc = complete.header.rrc;
        KVM_UV_EVENT(kvm, 3, "PROTVIRT DUMP COMPLETE: rc %x rrc %x",
                     complete.header.rc, complete.header.rrc);

        if (!ret) {
                /*
                 * kvm_s390_pv_dealloc_vm() will also (mem)set
                 * this to false on a reboot or other destroy
                 * operation for this vm.
                 */
                kvm->arch.pv.dumping = false;
                kvm_s390_vcpu_unblock_all(kvm);
                ret = copy_to_user(buff_user, compl_data, uv_info.conf_dump_finalize_len);
                if (ret)
                        ret = -EFAULT;
        }
        vfree(compl_data);
        /* If the UVC returned an error, translate it to -EINVAL */
        if (ret > 0)
                ret = -EINVAL;
        return ret;
}