--- pv.c	(1260ed77798502de9c98020040d2995008de10cc)
+++ pv.c	(d6c8097803cbc3bb8d875baef542e6d77d10c203)
@@ -1,44 +1,101 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
  * Hosting Protected Virtual Machines
  *
  * Copyright IBM Corp. 2019, 2020
  *    Author(s): Janosch Frank <frankja@linux.ibm.com>
  */
 #include <linux/kvm.h>
 #include <linux/kvm_host.h>
 #include <linux/minmax.h>
 #include <linux/pagemap.h>
 #include <linux/sched/signal.h>
 #include <asm/gmap.h>
 #include <asm/uv.h>
 #include <asm/mman.h>
 #include <linux/pagewalk.h>
 #include <linux/sched/mm.h>
 #include <linux/mmu_notifier.h>
 #include "kvm-s390.h"
-#include "gmap.h"
 
 bool kvm_s390_pv_is_protected(struct kvm *kvm)
 {
 	lockdep_assert_held(&kvm->lock);
 	return !!kvm_s390_pv_get_handle(kvm);
 }
 EXPORT_SYMBOL_GPL(kvm_s390_pv_is_protected);
 
 bool kvm_s390_pv_cpu_is_protected(struct kvm_vcpu *vcpu)
 {
 	lockdep_assert_held(&vcpu->mutex);
 	return !!kvm_s390_pv_cpu_get_handle(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_s390_pv_cpu_is_protected);
 
 /**
+ * kvm_s390_pv_make_secure() - make one guest page secure
+ * @kvm: the guest
+ * @gaddr: the guest address that needs to be made secure
+ * @uvcb: the UVCB specifying which operation needs to be performed
+ *
+ * Context: needs to be called with kvm->srcu held.
+ * Return: 0 on success, < 0 in case of error.
+ */
+int kvm_s390_pv_make_secure(struct kvm *kvm, unsigned long gaddr, void *uvcb)
+{
+	unsigned long vmaddr;
+
+	lockdep_assert_held(&kvm->srcu);
+
+	vmaddr = gfn_to_hva(kvm, gpa_to_gfn(gaddr));
+	if (kvm_is_error_hva(vmaddr))
+		return -EFAULT;
+	return make_hva_secure(kvm->mm, vmaddr, uvcb);
+}
+
+int kvm_s390_pv_convert_to_secure(struct kvm *kvm, unsigned long gaddr)
+{
+	struct uv_cb_cts uvcb = {
+		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
+		.header.len = sizeof(uvcb),
+		.guest_handle = kvm_s390_pv_get_handle(kvm),
+		.gaddr = gaddr,
+	};
+
+	return kvm_s390_pv_make_secure(kvm, gaddr, &uvcb);
+}
+
+/**
+ * kvm_s390_pv_destroy_page() - Destroy a guest page.
+ * @kvm: the guest
+ * @gaddr: the guest address to destroy
+ *
+ * An attempt will be made to destroy the given guest page. If the attempt
+ * fails, an attempt is made to export the page. If both attempts fail, an
+ * appropriate error is returned.
+ *
+ * Context: may sleep.
+ */
+int kvm_s390_pv_destroy_page(struct kvm *kvm, unsigned long gaddr)
+{
+	struct page *page;
+	int rc = 0;
+
+	mmap_read_lock(kvm->mm);
+	page = gfn_to_page(kvm, gpa_to_gfn(gaddr));
+	if (page)
+		rc = __kvm_s390_pv_destroy_page(page);
+	kvm_release_page_clean(page);
+	mmap_read_unlock(kvm->mm);
+	return rc;
+}
+
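Note on the added helpers above: kvm_s390_pv_make_secure() resolves the guest
address to a host virtual address via gfn_to_hva() and hands the
caller-supplied UVCB to make_hva_secure(), so callers no longer go through the
gmap; that is also why the "gmap.h" include is dropped. As a sketch only (this
helper is not part of the diff, and the -EAGAIN retry is an assumption based
on common Ultravisor call conventions), a caller holding kvm->srcu might drive
the new API over a range of guest pages like this:

static int convert_range_to_secure(struct kvm *kvm, unsigned long gaddr,
				   unsigned long end)
{
	int rc;

	for (; gaddr < end; gaddr += PAGE_SIZE) {
		do {
			/* builds a UVC_CMD_CONV_TO_SEC_STOR UVCB internally */
			rc = kvm_s390_pv_convert_to_secure(kvm, gaddr);
			if (rc == -EAGAIN)
				cond_resched();	/* assumed transient; retry */
		} while (rc == -EAGAIN);
		if (rc)
			return rc;
	}
	return 0;
}
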
+/**
  * struct pv_vm_to_be_destroyed - Represents a protected VM that needs to
  * be destroyed
  *
  * @list: list head for the list of leftover VMs
  * @old_gmap_table: the gmap table of the leftover protected VM
  * @handle: the handle of the leftover protected VM
  * @stor_var: pointer to the variable storage of the leftover protected VM
  * @stor_base: address of the base storage of the leftover protected VM

--- 588 unchanged lines hidden ---

@@ -633,17 +690,17 @@
 	struct uv_cb_unp uvcb = {
 		.header.cmd = UVC_CMD_UNPACK_IMG,
 		.header.len = sizeof(uvcb),
 		.guest_handle = kvm_s390_pv_get_handle(kvm),
 		.gaddr = addr,
 		.tweak[0] = tweak,
 		.tweak[1] = offset,
 	};
-	int ret = gmap_make_secure(kvm->arch.gmap, addr, &uvcb);
+	int ret = kvm_s390_pv_make_secure(kvm, addr, &uvcb);
 	unsigned long vmaddr;
 	bool unlocked;
 
 	*rc = uvcb.header.rc;
 	*rrc = uvcb.header.rrc;
 
 	if (ret == -ENXIO) {
 		mmap_read_lock(kvm->mm);

--- 266 unchanged lines hidden ---
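
Note on the hunk above: the only functional change is the call site; the
UVC_CMD_UNPACK_IMG UVCB is now handed to the new kvm_s390_pv_make_secure()
instead of gmap_make_secure(), so the gmap is no longer referenced here. The
enclosing helper and its caller fall in the hidden lines; purely as a sketch,
assuming the enclosing helper is unpack_one(kvm, addr, tweak, offset, rc, rrc)
as in the upstream tree, the page-at-a-time loop feeding it might look like
the following (the name unpack_image() and the -EAGAIN handling are
assumptions, not shown by this diff; fatal_signal_pending() relies on
linux/sched/signal.h, which this file already includes):

static int unpack_image(struct kvm *kvm, unsigned long addr,
			unsigned long size, u64 tweak, u16 *rc, u16 *rrc)
{
	u64 offset;
	int ret;

	for (offset = 0; offset < size; offset += PAGE_SIZE) {
		do {
			/* one unpack per page; tweak[1] carries the offset */
			ret = unpack_one(kvm, addr + offset, tweak, offset,
					 rc, rrc);
			if (ret == -EAGAIN) {	/* assumed transient; retry */
				cond_resched();
				if (fatal_signal_pending(current))
					return -EINTR;
			}
		} while (ret == -EAGAIN);
		if (ret)
			return ret;
	}
	return 0;
}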