// SPDX-License-Identifier: GPL-2.0
/*
 * Guest memory management for KVM/s390 nested VMs.
 *
 * Copyright IBM Corp. 2008, 2020, 2024
 *
 * Author(s): Claudio Imbrenda <imbrenda@linux.ibm.com>
 *            Martin Schwidefsky <schwidefsky@de.ibm.com>
 *            David Hildenbrand <david@redhat.com>
 *            Janosch Frank <frankja@linux.vnet.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/pgtable.h>
#include <linux/pagemap.h>
#include <linux/mman.h>

#include <asm/lowcore.h>
#include <asm/gmap.h>
#include <asm/uv.h>

#include "kvm-s390.h"
#include "gmap.h"

/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL.
 *
 * Context: Called with parent->shadow_lock held
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce, int edat_level)
{
	struct gmap *sg;

	lockdep_assert_held(&parent->shadow_lock);
	list_for_each_entry(sg, &parent->children, list) {
		if (!gmap_shadow_valid(sg, asce, edat_level))
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		refcount_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}

/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred to by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	if (KVM_BUG_ON(parent->mm->context.allow_gmap_hpage_1m, (struct kvm *)parent->private) ||
	    KVM_BUG_ON(gmap_is_shadow(parent), (struct kvm *)parent->private))
		return ERR_PTR(-EFAULT);
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->private = parent->private;
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	refcount_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	mmap_read_lock(parent->mm);
	rc = __kvm_s390_mprotect_many(parent, asce & _ASCE_ORIGIN,
				      ((asce & _ASCE_TABLE_LENGTH) + 1),
				      PROT_READ, GMAP_NOTIFY_SHADOW);
	mmap_read_unlock(parent->mm);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
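
/*
 * Illustrative sketch (assumption, not part of this file): how a vSIE-style
 * caller might obtain and later release a shadow gmap. The helper name and
 * the surrounding error handling are made up for the example; the real
 * caller derives the ASCE and EDAT level from the nested guest's control
 * registers before calling gmap_shadow().
 */
#if 0	/* example only, not compiled */
static int example_use_shadow(struct gmap *parent, unsigned long guest_asce,
			      int edat_level)
{
	struct gmap *sg;

	/* Look up an existing shadow or create and protect a new one. */
	sg = gmap_shadow(parent, guest_asce, edat_level);
	if (IS_ERR(sg))
		/* -EAGAIN asks the caller to retry, -ENOMEM/-EFAULT are fatal */
		return PTR_ERR(sg);

	/*
	 * gmap_shadow() hands the caller its own reference (a newly created
	 * shadow starts with ref_count == 2: one held by parent->children,
	 * one owned by the caller). Drop it once the shadow is no longer
	 * needed.
	 */
	gmap_put(sg);
	return 0;
}
#endif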