// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper used to dump the stage-2 pagetables of the system and their
 * associated permissions.
 *
 * Copyright (C) Google, 2024
 * Author: Sebastian Ene <sebastianene@google.com>
 */
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/seq_file.h>

#include <asm/kvm_mmu.h>
#include <asm/kvm_pgtable.h>
#include <asm/ptdump.h>

/* Two address markers: one carrying the "Guest IPA" label, one end sentinel. */
#define MARKERS_LEN		2
/* Levels are numbered 0..KVM_PGTABLE_LAST_LEVEL, hence LAST + 1 entries. */
#define KVM_PGTABLE_MAX_LEVELS	(KVM_PGTABLE_LAST_LEVEL + 1)

/*
 * Per-open parser state for one stage2_page_tables debugfs file. Allocated in
 * kvm_ptdump_parser_create(), stashed in the seq_file private pointer, and
 * freed on release. Holds the ptdump core state plus the backing storage for
 * the marker, per-level and range arrays that parser_state points into.
 */
struct kvm_ptdump_guest_state {
	struct kvm			*kvm;
	struct ptdump_pg_state		parser_state;
	struct addr_marker		ipa_marker[MARKERS_LEN];
	struct ptdump_pg_level		level[KVM_PGTABLE_MAX_LEVELS];
	struct ptdump_range		range[MARKERS_LEN];
};

/*
 * Decode table for the stage-2 PTE attribute bits that get printed for each
 * mapping. Each entry masks out a field and prints .set when the field equals
 * .val, otherwise .clear (when provided).
 */
static const struct ptdump_prot_bits stage2_pte_bits[] = {
	{
		.mask	= PTE_VALID,
		.val	= PTE_VALID,
		.set	= " ",
		.clear	= "F",		/* faulting/invalid entry */
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R,
		.set	= "R",
		.clear	= " ",
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
		.set	= "W",
		.clear	= " ",
	},
	/*
	 * XN is a 2-bit field; the four encodings below select which of
	 * privileged (PXN) / unprivileged (UXN) execution is forbidden.
	 * Lower case means execution is permitted at that EL.
	 */
	{
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
		.val	= 0b00UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
		.set	= "px ux ",	/* executable at both */
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
		.val	= 0b01UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
		.set	= "PXNux ",	/* privileged execute-never only */
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
		.val	= 0b10UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
		.set	= "PXNUXN",	/* execute-never at both */
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
		.val	= 0b11UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
		.set	= "px UXN",	/* unprivileged execute-never only */
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_AF,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_AF,
		.set	= "AF",		/* access flag */
		.clear	= "  ",
	},
	{
		.mask	= PMD_TYPE_MASK,
		.val	= PMD_TYPE_SECT,
		.set	= "BLK",	/* block (huge) mapping */
		.clear	= "   ",
	},
};

/*
 * Page-table walker callback: feed every visited leaf entry to the generic
 * ptdump core, which coalesces runs with identical attributes and emits them
 * through the seq_file stored in the parser state.
 */
static int kvm_ptdump_visitor(const struct kvm_pgtable_visit_ctx *ctx,
			      enum kvm_pgtable_walk_flags visit)
{
	struct ptdump_pg_state *st = ctx->arg;
	struct ptdump_state *pt_st = &st->ptdump;

	note_page(pt_st, ctx->addr, ctx->level, ctx->old);

	return 0;
}

/*
 * Populate the per-level descriptors used by the ptdump core: every level
 * from the table's start level downwards shares the same attribute decode
 * table and combined mask, and is labelled with its numeric level.
 *
 * Returns 0 on success, -EINVAL (with a one-shot warning) if the start level
 * leaves fewer than two levels to describe.
 */
static int kvm_ptdump_build_levels(struct ptdump_pg_level *level, u32 start_lvl)
{
	u32 i;
	u64 mask;

	if (WARN_ON_ONCE(start_lvl >= KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	/* Union of all attribute bits any decode entry cares about. */
	mask = 0;
	for (i = 0; i < ARRAY_SIZE(stage2_pte_bits); i++)
		mask |= stage2_pte_bits[i].mask;

	for (i = start_lvl; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		snprintf(level[i].name, sizeof(level[i].name), "%u", i);

		level[i].num	= ARRAY_SIZE(stage2_pte_bits);
		level[i].bits	= stage2_pte_bits;
		level[i].mask	= mask;
	}

	return 0;
}

/*
 * Allocate and initialise the dump state for @kvm's stage-2 tables: build the
 * level descriptors, bound the IPA range to [0, BIT(ia_bits)) and wire the
 * parser_state pointers at their backing arrays inside the same allocation.
 *
 * Returns the new state or an ERR_PTR on allocation/level-build failure.
 * Caller owns the returned memory and frees it with kfree().
 */
static struct kvm_ptdump_guest_state *kvm_ptdump_parser_create(struct kvm *kvm)
{
	struct kvm_ptdump_guest_state *st;
	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
	struct kvm_pgtable *pgtable = mmu->pgt;
	int ret;

	st = kzalloc(sizeof(struct kvm_ptdump_guest_state), GFP_KERNEL_ACCOUNT);
	if (!st)
		return ERR_PTR(-ENOMEM);

	ret = kvm_ptdump_build_levels(&st->level[0], pgtable->start_level);
	if (ret) {
		kfree(st);
		return ERR_PTR(ret);
	}

	/* marker[1] is the unnamed end-of-range sentinel. */
	st->ipa_marker[0].name		= "Guest IPA";
	st->ipa_marker[1].start_address = BIT(pgtable->ia_bits);
	st->range[0].end		= BIT(pgtable->ia_bits);

	st->kvm = kvm;
	st->parser_state = (struct ptdump_pg_state) {
		.marker		= &st->ipa_marker[0],
		.level		= -1,	/* no level seen yet */
		.pg_level	= &st->level[0],
		.ptdump.range	= &st->range[0],
		.start_address	= 0,
	};

	return st;
}

/*
 * seq_file show: walk the whole stage-2 IPA space and print it. The walk runs
 * under the mmu write lock so the tables cannot change underneath it.
 */
static int kvm_ptdump_guest_show(struct seq_file *m, void *unused)
{
	int ret;
	struct kvm_ptdump_guest_state *st = m->private;
	struct kvm *kvm = st->kvm;
	struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
	struct ptdump_pg_state *parser_state = &st->parser_state;
	struct kvm_pgtable_walker walker = (struct kvm_pgtable_walker) {
		.cb	= kvm_ptdump_visitor,
		.arg	= parser_state,
		.flags	= KVM_PGTABLE_WALK_LEAF,
	};

	parser_state->seq = m;

	write_lock(&kvm->mmu_lock);
	ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);
	write_unlock(&kvm->mmu_lock);

	return ret;
}

/*
 * Open: take a reference on the VM (failing with -ENOENT if it is already
 * going away), build the parser state and hand it to single_open(). On any
 * failure the state and the VM reference are released in reverse order.
 */
static int kvm_ptdump_guest_open(struct inode *m, struct file *file)
{
	struct kvm *kvm = m->i_private;
	struct kvm_ptdump_guest_state *st;
	int ret;

	if (!kvm_get_kvm_safe(kvm))
		return -ENOENT;

	st = kvm_ptdump_parser_create(kvm);
	if (IS_ERR(st)) {
		ret = PTR_ERR(st);
		goto err_with_kvm_ref;
	}

	ret = single_open(file, kvm_ptdump_guest_show, st);
	if (!ret)
		return 0;

	kfree(st);
err_with_kvm_ref:
	kvm_put_kvm(kvm);
	return ret;
}

/*
 * Release: free the per-open state, drop the VM reference taken at open time,
 * then let single_release() tear down the seq_file.
 */
static int kvm_ptdump_guest_close(struct inode *m, struct file *file)
{
	struct kvm *kvm = m->i_private;
	void *st = ((struct seq_file *)file->private_data)->private;

	kfree(st);
	kvm_put_kvm(kvm);

	return single_release(m, file);
}

static const struct file_operations kvm_ptdump_guest_fops = {
	.open		= kvm_ptdump_guest_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_ptdump_guest_close,
};

/* Print the guest IPA size in bits (the "ipa_range" file). */
static int kvm_pgtable_range_show(struct seq_file *m, void *unused)
{
	struct kvm_pgtable *pgtable = m->private;

	seq_printf(m, "%2u\n", pgtable->ia_bits);
	return 0;
}

/* Print the number of stage-2 translation levels (the "stage2_levels" file). */
static int kvm_pgtable_levels_show(struct seq_file *m, void *unused)
{
	struct kvm_pgtable *pgtable = m->private;

	seq_printf(m, "%1d\n", KVM_PGTABLE_MAX_LEVELS - pgtable->start_level);
	return 0;
}

/*
 * Shared open helper for the two read-only attribute files: pin the VM, then
 * hand its stage-2 pgtable descriptor to single_open() as seq private data.
 * The reference is dropped on open failure or in kvm_pgtable_debugfs_close().
 */
static int kvm_pgtable_debugfs_open(struct inode *m, struct file *file,
				    int (*show)(struct seq_file *, void *))
{
	struct kvm *kvm = m->i_private;
	struct kvm_pgtable *pgtable;
	int ret;

	if (!kvm_get_kvm_safe(kvm))
		return -ENOENT;

	pgtable = kvm->arch.mmu.pgt;

	ret = single_open(file, show, pgtable);
	if (ret < 0)
		kvm_put_kvm(kvm);
	return ret;
}

static int kvm_pgtable_range_open(struct inode *m, struct file *file)
{
	return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_range_show);
}

static int kvm_pgtable_levels_open(struct inode *m, struct file *file)
{
	return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_levels_show);
}

/* Drop the reference taken by kvm_pgtable_debugfs_open(). */
static int kvm_pgtable_debugfs_close(struct inode *m, struct file *file)
{
	struct kvm *kvm = m->i_private;

	kvm_put_kvm(kvm);
	return single_release(m, file);
}

static const struct file_operations kvm_pgtable_range_fops = {
	.open		= kvm_pgtable_range_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_pgtable_debugfs_close,
};

static const struct file_operations kvm_pgtable_levels_fops = {
	.open		= kvm_pgtable_levels_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_pgtable_debugfs_close,
};

/*
 * Create the per-VM debugfs entries (root-readable only) under the VM's
 * debugfs directory. debugfs creation errors are intentionally ignored.
 */
void kvm_s2_ptdump_create_debugfs(struct kvm *kvm)
{
	debugfs_create_file("stage2_page_tables", 0400, kvm->debugfs_dentry,
			    kvm, &kvm_ptdump_guest_fops);
	debugfs_create_file("ipa_range", 0400, kvm->debugfs_dentry, kvm,
			    &kvm_pgtable_range_fops);
	debugfs_create_file("stage2_levels", 0400, kvm->debugfs_dentry,
			    kvm, &kvm_pgtable_levels_fops);
}