1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Debug helper used to dump the stage-2 pagetables of the system and their
4 * associated permissions.
5 *
6 * Copyright (C) Google, 2024
7 * Author: Sebastian Ene <sebastianene@google.com>
8 */
9 #include <linux/debugfs.h>
10 #include <linux/kvm_host.h>
11 #include <linux/seq_file.h>
12
13 #include <asm/kvm_mmu.h>
14 #include <asm/kvm_pgtable.h>
15 #include <asm/ptdump.h>
16
/* Two markers/ranges: the guest IPA region and its end sentinel. */
#define MARKERS_LEN		2
/* Levels are 0..KVM_PGTABLE_LAST_LEVEL inclusive. */
#define KVM_PGTABLE_MAX_LEVELS	(KVM_PGTABLE_LAST_LEVEL + 1)

/*
 * Per-open parser state for dumping one VM's stage-2 tables.  The
 * parser_state holds pointers into the ipa_marker/level/range arrays
 * below, so this struct must stay alive for the lifetime of the walk
 * (it is freed on file release).
 */
struct kvm_ptdump_guest_state {
	struct kvm		*kvm;			/* VM being dumped; holds a reference */
	struct ptdump_pg_state	parser_state;		/* core ptdump parser state */
	struct addr_marker	ipa_marker[MARKERS_LEN];
	struct ptdump_pg_level	level[KVM_PGTABLE_MAX_LEVELS];
	struct ptdump_range	range[MARKERS_LEN];
};
27
/*
 * Decoding table mapping stage-2 PTE attribute bits to the letters
 * printed for each mapping.  PTE_VALID is folded into most masks so
 * that attribute letters are only shown for valid entries.
 */
static const struct ptdump_prot_bits stage2_pte_bits[] = {
	{
		/* Invalid (faulting) entries are marked "F". */
		.mask	= PTE_VALID,
		.val	= PTE_VALID,
		.set	= " ",
		.clear	= "F",
	}, {
		/* Stage-2 read permission. */
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | PTE_VALID,
		.set	= "R",
		.clear	= " ",
	}, {
		/* Stage-2 write permission. */
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | PTE_VALID,
		.set	= "W",
		.clear	= " ",
	}, {
		/* Execute-never clear on a valid entry => executable ("X"). */
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN | PTE_VALID,
		.val	= PTE_VALID,
		.set	= " ",
		.clear	= "X",
	}, {
		/* Access flag. */
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_AF | PTE_VALID,
		.set	= "AF",
		.clear	= " ",
	}, {
		/* Valid entry without the table bit is a block mapping. */
		.mask	= PTE_TABLE_BIT | PTE_VALID,
		.val	= PTE_VALID,
		.set	= "BLK",
		.clear	= " ",
	},
};
61
kvm_ptdump_visitor(const struct kvm_pgtable_visit_ctx * ctx,enum kvm_pgtable_walk_flags visit)62 static int kvm_ptdump_visitor(const struct kvm_pgtable_visit_ctx *ctx,
63 enum kvm_pgtable_walk_flags visit)
64 {
65 struct ptdump_pg_state *st = ctx->arg;
66 struct ptdump_state *pt_st = &st->ptdump;
67
68 note_page(pt_st, ctx->addr, ctx->level, ctx->old);
69
70 return 0;
71 }
72
/*
 * Populate the per-level descriptors from @start_lvl to the last level,
 * all sharing the stage2_pte_bits decoding table and the union of its
 * attribute masks.  Returns -EINVAL for an out-of-range start level.
 */
static int kvm_ptdump_build_levels(struct ptdump_pg_level *level, u32 start_lvl)
{
	u64 combined_mask = 0;
	u32 lvl, i;

	if (WARN_ON_ONCE(start_lvl >= KVM_PGTABLE_LAST_LEVEL))
		return -EINVAL;

	/* Union of every attribute bit the decoder cares about. */
	for (i = 0; i < ARRAY_SIZE(stage2_pte_bits); i++)
		combined_mask |= stage2_pte_bits[i].mask;

	for (lvl = start_lvl; lvl < KVM_PGTABLE_MAX_LEVELS; lvl++) {
		struct ptdump_pg_level *l = &level[lvl];

		snprintf(l->name, sizeof(l->name), "%u", lvl);
		l->num	= ARRAY_SIZE(stage2_pte_bits);
		l->bits	= stage2_pte_bits;
		l->mask	= combined_mask;
	}

	return 0;
}
95
kvm_ptdump_parser_create(struct kvm * kvm)96 static struct kvm_ptdump_guest_state *kvm_ptdump_parser_create(struct kvm *kvm)
97 {
98 struct kvm_ptdump_guest_state *st;
99 struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
100 struct kvm_pgtable *pgtable = mmu->pgt;
101 int ret;
102
103 st = kzalloc(sizeof(struct kvm_ptdump_guest_state), GFP_KERNEL_ACCOUNT);
104 if (!st)
105 return ERR_PTR(-ENOMEM);
106
107 ret = kvm_ptdump_build_levels(&st->level[0], pgtable->start_level);
108 if (ret) {
109 kfree(st);
110 return ERR_PTR(ret);
111 }
112
113 st->ipa_marker[0].name = "Guest IPA";
114 st->ipa_marker[1].start_address = BIT(pgtable->ia_bits);
115 st->range[0].end = BIT(pgtable->ia_bits);
116
117 st->kvm = kvm;
118 st->parser_state = (struct ptdump_pg_state) {
119 .marker = &st->ipa_marker[0],
120 .level = -1,
121 .pg_level = &st->level[0],
122 .ptdump.range = &st->range[0],
123 .start_address = 0,
124 };
125
126 return st;
127 }
128
kvm_ptdump_guest_show(struct seq_file * m,void * unused)129 static int kvm_ptdump_guest_show(struct seq_file *m, void *unused)
130 {
131 int ret;
132 struct kvm_ptdump_guest_state *st = m->private;
133 struct kvm *kvm = st->kvm;
134 struct kvm_s2_mmu *mmu = &kvm->arch.mmu;
135 struct ptdump_pg_state *parser_state = &st->parser_state;
136 struct kvm_pgtable_walker walker = (struct kvm_pgtable_walker) {
137 .cb = kvm_ptdump_visitor,
138 .arg = parser_state,
139 .flags = KVM_PGTABLE_WALK_LEAF,
140 };
141
142 parser_state->seq = m;
143
144 write_lock(&kvm->mmu_lock);
145 ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);
146 write_unlock(&kvm->mmu_lock);
147
148 return ret;
149 }
150
kvm_ptdump_guest_open(struct inode * m,struct file * file)151 static int kvm_ptdump_guest_open(struct inode *m, struct file *file)
152 {
153 struct kvm *kvm = m->i_private;
154 struct kvm_ptdump_guest_state *st;
155 int ret;
156
157 if (!kvm_get_kvm_safe(kvm))
158 return -ENOENT;
159
160 st = kvm_ptdump_parser_create(kvm);
161 if (IS_ERR(st)) {
162 ret = PTR_ERR(st);
163 goto err_with_kvm_ref;
164 }
165
166 ret = single_open(file, kvm_ptdump_guest_show, st);
167 if (!ret)
168 return 0;
169
170 kfree(st);
171 err_with_kvm_ref:
172 kvm_put_kvm(kvm);
173 return ret;
174 }
175
kvm_ptdump_guest_close(struct inode * m,struct file * file)176 static int kvm_ptdump_guest_close(struct inode *m, struct file *file)
177 {
178 struct kvm *kvm = m->i_private;
179 void *st = ((struct seq_file *)file->private_data)->private;
180
181 kfree(st);
182 kvm_put_kvm(kvm);
183
184 return single_release(m, file);
185 }
186
/* fops for "stage2_page_tables"; open/close manage the VM reference. */
static const struct file_operations kvm_ptdump_guest_fops = {
	.open		= kvm_ptdump_guest_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_ptdump_guest_close,
};
193
kvm_pgtable_range_show(struct seq_file * m,void * unused)194 static int kvm_pgtable_range_show(struct seq_file *m, void *unused)
195 {
196 struct kvm_pgtable *pgtable = m->private;
197
198 seq_printf(m, "%2u\n", pgtable->ia_bits);
199 return 0;
200 }
201
kvm_pgtable_levels_show(struct seq_file * m,void * unused)202 static int kvm_pgtable_levels_show(struct seq_file *m, void *unused)
203 {
204 struct kvm_pgtable *pgtable = m->private;
205
206 seq_printf(m, "%1d\n", KVM_PGTABLE_MAX_LEVELS - pgtable->start_level);
207 return 0;
208 }
209
kvm_pgtable_debugfs_open(struct inode * m,struct file * file,int (* show)(struct seq_file *,void *))210 static int kvm_pgtable_debugfs_open(struct inode *m, struct file *file,
211 int (*show)(struct seq_file *, void *))
212 {
213 struct kvm *kvm = m->i_private;
214 struct kvm_pgtable *pgtable;
215 int ret;
216
217 if (!kvm_get_kvm_safe(kvm))
218 return -ENOENT;
219
220 pgtable = kvm->arch.mmu.pgt;
221
222 ret = single_open(file, show, pgtable);
223 if (ret < 0)
224 kvm_put_kvm(kvm);
225 return ret;
226 }
227
/* Open handler for the "ipa_range" debugfs file. */
static int kvm_pgtable_range_open(struct inode *m, struct file *file)
{
	return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_range_show);
}
232
/* Open handler for the "stage2_levels" debugfs file. */
static int kvm_pgtable_levels_open(struct inode *m, struct file *file)
{
	return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_levels_show);
}
237
kvm_pgtable_debugfs_close(struct inode * m,struct file * file)238 static int kvm_pgtable_debugfs_close(struct inode *m, struct file *file)
239 {
240 struct kvm *kvm = m->i_private;
241
242 kvm_put_kvm(kvm);
243 return single_release(m, file);
244 }
245
/* fops for "ipa_range"; open/close manage the VM reference. */
static const struct file_operations kvm_pgtable_range_fops = {
	.open		= kvm_pgtable_range_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_pgtable_debugfs_close,
};

/* fops for "stage2_levels"; open/close manage the VM reference. */
static const struct file_operations kvm_pgtable_levels_fops = {
	.open		= kvm_pgtable_levels_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_pgtable_debugfs_close,
};
259
kvm_s2_ptdump_create_debugfs(struct kvm * kvm)260 void kvm_s2_ptdump_create_debugfs(struct kvm *kvm)
261 {
262 debugfs_create_file("stage2_page_tables", 0400, kvm->debugfs_dentry,
263 kvm, &kvm_ptdump_guest_fops);
264 debugfs_create_file("ipa_range", 0400, kvm->debugfs_dentry, kvm,
265 &kvm_pgtable_range_fops);
266 debugfs_create_file("stage2_levels", 0400, kvm->debugfs_dentry,
267 kvm, &kvm_pgtable_levels_fops);
268 }
269