xref: /linux/arch/arm64/kvm/ptdump.c (revision 5ea5880764cbb164afb17a62e76ca75dc371409d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Debug helper used to dump the stage-2 pagetables of the system and their
4  * associated permissions.
5  *
6  * Copyright (C) Google, 2024
7  * Author: Sebastian Ene <sebastianene@google.com>
8  */
9 #include <linux/debugfs.h>
10 #include <linux/kvm_host.h>
11 #include <linux/seq_file.h>
12 
13 #include <asm/cpufeature.h>
14 #include <asm/kvm_mmu.h>
15 #include <asm/kvm_pgtable.h>
16 #include <asm/ptdump.h>
17 
18 #define MARKERS_LEN		2
19 #define KVM_PGTABLE_MAX_LEVELS	(KVM_PGTABLE_LAST_LEVEL + 1)
20 #define S2FNAMESZ		sizeof("0x0123456789abcdef-0x0123456789abcdef-s2-disabled")
21 
/*
 * Per-open state for one stage-2 dump: the MMU being walked plus the
 * state consumed by the generic ptdump machinery.
 */
struct kvm_ptdump_guest_state {
	struct kvm_s2_mmu	*mmu;		/* stage-2 MMU whose tables are dumped */
	struct ptdump_pg_state	parser_state;	/* generic ptdump walker state, reset per show() */
	struct addr_marker	ipa_marker[MARKERS_LEN];	/* [0]: "Guest IPA" label, [1]: end-of-IPA-space address */
	struct ptdump_pg_level	level[KVM_PGTABLE_MAX_LEVELS];	/* per-level attribute decode tables */
};
28 
/*
 * Decode table for stage-2 descriptor attributes: each entry matches a
 * masked field of the PTE and supplies the string printed when the field
 * equals .val (.set) or not (.clear, where present).
 */
static const struct ptdump_prot_bits stage2_pte_bits[] = {
	{
		/* Invalid descriptors are flagged 'F' (fault) */
		.mask	= PTE_VALID,
		.val	= PTE_VALID,
		.set	= " ",
		.clear	= "F",
	},
	{
		/* Stage-2 read permission */
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R,
		.set	= "R",
		.clear	= " ",
	},
	{
		/* Stage-2 write permission */
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
		.set	= "W",
		.clear	= " ",
	},
	/*
	 * The four XN[1:0] encodings below decode execute-never for
	 * privileged (PXN) and unprivileged (UXN) execution; exactly one
	 * of them matches any given leaf.
	 */
	{
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
		.val	= 0b00UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
		.set	= "px ux ",
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
		.val	= 0b01UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
		.set	= "PXNux ",
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
		.val	= 0b10UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
		.set	= "PXNUXN",
	},
	{
		.mask	= KVM_PTE_LEAF_ATTR_HI_S2_XN,
		.val	= 0b11UL << __bf_shf(KVM_PTE_LEAF_ATTR_HI_S2_XN),
		.set	= "px UXN",
	},
	{
		/* Access flag */
		.mask	= KVM_PTE_LEAF_ATTR_LO_S2_AF,
		.val	= KVM_PTE_LEAF_ATTR_LO_S2_AF,
		.set	= "AF",
		.clear	= "  ",
	},
	{
		/* Block (non-table) descriptor */
		.mask	= PMD_TYPE_MASK,
		.val	= PMD_TYPE_SECT,
		.set	= "BLK",
		.clear	= "   ",
	},
};
81 
82 static int kvm_ptdump_visitor(const struct kvm_pgtable_visit_ctx *ctx,
83 			      enum kvm_pgtable_walk_flags visit)
84 {
85 	struct ptdump_pg_state *st = ctx->arg;
86 	struct ptdump_state *pt_st = &st->ptdump;
87 
88 	note_page(pt_st, ctx->addr, ctx->level, ctx->old);
89 
90 	return 0;
91 }
92 
93 static int kvm_ptdump_build_levels(struct ptdump_pg_level *level, u32 start_lvl)
94 {
95 	u32 i;
96 	u64 mask;
97 
98 	if (WARN_ON_ONCE(start_lvl >= KVM_PGTABLE_LAST_LEVEL))
99 		return -EINVAL;
100 
101 	mask = 0;
102 	for (i = 0; i < ARRAY_SIZE(stage2_pte_bits); i++)
103 		mask |= stage2_pte_bits[i].mask;
104 
105 	for (i = start_lvl; i < KVM_PGTABLE_MAX_LEVELS; i++) {
106 		snprintf(level[i].name, sizeof(level[i].name), "%u", i);
107 
108 		level[i].num	= ARRAY_SIZE(stage2_pte_bits);
109 		level[i].bits	= stage2_pte_bits;
110 		level[i].mask	= mask;
111 	}
112 
113 	return 0;
114 }
115 
116 static struct kvm_ptdump_guest_state *kvm_ptdump_parser_create(struct kvm_s2_mmu *mmu)
117 {
118 	struct kvm_ptdump_guest_state *st;
119 	struct kvm_pgtable *pgtable = mmu->pgt;
120 	int ret;
121 
122 	st = kzalloc_obj(struct kvm_ptdump_guest_state, GFP_KERNEL_ACCOUNT);
123 	if (!st)
124 		return ERR_PTR(-ENOMEM);
125 
126 	ret = kvm_ptdump_build_levels(&st->level[0], pgtable->start_level);
127 	if (ret) {
128 		kfree(st);
129 		return ERR_PTR(ret);
130 	}
131 
132 	st->ipa_marker[0].name		= "Guest IPA";
133 	st->ipa_marker[1].start_address = BIT(pgtable->ia_bits);
134 
135 	st->mmu				= mmu;
136 	return st;
137 }
138 
139 static int kvm_ptdump_guest_show(struct seq_file *m, void *unused)
140 {
141 	int ret;
142 	struct kvm_ptdump_guest_state *st = m->private;
143 	struct kvm_s2_mmu *mmu = st->mmu;
144 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
145 	struct kvm_pgtable_walker walker = (struct kvm_pgtable_walker) {
146 		.cb	= kvm_ptdump_visitor,
147 		.arg	= &st->parser_state,
148 		.flags	= KVM_PGTABLE_WALK_LEAF,
149 	};
150 
151 	st->parser_state = (struct ptdump_pg_state) {
152 		.marker		= &st->ipa_marker[0],
153 		.level		= -1,
154 		.pg_level	= &st->level[0],
155 		.seq		= m,
156 	};
157 
158 	write_lock(&kvm->mmu_lock);
159 	ret = kvm_pgtable_walk(mmu->pgt, 0, BIT(mmu->pgt->ia_bits), &walker);
160 	write_unlock(&kvm->mmu_lock);
161 
162 	return ret;
163 }
164 
165 static int kvm_ptdump_guest_open(struct inode *m, struct file *file)
166 {
167 	struct kvm_s2_mmu *mmu = m->i_private;
168 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
169 	struct kvm_ptdump_guest_state *st;
170 	int ret;
171 
172 	if (!kvm_get_kvm_safe(kvm))
173 		return -ENOENT;
174 
175 	st = kvm_ptdump_parser_create(mmu);
176 	if (IS_ERR(st)) {
177 		ret = PTR_ERR(st);
178 		goto err_with_kvm_ref;
179 	}
180 
181 	ret = single_open(file, kvm_ptdump_guest_show, st);
182 	if (!ret)
183 		return 0;
184 
185 	kfree(st);
186 err_with_kvm_ref:
187 	kvm_put_kvm(kvm);
188 	return ret;
189 }
190 
/*
 * release() counterpart of kvm_ptdump_guest_open(): free the parser
 * state allocated at open time, drop the VM reference and tear down the
 * seq_file.
 */
static int kvm_ptdump_guest_close(struct inode *m, struct file *file)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(m->i_private);
	/* seq_file private data is the kvm_ptdump_guest_state set in open() */
	void *st = ((struct seq_file *)file->private_data)->private;

	kfree(st);
	kvm_put_kvm(kvm);

	return single_release(m, file);
}
201 
/* fops for the per-VM "stage2_page_tables" (and nested shadow) dump files */
static const struct file_operations kvm_ptdump_guest_fops = {
	.open		= kvm_ptdump_guest_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_ptdump_guest_close,
};
208 
209 static int kvm_pgtable_range_show(struct seq_file *m, void *unused)
210 {
211 	struct kvm_pgtable *pgtable = m->private;
212 
213 	seq_printf(m, "%2u\n", pgtable->ia_bits);
214 	return 0;
215 }
216 
217 static int kvm_pgtable_levels_show(struct seq_file *m, void *unused)
218 {
219 	struct kvm_pgtable *pgtable = m->private;
220 
221 	seq_printf(m, "%1d\n", KVM_PGTABLE_MAX_LEVELS - pgtable->start_level);
222 	return 0;
223 }
224 
225 static int kvm_pgtable_debugfs_open(struct inode *m, struct file *file,
226 				    int (*show)(struct seq_file *, void *))
227 {
228 	struct kvm_s2_mmu *mmu = m->i_private;
229 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
230 	struct kvm_pgtable *pgtable;
231 	int ret;
232 
233 	if (!kvm_get_kvm_safe(kvm))
234 		return -ENOENT;
235 
236 	pgtable = mmu->pgt;
237 
238 	ret = single_open(file, show, pgtable);
239 	if (ret < 0)
240 		kvm_put_kvm(kvm);
241 	return ret;
242 }
243 
/* open() for the "ipa_range" debugfs file */
static int kvm_pgtable_range_open(struct inode *m, struct file *file)
{
	return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_range_show);
}
248 
/* open() for the "stage2_levels" debugfs file */
static int kvm_pgtable_levels_open(struct inode *m, struct file *file)
{
	return kvm_pgtable_debugfs_open(m, file, kvm_pgtable_levels_show);
}
253 
/*
 * release() counterpart of kvm_pgtable_debugfs_open(): drop the VM
 * reference taken at open time and tear down the seq_file.
 */
static int kvm_pgtable_debugfs_close(struct inode *m, struct file *file)
{
	struct kvm *kvm = kvm_s2_mmu_to_kvm(m->i_private);

	kvm_put_kvm(kvm);
	return single_release(m, file);
}
261 
/* fops for the "ipa_range" debugfs file */
static const struct file_operations kvm_pgtable_range_fops = {
	.open		= kvm_pgtable_range_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_pgtable_debugfs_close,
};
268 
/* fops for the "stage2_levels" debugfs file */
static const struct file_operations kvm_pgtable_levels_fops = {
	.open		= kvm_pgtable_levels_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= kvm_pgtable_debugfs_close,
};
275 
276 void kvm_nested_s2_ptdump_create_debugfs(struct kvm_s2_mmu *mmu)
277 {
278 	struct dentry *dent;
279 	char file_name[S2FNAMESZ];
280 
281 	snprintf(file_name, sizeof(file_name), "0x%016llx-0x%016llx-s2-%sabled",
282 		 mmu->tlb_vttbr,
283 		 mmu->tlb_vtcr,
284 		 mmu->nested_stage2_enabled ? "en" : "dis");
285 
286 	dent = debugfs_create_file(file_name, 0400,
287 				   mmu->arch->debugfs_nv_dentry, mmu,
288 				   &kvm_ptdump_guest_fops);
289 
290 	mmu->shadow_pt_debugfs_dentry = dent;
291 }
292 
/* Remove the dump file created by kvm_nested_s2_ptdump_create_debugfs(). */
void kvm_nested_s2_ptdump_remove_debugfs(struct kvm_s2_mmu *mmu)
{
	debugfs_remove(mmu->shadow_pt_debugfs_dentry);
}
297 
/*
 * Create the per-VM stage-2 ptdump debugfs entries: the full table dump,
 * the IPA range and the level count, plus (with nested virt) the
 * "nested" directory that will hold per-shadow-MMU dump files.
 */
void kvm_s2_ptdump_create_debugfs(struct kvm *kvm)
{
	debugfs_create_file("stage2_page_tables", 0400, kvm->debugfs_dentry,
			    &kvm->arch.mmu, &kvm_ptdump_guest_fops);
	debugfs_create_file("ipa_range", 0400, kvm->debugfs_dentry,
			    &kvm->arch.mmu, &kvm_pgtable_range_fops);
	debugfs_create_file("stage2_levels", 0400, kvm->debugfs_dentry,
			    &kvm->arch.mmu, &kvm_pgtable_levels_fops);
	if (cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
		kvm->arch.debugfs_nv_dentry = debugfs_create_dir("nested", kvm->debugfs_dentry);
}
309