xref: /linux/arch/arm64/kvm/vgic/vgic-debug.c (revision 6e7fd890f1d6ac83805409e9c346240de2705584)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Linaro
 * Author: Christoffer Dall <christoffer.dall@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/seq_file.h>
#include <kvm/arm_vgic.h>
#include <asm/kvm_mmu.h>
#include "vgic.h"

/*
 * Structure to control looping through the entire vgic state.  We start at
 * zero for each field and move upwards.  So, if dist_id is 0 we print the
 * distributor info.  When dist_id is 1, we have already printed it and move
 * on.
 *
 * When vcpu_id < nr_cpus we print the vcpu info until vcpu_id == nr_cpus and
 * so on.
 */
struct vgic_state_iter {
	int nr_cpus;
	int nr_spis;
	int nr_lpis;
	int dist_id;
	int vcpu_id;
	unsigned long intid;
	int lpi_idx;
};

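/*
 * Advance the iterator by one position: the distributor comes first, then
 * the private IRQs (SGIs/PPIs) of each vCPU in turn, then the SPIs.  Once
 * the sequentially-allocated INTID space is exhausted, the LPI xarray
 * drives the walk through the remaining (marked) LPIs.
 */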
static void iter_next(struct kvm *kvm, struct vgic_state_iter *iter)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (iter->dist_id == 0) {
		iter->dist_id++;
		return;
	}

	/*
	 * Let the xarray drive the iterator after the last SPI, as the iterator
	 * has exhausted the sequentially-allocated INTID space.
	 */
	if (iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS - 1) &&
	    iter->nr_lpis) {
		if (iter->lpi_idx < iter->nr_lpis)
			xa_find_after(&dist->lpi_xa, &iter->intid,
				      VGIC_LPI_MAX_INTID,
				      LPI_XA_MARK_DEBUG_ITER);
		iter->lpi_idx++;
		return;
	}

	iter->intid++;
	if (iter->intid == VGIC_NR_PRIVATE_IRQS &&
	    ++iter->vcpu_id < iter->nr_cpus)
		iter->intid = 0;
}

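/*
 * Pin every LPI currently in the xarray by taking a reference on it and
 * tagging it with LPI_XA_MARK_DEBUG_ITER, so that the walk operates on a
 * stable set even if LPIs are freed or mapped concurrently.  Returns the
 * number of LPIs marked.
 */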
static int iter_mark_lpis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long intid;
	int nr_lpis = 0;

	xa_for_each(&dist->lpi_xa, intid, irq) {
		if (!vgic_try_get_irq_kref(irq))
			continue;

		xa_set_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
		nr_lpis++;
	}

	return nr_lpis;
}

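/*
 * Undo iter_mark_lpis(): walk the LPIs that still carry the debug-iterator
 * mark, clear the mark and drop the reference that was taken on them.  LPIs
 * that could not be pinned were never marked and are left alone.
 */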
static void iter_unmark_lpis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq;
	unsigned long intid;

	/* Only put the LPIs we actually got a reference on in iter_mark_lpis() */
	xa_for_each_marked(&dist->lpi_xa, intid, irq, LPI_XA_MARK_DEBUG_ITER) {
		xa_clear_mark(&dist->lpi_xa, intid, LPI_XA_MARK_DEBUG_ITER);
		vgic_put_irq(kvm, irq);
	}
}

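/*
 * Initialise the iterator with a snapshot of the number of vCPUs and SPIs,
 * pin the LPIs on GICv3, and fast-forward to the requested seq_file
 * position.
 */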
static void iter_init(struct kvm *kvm, struct vgic_state_iter *iter,
		      loff_t pos)
{
	int nr_cpus = atomic_read(&kvm->online_vcpus);

	memset(iter, 0, sizeof(*iter));

	iter->nr_cpus = nr_cpus;
	iter->nr_spis = kvm->arch.vgic.nr_spis;
	if (kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
		iter->nr_lpis = iter_mark_lpis(kvm);

	/* Fast forward to the right position if needed */
	while (pos--)
		iter_next(kvm, iter);
}

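/*
 * True once the distributor, every vCPU's private IRQs, all SPIs and all
 * marked LPIs have been printed.
 */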
static bool end_of_vgic(struct vgic_state_iter *iter)
{
	return iter->dist_id > 0 &&
		iter->vcpu_id == iter->nr_cpus &&
		iter->intid >= (iter->nr_spis + VGIC_NR_PRIVATE_IRQS) &&
		(!iter->nr_lpis || iter->lpi_idx > iter->nr_lpis);
}

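/*
 * seq_file start operation: allocate and initialise the iterator under the
 * config_lock.  Only one walk can be in flight per VM; a concurrent reader
 * gets -EBUSY while kvm->arch.vgic.iter is in use.
 */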
static void *vgic_debug_start(struct seq_file *s, loff_t *pos)
{
	struct kvm *kvm = s->private;
	struct vgic_state_iter *iter;

	mutex_lock(&kvm->arch.config_lock);
	iter = kvm->arch.vgic.iter;
	if (iter) {
		iter = ERR_PTR(-EBUSY);
		goto out;
	}

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter) {
		iter = ERR_PTR(-ENOMEM);
		goto out;
	}

	iter_init(kvm, iter, *pos);
	kvm->arch.vgic.iter = iter;

	if (end_of_vgic(iter))
		iter = NULL;
out:
	mutex_unlock(&kvm->arch.config_lock);
	return iter;
}

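/* seq_file next operation: step the iterator, returning NULL at the end. */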
static void *vgic_debug_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kvm *kvm = s->private;
	struct vgic_state_iter *iter = kvm->arch.vgic.iter;

	++*pos;
	iter_next(kvm, iter);
	if (end_of_vgic(iter))
		iter = NULL;
	return iter;
}

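/*
 * seq_file stop operation: drop the LPI marks and references, free the
 * iterator and allow a new walk to start.
 */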
static void vgic_debug_stop(struct seq_file *s, void *v)
{
	struct kvm *kvm = s->private;
	struct vgic_state_iter *iter;

	/*
	 * If the seq file wasn't properly opened, there's nothing to clean
	 * up.
	 */
	if (IS_ERR(v))
		return;

	mutex_lock(&kvm->arch.config_lock);
	iter = kvm->arch.vgic.iter;
	iter_unmark_lpis(kvm);
	kfree(iter);
	kvm->arch.vgic.iter = NULL;
	mutex_unlock(&kvm->arch.config_lock);
}

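/*
 * Print the distributor summary followed by the legend for the per-IRQ
 * flag columns.
 */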
static void print_dist_state(struct seq_file *s, struct vgic_dist *dist,
			     struct vgic_state_iter *iter)
{
	bool v3 = dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3;

	seq_printf(s, "Distributor\n");
	seq_printf(s, "===========\n");
	seq_printf(s, "vgic_model:\t%s\n", v3 ? "GICv3" : "GICv2");
	seq_printf(s, "nr_spis:\t%d\n", dist->nr_spis);
	if (v3)
		seq_printf(s, "nr_lpis:\t%d\n", iter->nr_lpis);
	seq_printf(s, "enabled:\t%d\n", dist->enabled);
	seq_printf(s, "\n");

	seq_printf(s, "P=pending_latch, L=line_level, A=active\n");
	seq_printf(s, "E=enabled, H=hw, C=config (level=1, edge=0)\n");
	seq_printf(s, "G=group\n");
}

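/*
 * Print a column header: private IRQs get a per-vCPU "VCPU" banner, shared
 * interrupts a single "SPI" banner.
 */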
static void print_header(struct seq_file *s, struct vgic_irq *irq,
			 struct kvm_vcpu *vcpu)
{
	int id = 0;
	char *hdr = "SPI ";

	if (vcpu) {
		hdr = "VCPU";
		id = vcpu->vcpu_idx;
	}

	seq_printf(s, "\n");
	seq_printf(s, "%s%2d TYP   ID TGT_ID PLAEHCG     HWID   TARGET SRC PRI VCPU_ID\n", hdr, id);
	seq_printf(s, "----------------------------------------------------------------\n");
}

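/*
 * Print one row of per-interrupt state.  For HW-mapped SGIs the pending
 * state lives in the GIC, so it is read back with irq_get_irqchip_state()
 * instead of using pending_latch.
 */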
static void print_irq_state(struct seq_file *s, struct vgic_irq *irq,
			    struct kvm_vcpu *vcpu)
{
	char *type;
	bool pending;

	if (irq->intid < VGIC_NR_SGIS)
		type = "SGI";
	else if (irq->intid < VGIC_NR_PRIVATE_IRQS)
		type = "PPI";
	else if (irq->intid < VGIC_MAX_SPI)
		type = "SPI";
	else
		type = "LPI";

	if (irq->intid == 0 || irq->intid == VGIC_NR_PRIVATE_IRQS)
		print_header(s, irq, vcpu);

	pending = irq->pending_latch;
	if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
		int err;

		err = irq_get_irqchip_state(irq->host_irq,
					    IRQCHIP_STATE_PENDING,
					    &pending);
		WARN_ON_ONCE(err);
	}

	seq_printf(s, "       %s %4d "
		      "    %2d "
		      "%d%d%d%d%d%d%d "
		      "%8d "
		      "%8x "
		      " %2x "
		      "%3d "
		      "     %2d "
		      "\n",
			type, irq->intid,
			(irq->target_vcpu) ? irq->target_vcpu->vcpu_idx : -1,
			pending,
			irq->line_level,
			irq->active,
			irq->enabled,
			irq->hw,
			irq->config == VGIC_CONFIG_LEVEL,
			irq->group,
			irq->hwintid,
			irq->mpidr,
			irq->source,
			irq->priority,
			(irq->vcpu) ? irq->vcpu->vcpu_idx : -1);
}

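/*
 * seq_file show operation: print either the distributor block or the state
 * of the interrupt the iterator currently points at, with its irq_lock held.
 */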
static int vgic_debug_show(struct seq_file *s, void *v)
{
	struct kvm *kvm = s->private;
	struct vgic_state_iter *iter = v;
	struct vgic_irq *irq;
	struct kvm_vcpu *vcpu = NULL;
	unsigned long flags;

	if (iter->dist_id == 0) {
		print_dist_state(s, &kvm->arch.vgic, iter);
		return 0;
	}

	if (!kvm->arch.vgic.initialized)
		return 0;

	if (iter->vcpu_id < iter->nr_cpus)
		vcpu = kvm_get_vcpu(kvm, iter->vcpu_id);

	/*
	 * Expect this to succeed, as iter_mark_lpis() takes a reference on
	 * every LPI to be visited.
	 */
	irq = vgic_get_irq(kvm, vcpu, iter->intid);
	if (WARN_ON_ONCE(!irq))
		return -EINVAL;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	print_irq_state(s, irq, vcpu);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(kvm, irq);
	return 0;
}

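/*
 * Hook the iterator into the seq_file machinery; DEFINE_SEQ_ATTRIBUTE()
 * below generates the matching open handler and vgic_debug_fops.
 */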
static const struct seq_operations vgic_debug_sops = {
	.start = vgic_debug_start,
	.next  = vgic_debug_next,
	.stop  = vgic_debug_stop,
	.show  = vgic_debug_show
};

DEFINE_SEQ_ATTRIBUTE(vgic_debug);

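/*
 * Expose the walk as a read-only "vgic-state" file in the VM's debugfs
 * directory.
 */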
void vgic_debug_init(struct kvm *kvm)
{
	debugfs_create_file("vgic-state", 0444, kvm->debugfs_dentry, kvm,
			    &vgic_debug_fops);
}

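/*
 * Nothing to tear down here: the "vgic-state" file lives in the per-VM
 * debugfs directory, which is removed when the VM itself is destroyed.
 */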
void vgic_debug_destroy(struct kvm *kvm)
{
}