// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019 Western Digital Corporation or its affiliates.
 *
 * Authors:
 *     Atish Patra <atish.patra@wdc.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <asm/sbi.h>
#include <asm/kvm_vcpu_sbi.h>
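
/*
 * Placeholder extension entries used when the corresponding SBI extension
 * support is not compiled into the kernel. The invalid extension ID range
 * and NULL handler make them effectively inert.
 */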
#ifndef CONFIG_RISCV_SBI_V01
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01 = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

#ifndef CONFIG_RISCV_PMU_SBI
static const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu = {
	.extid_start = -1UL,
	.extid_end = -1UL,
	.handler = NULL,
};
#endif

struct kvm_riscv_sbi_extension_entry {
	enum KVM_RISCV_SBI_EXT_ID ext_idx;
	const struct kvm_vcpu_sbi_extension *ext_ptr;
};
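
/* Table mapping KVM SBI extension IDs (ext_idx) to their implementations (ext_ptr). */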
static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = {
	{
		.ext_idx = KVM_RISCV_SBI_EXT_V01,
		.ext_ptr = &vcpu_sbi_ext_v01,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_MAX, /* Can't be disabled */
		.ext_ptr = &vcpu_sbi_ext_base,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_TIME,
		.ext_ptr = &vcpu_sbi_ext_time,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_IPI,
		.ext_ptr = &vcpu_sbi_ext_ipi,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_RFENCE,
		.ext_ptr = &vcpu_sbi_ext_rfence,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SRST,
		.ext_ptr = &vcpu_sbi_ext_srst,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_HSM,
		.ext_ptr = &vcpu_sbi_ext_hsm,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_PMU,
		.ext_ptr = &vcpu_sbi_ext_pmu,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_DBCN,
		.ext_ptr = &vcpu_sbi_ext_dbcn,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_SUSP,
		.ext_ptr = &vcpu_sbi_ext_susp,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_STA,
		.ext_ptr = &vcpu_sbi_ext_sta,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL,
		.ext_ptr = &vcpu_sbi_ext_experimental,
	},
	{
		.ext_idx = KVM_RISCV_SBI_EXT_VENDOR,
		.ext_ptr = &vcpu_sbi_ext_vendor,
	},
};
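
/*
 * Look up the sbi_ext[] entry for a KVM SBI extension ID, or return NULL
 * if the ID is out of range or has no entry in the table.
 */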
static const struct kvm_riscv_sbi_extension_entry *
riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx)
{
	const struct kvm_riscv_sbi_extension_entry *sext = NULL;

	if (idx >= KVM_RISCV_SBI_EXT_MAX)
		return NULL;

	for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		if (sbi_ext[i].ext_idx == idx) {
			sext = &sbi_ext[i];
			break;
		}
	}

	return sext;
}
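
/* Return true if the extension exists and is available on this vCPU. */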
bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, idx);

	return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
}
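
/*
 * Forward the current SBI call to userspace via a KVM_EXIT_RISCV_SBI exit,
 * with a default return value of SBI_ERR_NOT_SUPPORTED that userspace may
 * overwrite before resuming the vCPU.
 */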
void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	vcpu->arch.sbi_context.return_handled = 0;
	vcpu->stat.ecall_exit_stat++;
	run->exit_reason = KVM_EXIT_RISCV_SBI;
	run->riscv_sbi.extension_id = cp->a7;
	run->riscv_sbi.function_id = cp->a6;
	run->riscv_sbi.args[0] = cp->a0;
	run->riscv_sbi.args[1] = cp->a1;
	run->riscv_sbi.args[2] = cp->a2;
	run->riscv_sbi.args[3] = cp->a3;
	run->riscv_sbi.args[4] = cp->a4;
	run->riscv_sbi.args[5] = cp->a5;
	run->riscv_sbi.ret[0] = SBI_ERR_NOT_SUPPORTED;
	run->riscv_sbi.ret[1] = 0;
}
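
/*
 * Stop all vCPUs of the VM and report a system-level event (such as
 * shutdown or reset) to userspace via KVM_EXIT_SYSTEM_EVENT.
 */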
void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
				     struct kvm_run *run,
				     u32 type, u64 reason)
{
	unsigned long i;
	struct kvm_vcpu *tmp;

	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
		spin_lock(&tmp->arch.mp_state_lock);
		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
		spin_unlock(&tmp->arch.mp_state_lock);
	}
	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);

	memset(&run->system_event, 0, sizeof(run->system_event));
	run->system_event.type = type;
	run->system_event.ndata = 1;
	run->system_event.data[0] = reason;
	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
}
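
/*
 * Record the entry point and argument for a vCPU reset and raise
 * KVM_REQ_VCPU_RESET so the reset is applied before the vCPU runs again.
 */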
void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
				      unsigned long pc, unsigned long a1)
{
	spin_lock(&vcpu->arch.reset_state.lock);
	vcpu->arch.reset_state.pc = pc;
	vcpu->arch.reset_state.a1 = a1;
	spin_unlock(&vcpu->arch.reset_state.lock);

	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
}
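
/*
 * Load the previously requested reset state into the guest context:
 * a0 = vcpu id, sepc = start address, a1 = opaque argument, with
 * guest interrupts disabled and the guest MMU off (vsatp = 0).
 */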
void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;

	cntx->a0 = vcpu->vcpu_id;

	spin_lock(&vcpu->arch.reset_state.lock);
	cntx->sepc = reset_state->pc;
	cntx->a1 = reset_state->a1;
	spin_unlock(&vcpu->arch.reset_state.lock);

	cntx->sstatus &= ~SR_SIE;
	csr->vsatp = 0;
}
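
/*
 * Complete an SBI call that was forwarded to userspace: copy the return
 * values provided in the run structure into a0/a1 and step past the
 * ecall instruction.
 */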
int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;

	/* Handle SBI return only once */
	if (vcpu->arch.sbi_context.return_handled)
		return 0;
	vcpu->arch.sbi_context.return_handled = 1;

	/* Update return values */
	cp->a0 = run->riscv_sbi.ret[0];
	cp->a1 = run->riscv_sbi.ret[1];

	/* Move to next instruction */
	vcpu->arch.guest_context.sepc += 4;

	return 0;
}
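
/*
 * Enable or disable a single SBI extension for this vCPU. Extensions
 * that are unavailable on the vCPU cannot be toggled.
 */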
static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	if (reg_val != 1 && reg_val != 0)
		return -EINVAL;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	scontext->ext_status[sext->ext_idx] = (reg_val) ?
			KVM_RISCV_SBI_EXT_STATUS_ENABLED :
			KVM_RISCV_SBI_EXT_STATUS_DISABLED;

	return 0;
}
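
/* Report whether a single SBI extension is currently enabled. */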
static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu,
					 unsigned long reg_num,
					 unsigned long *reg_val)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *sext;

	sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num);
	if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE)
		return -ENOENT;

	*reg_val = scontext->ext_status[sext->ext_idx] ==
				KVM_RISCV_SBI_EXT_STATUS_ENABLED;

	return 0;
}
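
/*
 * Enable or disable every extension whose bit is set in reg_val, where
 * bit i of multi-register reg_num corresponds to extension ID
 * reg_num * BITS_PER_LONG + i.
 */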
static int riscv_vcpu_set_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long reg_val, bool enable)
{
	unsigned long i, ext_id;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for_each_set_bit(i, &reg_val, BITS_PER_LONG) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		riscv_vcpu_set_sbi_ext_single(vcpu, ext_id, enable);
	}

	return 0;
}
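
/* Build a bitmask of enabled extensions for one multi-register. */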
static int riscv_vcpu_get_sbi_ext_multi(struct kvm_vcpu *vcpu,
					unsigned long reg_num,
					unsigned long *reg_val)
{
	unsigned long i, ext_id, ext_val;

	if (reg_num > KVM_REG_RISCV_SBI_MULTI_REG_LAST)
		return -ENOENT;

	for (i = 0; i < BITS_PER_LONG; i++) {
		ext_id = i + reg_num * BITS_PER_LONG;
		if (ext_id >= KVM_RISCV_SBI_EXT_MAX)
			break;

		ext_val = 0;
		riscv_vcpu_get_sbi_ext_single(vcpu, ext_id, &ext_val);
		if (ext_val)
			*reg_val |= KVM_REG_RISCV_SBI_MULTI_MASK(ext_id);
	}

	return 0;
}
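
/*
 * KVM_SET_ONE_REG handler for the SBI extension register space. Changes
 * are only permitted before the vCPU has run for the first time.
 */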
int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (vcpu->arch.ran_atleast_once)
		return -EBUSY;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		return riscv_vcpu_set_sbi_ext_single(vcpu, reg_num, reg_val);
	case KVM_REG_RISCV_SBI_MULTI_EN:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, true);
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		return riscv_vcpu_set_sbi_ext_multi(vcpu, reg_num, reg_val, false);
	default:
		return -ENOENT;
	}

	return 0;
}
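
/*
 * KVM_GET_ONE_REG handler for the SBI extension register space. For the
 * MULTI_DIS view the enabled-extension mask is returned inverted.
 */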
int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
				   const struct kvm_one_reg *reg)
{
	int rc;
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_EXT);
	unsigned long reg_val, reg_subtype;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	reg_val = 0;
	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_SINGLE:
		rc = riscv_vcpu_get_sbi_ext_single(vcpu, reg_num, &reg_val);
		break;
	case KVM_REG_RISCV_SBI_MULTI_EN:
	case KVM_REG_RISCV_SBI_MULTI_DIS:
		rc = riscv_vcpu_get_sbi_ext_multi(vcpu, reg_num, &reg_val);
		if (!rc && reg_subtype == KVM_REG_RISCV_SBI_MULTI_DIS)
			reg_val = ~reg_val;
		break;
	default:
		rc = -ENOENT;
	}
	if (rc)
		return rc;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
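
/* KVM_SET_ONE_REG handler for SBI extension state (currently STA only). */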
int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu,
			       const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	unsigned long reg_subtype, reg_val;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_STA:
		return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val);
	default:
		return -EINVAL;
	}

	return 0;
}
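
/* KVM_GET_ONE_REG handler for SBI extension state (currently STA only). */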
int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu,
			       const struct kvm_one_reg *reg)
{
	unsigned long __user *uaddr =
			(unsigned long __user *)(unsigned long)reg->addr;
	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
					    KVM_REG_SIZE_MASK |
					    KVM_REG_RISCV_SBI_STATE);
	unsigned long reg_subtype, reg_val;
	int ret;

	if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
		return -EINVAL;

	reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK;
	reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK;

	switch (reg_subtype) {
	case KVM_REG_RISCV_SBI_STA:
		ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, &reg_val);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	if (copy_to_user(uaddr, &reg_val, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

	return 0;
}
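
/*
 * Find the SBI extension implementation that covers the given SBI
 * extension ID. Returns NULL if no implementation matches or if the
 * matching extension is not enabled for this vCPU; entries with an index
 * of KVM_RISCV_SBI_EXT_MAX (the base extension) can never be disabled.
 */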
const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
				struct kvm_vcpu *vcpu, unsigned long extid)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;

		if (ext->extid_start <= extid && ext->extid_end >= extid) {
			if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX ||
			    scontext->ext_status[entry->ext_idx] ==
						KVM_RISCV_SBI_EXT_STATUS_ENABLED)
				return ext;

			return NULL;
		}
	}

	return NULL;
}
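
/*
 * Top-level handler for SBI calls made by the guest via ecall. The
 * extension ID is taken from a7; the SBI error code is returned to the
 * guest in a0 and, except for legacy v0.1 calls, the output value in a1.
 */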
int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int ret = 1;
	bool next_sepc = true;
	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
	const struct kvm_vcpu_sbi_extension *sbi_ext;
	struct kvm_cpu_trap utrap = {0};
	struct kvm_vcpu_sbi_return sbi_ret = {
		.out_val = 0,
		.err_val = 0,
		.utrap = &utrap,
	};
	bool ext_is_v01 = false;

	sbi_ext = kvm_vcpu_sbi_find_ext(vcpu, cp->a7);
	if (sbi_ext && sbi_ext->handler) {
#ifdef CONFIG_RISCV_SBI_V01
		if (cp->a7 >= SBI_EXT_0_1_SET_TIMER &&
		    cp->a7 <= SBI_EXT_0_1_SHUTDOWN)
			ext_is_v01 = true;
#endif
		ret = sbi_ext->handler(vcpu, run, &sbi_ret);
	} else {
		/* Return error for unsupported SBI calls */
		cp->a0 = SBI_ERR_NOT_SUPPORTED;
		goto ecall_done;
	}

	/*
	 * When the SBI extension returns a Linux error code, exit the ioctl
	 * loop and forward the error to userspace.
	 */
	if (ret < 0) {
		next_sepc = false;
		goto ecall_done;
	}

	/* Handle special error cases i.e. trap, exit or userspace forward */
	if (sbi_ret.utrap->scause) {
		/* No need to increment sepc or exit ioctl loop */
		ret = 1;
		sbi_ret.utrap->sepc = cp->sepc;
		kvm_riscv_vcpu_trap_redirect(vcpu, sbi_ret.utrap);
		next_sepc = false;
		goto ecall_done;
	}

	/* Exit the ioctl loop or propagate the error code to the guest */
	if (sbi_ret.uexit) {
		next_sepc = false;
		ret = 0;
	} else {
		cp->a0 = sbi_ret.err_val;
		ret = 1;
	}
ecall_done:
	if (next_sepc)
		cp->sepc += 4;
	/* a1 should only be updated when we continue the ioctl loop */
	if (!ext_is_v01 && ret == 1)
		cp->a1 = sbi_ret.out_val;

	return ret;
}
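
/*
 * Initialize the per-vCPU SBI extension status: probe each extension and
 * mark it unavailable, enabled, or disabled by default.
 */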
void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context;
	const struct kvm_riscv_sbi_extension_entry *entry;
	const struct kvm_vcpu_sbi_extension *ext;
	int idx, i;

	for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) {
		entry = &sbi_ext[i];
		ext = entry->ext_ptr;
		idx = entry->ext_idx;

		if (idx < 0 || idx >= ARRAY_SIZE(scontext->ext_status))
			continue;

		if (ext->probe && !ext->probe(vcpu)) {
			scontext->ext_status[idx] = KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE;
			continue;
		}

		scontext->ext_status[idx] = ext->default_disabled ?
					KVM_RISCV_SBI_EXT_STATUS_DISABLED :
					KVM_RISCV_SBI_EXT_STATUS_ENABLED;
	}
}