xref: /linux/arch/powerpc/kvm/book3s_hv_nestedv2.c (revision 7f71507851fc7764b36a3221839607d3a45c2025)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
4  *
5  * Authors:
6  *    Jordan Niethe <jniethe5@gmail.com>
7  *
8  * Description: KVM functions specific to running on Book 3S
9  * processors as a NESTEDv2 guest.
10  *
11  */
12 
13 #include <linux/blk-mq.h>
14 #include <linux/console.h>
15 #include <linux/gfp_types.h>
16 #include <linux/signal.h>
17 #include <linux/kernel.h>
18 #include <linux/kvm_host.h>
19 #include <linux/pgtable.h>
20 
21 #include <asm/kvm_ppc.h>
22 #include <asm/kvm_book3s.h>
23 #include <asm/hvcall.h>
24 #include <asm/pgalloc.h>
25 #include <asm/reg.h>
26 #include <asm/plpar_wrappers.h>
27 #include <asm/guest-state-buffer.h>
28 #include "trace_hv.h"
29 
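/* True when running as an L1 hypervisor using the NESTEDv2 guest state API */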
30 struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
31 EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
32 
33 
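/*
 * Guest state message ops used to negotiate the vcpu run input/output
 * buffer configuration with the L0 hypervisor.
 */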
34 static size_t
35 gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
36 {
37 	u16 ids[] = {
38 		KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
39 		KVMPPC_GSID_RUN_INPUT,
40 		KVMPPC_GSID_RUN_OUTPUT,
41 
42 	};
43 	size_t size = 0;
44 
45 	for (int i = 0; i < ARRAY_SIZE(ids); i++)
46 		size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
47 	return size;
48 }
49 
50 static int
51 gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
52 					   struct kvmppc_gs_msg *gsm)
53 {
54 	struct kvmhv_nestedv2_config *cfg;
55 	int rc;
56 
57 	cfg = gsm->data;
58 
59 	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
60 		rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
61 					cfg->vcpu_run_output_size);
62 		if (rc < 0)
63 			return rc;
64 	}
65 
66 	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
67 		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
68 					      cfg->vcpu_run_input_cfg);
69 		if (rc < 0)
70 			return rc;
71 	}
72 
73 	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
74 		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
75 					      cfg->vcpu_run_output_cfg);
76 		if (rc < 0)
77 			return rc;
78 	}
79 
80 	return 0;
81 }
82 
83 static int
84 gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
85 					      struct kvmppc_gs_buff *gsb)
86 {
87 	struct kvmhv_nestedv2_config *cfg;
88 	struct kvmppc_gs_parser gsp = { 0 };
89 	struct kvmppc_gs_elem *gse;
90 	int rc;
91 
92 	cfg = gsm->data;
93 
94 	rc = kvmppc_gse_parse(&gsp, gsb);
95 	if (rc < 0)
96 		return rc;
97 
98 	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
99 	if (gse)
100 		cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
101 	return 0;
102 }
103 
104 static struct kvmppc_gs_msg_ops config_msg_ops = {
105 	.get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
106 	.fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
107 	.refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
108 };
109 
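/*
 * Total guest state buffer size needed to hold every vcpu element. The IDs
 * handled by the config message above are excluded.
 */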
110 static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
111 {
112 	struct kvmppc_gs_bitmap gsbm = { 0 };
113 	size_t size = 0;
114 	u16 iden;
115 
116 	kvmppc_gsbm_fill(&gsbm);
117 	kvmppc_gsbm_for_each(&gsbm, iden)
118 	{
119 		switch (iden) {
120 		case KVMPPC_GSID_HOST_STATE_SIZE:
121 		case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
122 		case KVMPPC_GSID_PARTITION_TABLE:
123 		case KVMPPC_GSID_PROCESS_TABLE:
124 		case KVMPPC_GSID_RUN_INPUT:
125 		case KVMPPC_GSID_RUN_OUTPUT:
126 			break;
127 		default:
128 			size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
129 		}
130 	}
131 	return size;
132 }
133 
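/*
 * Serialise the vcpu register state into a guest state buffer to be sent
 * to the L0 hypervisor.
 */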
134 static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
135 				     struct kvmppc_gs_msg *gsm)
136 {
137 	struct kvm_vcpu *vcpu;
138 	vector128 v;
139 	int rc, i;
140 	u16 iden;
141 	u32 arch_compat = 0;
142 
143 	vcpu = gsm->data;
144 
145 	kvmppc_gsm_for_each(gsm, iden)
146 	{
147 		rc = 0;
148 
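		/* Skip elements whose guest-wide flag does not match this message */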
149 		if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
150 		    (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
151 			continue;
152 
153 		switch (iden) {
154 		case KVMPPC_GSID_DSCR:
155 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
156 			break;
157 		case KVMPPC_GSID_MMCRA:
158 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
159 			break;
160 		case KVMPPC_GSID_HFSCR:
161 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
162 			break;
163 		case KVMPPC_GSID_PURR:
164 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
165 			break;
166 		case KVMPPC_GSID_SPURR:
167 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
168 			break;
169 		case KVMPPC_GSID_AMR:
170 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
171 			break;
172 		case KVMPPC_GSID_UAMOR:
173 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
174 			break;
175 		case KVMPPC_GSID_SIAR:
176 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
177 			break;
178 		case KVMPPC_GSID_SDAR:
179 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
180 			break;
181 		case KVMPPC_GSID_IAMR:
182 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
183 			break;
184 		case KVMPPC_GSID_DAWR0:
185 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
186 			break;
187 		case KVMPPC_GSID_DAWR1:
188 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
189 			break;
190 		case KVMPPC_GSID_DAWRX0:
191 			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
192 			break;
193 		case KVMPPC_GSID_DAWRX1:
194 			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
195 			break;
196 		case KVMPPC_GSID_DEXCR:
197 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr);
198 			break;
199 		case KVMPPC_GSID_HASHKEYR:
200 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr);
201 			break;
202 		case KVMPPC_GSID_HASHPKEYR:
203 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr);
204 			break;
205 		case KVMPPC_GSID_CIABR:
206 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
207 			break;
208 		case KVMPPC_GSID_WORT:
209 			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
210 			break;
211 		case KVMPPC_GSID_PPR:
212 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
213 			break;
214 		case KVMPPC_GSID_PSPB:
215 			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
216 			break;
217 		case KVMPPC_GSID_TAR:
218 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
219 			break;
220 		case KVMPPC_GSID_FSCR:
221 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
222 			break;
223 		case KVMPPC_GSID_EBBHR:
224 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
225 			break;
226 		case KVMPPC_GSID_EBBRR:
227 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
228 			break;
229 		case KVMPPC_GSID_BESCR:
230 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
231 			break;
232 		case KVMPPC_GSID_IC:
233 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
234 			break;
235 		case KVMPPC_GSID_CTRL:
236 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
237 			break;
238 		case KVMPPC_GSID_PIDR:
239 			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
240 			break;
241 		case KVMPPC_GSID_AMOR: {
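			/* AMOR is always presented to the L0 as all ones (no restrictions) */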
242 			u64 amor = ~0;
243 
244 			rc = kvmppc_gse_put_u64(gsb, iden, amor);
245 			break;
246 		}
247 		case KVMPPC_GSID_VRSAVE:
248 			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
249 			break;
250 		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
251 			i = iden - KVMPPC_GSID_MMCR(0);
252 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
253 			break;
254 		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
255 			i = iden - KVMPPC_GSID_SIER(0);
256 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
257 			break;
258 		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
259 			i = iden - KVMPPC_GSID_PMC(0);
260 			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
261 			break;
262 		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
263 			i = iden - KVMPPC_GSID_GPR(0);
264 			rc = kvmppc_gse_put_u64(gsb, iden,
265 						vcpu->arch.regs.gpr[i]);
266 			break;
267 		case KVMPPC_GSID_CR:
268 			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
269 			break;
270 		case KVMPPC_GSID_XER:
271 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
272 			break;
273 		case KVMPPC_GSID_CTR:
274 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
275 			break;
276 		case KVMPPC_GSID_LR:
277 			rc = kvmppc_gse_put_u64(gsb, iden,
278 						vcpu->arch.regs.link);
279 			break;
280 		case KVMPPC_GSID_NIA:
281 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
282 			break;
283 		case KVMPPC_GSID_SRR0:
284 			rc = kvmppc_gse_put_u64(gsb, iden,
285 						vcpu->arch.shregs.srr0);
286 			break;
287 		case KVMPPC_GSID_SRR1:
288 			rc = kvmppc_gse_put_u64(gsb, iden,
289 						vcpu->arch.shregs.srr1);
290 			break;
291 		case KVMPPC_GSID_SPRG0:
292 			rc = kvmppc_gse_put_u64(gsb, iden,
293 						vcpu->arch.shregs.sprg0);
294 			break;
295 		case KVMPPC_GSID_SPRG1:
296 			rc = kvmppc_gse_put_u64(gsb, iden,
297 						vcpu->arch.shregs.sprg1);
298 			break;
299 		case KVMPPC_GSID_SPRG2:
300 			rc = kvmppc_gse_put_u64(gsb, iden,
301 						vcpu->arch.shregs.sprg2);
302 			break;
303 		case KVMPPC_GSID_SPRG3:
304 			rc = kvmppc_gse_put_u64(gsb, iden,
305 						vcpu->arch.shregs.sprg3);
306 			break;
307 		case KVMPPC_GSID_DAR:
308 			rc = kvmppc_gse_put_u64(gsb, iden,
309 						vcpu->arch.shregs.dar);
310 			break;
311 		case KVMPPC_GSID_DSISR:
312 			rc = kvmppc_gse_put_u32(gsb, iden,
313 						vcpu->arch.shregs.dsisr);
314 			break;
315 		case KVMPPC_GSID_MSR:
316 			rc = kvmppc_gse_put_u64(gsb, iden,
317 						vcpu->arch.shregs.msr);
318 			break;
319 		case KVMPPC_GSID_VTB:
320 			rc = kvmppc_gse_put_u64(gsb, iden,
321 						vcpu->arch.vcore->vtb);
322 			break;
323 		case KVMPPC_GSID_DPDES:
324 			rc = kvmppc_gse_put_u64(gsb, iden,
325 						vcpu->arch.vcore->dpdes);
326 			break;
327 		case KVMPPC_GSID_LPCR:
328 			rc = kvmppc_gse_put_u64(gsb, iden,
329 						vcpu->arch.vcore->lpcr);
330 			break;
331 		case KVMPPC_GSID_TB_OFFSET:
332 			rc = kvmppc_gse_put_u64(gsb, iden,
333 						vcpu->arch.vcore->tb_offset);
334 			break;
335 		case KVMPPC_GSID_FPSCR:
336 			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
337 			break;
338 		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
339 			i = iden - KVMPPC_GSID_VSRS(0);
340 			memcpy(&v, &vcpu->arch.fp.fpr[i],
341 			       sizeof(vcpu->arch.fp.fpr[i]));
342 			rc = kvmppc_gse_put_vector128(gsb, iden, &v);
343 			break;
344 #ifdef CONFIG_VSX
345 		case KVMPPC_GSID_VSCR:
346 			rc = kvmppc_gse_put_u32(gsb, iden,
347 						vcpu->arch.vr.vscr.u[3]);
348 			break;
349 		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
350 			i = iden - KVMPPC_GSID_VSRS(32);
351 			rc = kvmppc_gse_put_vector128(gsb, iden,
352 						      &vcpu->arch.vr.vr[i]);
353 			break;
354 #endif
355 		case KVMPPC_GSID_DEC_EXPIRY_TB: {
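			/* dec_expires includes the vcore timebase offset; the GSID value does not */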
356 			u64 dw;
357 
358 			dw = vcpu->arch.dec_expires -
359 			     vcpu->arch.vcore->tb_offset;
360 			rc = kvmppc_gse_put_u64(gsb, iden, dw);
361 			break;
362 		}
363 		case KVMPPC_GSID_LOGICAL_PVR:
364 			/*
365 			 * Although arch_compat == 0 would mean the default
366 			 * compatibility, arch_compat, being a Guest Wide
367 			 * Element, cannot be filled with a value of 0 in the
368 			 * GSB as this would result in a kernel trap.
369 			 * Hence, when arch_compat == 0, arch_compat should
370 			 * default to the L1's PVR.
371 			 */
372 			if (!vcpu->arch.vcore->arch_compat) {
373 				if (cpu_has_feature(CPU_FTR_P11_PVR))
374 					arch_compat = PVR_ARCH_31_P11;
375 				else if (cpu_has_feature(CPU_FTR_ARCH_31))
376 					arch_compat = PVR_ARCH_31;
377 				else if (cpu_has_feature(CPU_FTR_ARCH_300))
378 					arch_compat = PVR_ARCH_300;
379 			} else {
380 				arch_compat = vcpu->arch.vcore->arch_compat;
381 			}
382 			rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
383 			break;
384 		}
385 
386 		if (rc < 0)
387 			return rc;
388 	}
389 
390 	return 0;
391 }
392 
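/*
 * Parse a guest state buffer returned by the L0 and copy each element back
 * into the vcpu, marking it valid in the L1's cache of guest state.
 */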
393 static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
394 					struct kvmppc_gs_buff *gsb)
395 {
396 	struct kvmppc_gs_parser gsp = { 0 };
397 	struct kvmhv_nestedv2_io *io;
398 	struct kvmppc_gs_bitmap *valids;
399 	struct kvm_vcpu *vcpu;
400 	struct kvmppc_gs_elem *gse;
401 	vector128 v;
402 	int rc, i;
403 	u16 iden;
404 
405 	vcpu = gsm->data;
406 
407 	rc = kvmppc_gse_parse(&gsp, gsb);
408 	if (rc < 0)
409 		return rc;
410 
411 	io = &vcpu->arch.nestedv2_io;
412 	valids = &io->valids;
413 
414 	kvmppc_gsp_for_each(&gsp, iden, gse)
415 	{
416 		switch (iden) {
417 		case KVMPPC_GSID_DSCR:
418 			vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
419 			break;
420 		case KVMPPC_GSID_MMCRA:
421 			vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
422 			break;
423 		case KVMPPC_GSID_HFSCR:
424 			vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
425 			break;
426 		case KVMPPC_GSID_PURR:
427 			vcpu->arch.purr = kvmppc_gse_get_u64(gse);
428 			break;
429 		case KVMPPC_GSID_SPURR:
430 			vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
431 			break;
432 		case KVMPPC_GSID_AMR:
433 			vcpu->arch.amr = kvmppc_gse_get_u64(gse);
434 			break;
435 		case KVMPPC_GSID_UAMOR:
436 			vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
437 			break;
438 		case KVMPPC_GSID_SIAR:
439 			vcpu->arch.siar = kvmppc_gse_get_u64(gse);
440 			break;
441 		case KVMPPC_GSID_SDAR:
442 			vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
443 			break;
444 		case KVMPPC_GSID_IAMR:
445 			vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
446 			break;
447 		case KVMPPC_GSID_DAWR0:
448 			vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
449 			break;
450 		case KVMPPC_GSID_DAWR1:
451 			vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
452 			break;
453 		case KVMPPC_GSID_DAWRX0:
454 			vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
455 			break;
456 		case KVMPPC_GSID_DAWRX1:
457 			vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
458 			break;
459 		case KVMPPC_GSID_DEXCR:
460 			vcpu->arch.dexcr = kvmppc_gse_get_u64(gse);
461 			break;
462 		case KVMPPC_GSID_HASHKEYR:
463 			vcpu->arch.hashkeyr = kvmppc_gse_get_u64(gse);
464 			break;
465 		case KVMPPC_GSID_HASHPKEYR:
466 			vcpu->arch.hashpkeyr = kvmppc_gse_get_u64(gse);
467 			break;
468 		case KVMPPC_GSID_CIABR:
469 			vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
470 			break;
471 		case KVMPPC_GSID_WORT:
472 			vcpu->arch.wort = kvmppc_gse_get_u32(gse);
473 			break;
474 		case KVMPPC_GSID_PPR:
475 			vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
476 			break;
477 		case KVMPPC_GSID_PSPB:
478 			vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
479 			break;
480 		case KVMPPC_GSID_TAR:
481 			vcpu->arch.tar = kvmppc_gse_get_u64(gse);
482 			break;
483 		case KVMPPC_GSID_FSCR:
484 			vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
485 			break;
486 		case KVMPPC_GSID_EBBHR:
487 			vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
488 			break;
489 		case KVMPPC_GSID_EBBRR:
490 			vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
491 			break;
492 		case KVMPPC_GSID_BESCR:
493 			vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
494 			break;
495 		case KVMPPC_GSID_IC:
496 			vcpu->arch.ic = kvmppc_gse_get_u64(gse);
497 			break;
498 		case KVMPPC_GSID_CTRL:
499 			vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
500 			break;
501 		case KVMPPC_GSID_PIDR:
502 			vcpu->arch.pid = kvmppc_gse_get_u32(gse);
503 			break;
504 		case KVMPPC_GSID_AMOR:
505 			break;
506 		case KVMPPC_GSID_VRSAVE:
507 			vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
508 			break;
509 		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
510 			i = iden - KVMPPC_GSID_MMCR(0);
511 			vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
512 			break;
513 		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
514 			i = iden - KVMPPC_GSID_SIER(0);
515 			vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
516 			break;
517 		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
518 			i = iden - KVMPPC_GSID_PMC(0);
519 			vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
520 			break;
521 		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
522 			i = iden - KVMPPC_GSID_GPR(0);
523 			vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
524 			break;
525 		case KVMPPC_GSID_CR:
526 			vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
527 			break;
528 		case KVMPPC_GSID_XER:
529 			vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
530 			break;
531 		case KVMPPC_GSID_CTR:
532 			vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
533 			break;
534 		case KVMPPC_GSID_LR:
535 			vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
536 			break;
537 		case KVMPPC_GSID_NIA:
538 			vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
539 			break;
540 		case KVMPPC_GSID_SRR0:
541 			vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
542 			break;
543 		case KVMPPC_GSID_SRR1:
544 			vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
545 			break;
546 		case KVMPPC_GSID_SPRG0:
547 			vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
548 			break;
549 		case KVMPPC_GSID_SPRG1:
550 			vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
551 			break;
552 		case KVMPPC_GSID_SPRG2:
553 			vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
554 			break;
555 		case KVMPPC_GSID_SPRG3:
556 			vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
557 			break;
558 		case KVMPPC_GSID_DAR:
559 			vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
560 			break;
561 		case KVMPPC_GSID_DSISR:
562 			vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
563 			break;
564 		case KVMPPC_GSID_MSR:
565 			vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
566 			break;
567 		case KVMPPC_GSID_VTB:
568 			vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
569 			break;
570 		case KVMPPC_GSID_DPDES:
571 			vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse);
572 			break;
573 		case KVMPPC_GSID_LPCR:
574 			vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
575 			break;
576 		case KVMPPC_GSID_TB_OFFSET:
577 			vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
578 			break;
579 		case KVMPPC_GSID_FPSCR:
580 			vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
581 			break;
582 		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
583 			kvmppc_gse_get_vector128(gse, &v);
584 			i = iden - KVMPPC_GSID_VSRS(0);
585 			memcpy(&vcpu->arch.fp.fpr[i], &v,
586 			       sizeof(vcpu->arch.fp.fpr[i]));
587 			break;
588 #ifdef CONFIG_VSX
589 		case KVMPPC_GSID_VSCR:
590 			vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
591 			break;
592 		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
593 			i = iden - KVMPPC_GSID_VSRS(32);
594 			kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
595 			break;
596 #endif
597 		case KVMPPC_GSID_HDAR:
598 			vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
599 			break;
600 		case KVMPPC_GSID_HDSISR:
601 			vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
602 			break;
603 		case KVMPPC_GSID_ASDR:
604 			vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
605 			break;
606 		case KVMPPC_GSID_HEIR:
607 			vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
608 			break;
609 		case KVMPPC_GSID_DEC_EXPIRY_TB: {
610 			u64 dw;
611 
612 			dw = kvmppc_gse_get_u64(gse);
613 			vcpu->arch.dec_expires =
614 				dw + vcpu->arch.vcore->tb_offset;
615 			break;
616 		}
617 		case KVMPPC_GSID_LOGICAL_PVR:
618 			vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
619 			break;
620 		default:
621 			continue;
622 		}
623 		kvmppc_gsbm_set(valids, iden);
624 	}
625 
626 	return 0;
627 }
628 
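/* Used for both the thread wide (vcpu) and guest wide (vcore) messages */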
629 static struct kvmppc_gs_msg_ops vcpu_message_ops = {
630 	.get_size = gs_msg_ops_vcpu_get_size,
631 	.fill_info = gs_msg_ops_vcpu_fill_info,
632 	.refresh_info = gs_msg_ops_vcpu_refresh_info,
633 };
634 
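/*
 * Allocate and register with the L0 the buffers used to run this vcpu:
 * query the minimum run output size, register the run output and run input
 * buffers, and prepare the thread wide and guest wide message templates.
 */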
635 static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
636 				      struct kvmhv_nestedv2_io *io)
637 {
638 	struct kvmhv_nestedv2_config *cfg;
639 	struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
640 	unsigned long guest_id, vcpu_id;
641 	struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
642 	int rc;
643 
644 	cfg = &io->cfg;
645 	guest_id = vcpu->kvm->arch.lpid;
646 	vcpu_id = vcpu->vcpu_id;
647 
648 	gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
649 			     GFP_KERNEL);
650 	if (!gsm) {
651 		rc = -ENOMEM;
652 		goto err;
653 	}
654 
655 	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
656 			     GFP_KERNEL);
657 	if (!gsb) {
658 		rc = -ENOMEM;
659 		goto free_gsm;
660 	}
661 
662 	rc = kvmppc_gsb_receive_datum(gsb, gsm,
663 				      KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
664 	if (rc < 0) {
665 		pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
666 		goto free_gsb;
667 	}
668 
669 	vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
670 					 vcpu_id, GFP_KERNEL);
671 	if (!vcpu_run_output) {
672 		rc = -ENOMEM;
673 		goto free_gsb;
674 	}
675 
676 	cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
677 	cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
678 	io->vcpu_run_output = vcpu_run_output;
679 
680 	gsm->flags = 0;
681 	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
682 	if (rc < 0) {
683 		pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
684 		goto free_gs_out;
685 	}
686 
687 	vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
688 	if (!vcpu_message) {
689 		rc = -ENOMEM;
690 		goto free_gs_out;
691 	}
692 	kvmppc_gsm_include_all(vcpu_message);
693 
694 	io->vcpu_message = vcpu_message;
695 
696 	vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
697 					vcpu_id, GFP_KERNEL);
698 	if (!vcpu_run_input) {
699 		rc = -ENOMEM;
700 		goto free_vcpu_message;
701 	}
702 
703 	io->vcpu_run_input = vcpu_run_input;
704 	cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
705 	cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
706 	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
707 	if (rc < 0) {
708 		pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
709 		goto free_vcpu_run_input;
710 	}
711 
712 	vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
713 				       KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
714 	if (!vcore_message) {
715 		rc = -ENOMEM;
716 		goto free_vcpu_run_input;
717 	}
718 
719 	kvmppc_gsm_include_all(vcore_message);
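	/* LOGICAL_PVR is only sent once explicitly marked dirty; see __kvmhv_nestedv2_mark_dirty() */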
720 	kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
721 	io->vcore_message = vcore_message;
722 
723 	kvmppc_gsbm_fill(&io->valids);
724 	kvmppc_gsm_free(gsm);
725 	kvmppc_gsb_free(gsb);
726 	return 0;
727 
728 free_vcpu_run_input:
729 	kvmppc_gsb_free(vcpu_run_input);
730 free_vcpu_message:
731 	kvmppc_gsm_free(vcpu_message);
732 free_gs_out:
733 	kvmppc_gsb_free(vcpu_run_output);
734 free_gsb:
735 	kvmppc_gsb_free(gsb);
736 free_gsm:
737 	kvmppc_gsm_free(gsm);
738 err:
739 	return rc;
740 }
741 
742 /**
743  * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
744  * @vcpu: vcpu
745  * @iden: guest state ID
746  *
747  * Mark a guest state ID as having been changed by the L1 host so that the
748  * new value is sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu().
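 *
 * For example (illustrative only), after the L1 writes a register that is
 * shadowed in the guest state buffer:
 *
 *	vcpu->arch.dscr = val;
 *	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DSCR);
 *
 * so the DSCR is pushed to the L0 on the next flush.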
749  */
750 int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
751 {
752 	struct kvmhv_nestedv2_io *io;
753 	struct kvmppc_gs_bitmap *valids;
754 	struct kvmppc_gs_msg *gsm;
755 
756 	if (!iden)
757 		return 0;
758 
759 	io = &vcpu->arch.nestedv2_io;
760 	valids = &io->valids;
761 	gsm = io->vcpu_message;
762 	kvmppc_gsm_include(gsm, iden);
763 	gsm = io->vcore_message;
764 	kvmppc_gsm_include(gsm, iden);
765 	kvmppc_gsbm_set(valids, iden);
766 	return 0;
767 }
768 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
769 
770 /**
771  * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
772  * @vcpu: vcpu
773  * @iden: guest state ID
774  *
775  * Reload the value for the guest state ID from the L0 host into the L1 host.
776  * IDs already marked valid in the L1 are not re-fetched, so the L0 host is only consulted when necessary.
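 *
 * For example (illustrative only, error handling omitted), before reading a
 * register whose current value may only be held by the L0:
 *
 *	__kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DSCR);
 *	val = vcpu->arch.dscr;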
777  */
778 int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
779 {
780 	struct kvmhv_nestedv2_io *io;
781 	struct kvmppc_gs_bitmap *valids;
782 	struct kvmppc_gs_buff *gsb;
783 	struct kvmppc_gs_msg gsm;
784 	int rc;
785 
786 	if (!iden)
787 		return 0;
788 
789 	io = &vcpu->arch.nestedv2_io;
790 	valids = &io->valids;
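	/* Nothing to do if the value is already cached as valid in the L1 */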
791 	if (kvmppc_gsbm_test(valids, iden))
792 		return 0;
793 
794 	gsb = io->vcpu_run_input;
795 	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
796 	rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
797 	if (rc < 0) {
798 		pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
799 		return rc;
800 	}
801 	return 0;
802 }
803 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
804 
805 /**
806  * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
807  * @vcpu: vcpu
808  * @time_limit: hdec expiry tb
809  *
810  * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
811  * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
812  * wide values need to be sent with H_GUEST_SET first.
813  *
814  * The HDEC expiry tb (@time_limit) is always sent to the L0 host.
815  */
816 int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
817 {
818 	struct kvmhv_nestedv2_io *io;
819 	struct kvmppc_gs_buff *gsb;
820 	struct kvmppc_gs_msg *gsm;
821 	int rc;
822 
823 	io = &vcpu->arch.nestedv2_io;
824 	gsb = io->vcpu_run_input;
825 	gsm = io->vcore_message;
826 	rc = kvmppc_gsb_send_data(gsb, gsm);
827 	if (rc < 0) {
828 		pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
829 		return rc;
830 	}
831 
832 	gsm = io->vcpu_message;
833 	kvmppc_gsb_reset(gsb);
834 	rc = kvmppc_gsm_fill_info(gsm, gsb);
835 	if (rc < 0) {
836 		pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
837 		return rc;
838 	}
839 
840 	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
841 	if (rc < 0)
842 		return rc;
843 	return 0;
844 }
845 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
846 
847 /**
848  * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
849  * the L0 host
850  * @lpid: guest id
851  * @dw0: partition table double word
852  * @dw1: process table double word
853  */
854 int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
855 {
856 	struct kvmppc_gs_part_table patbl;
857 	struct kvmppc_gs_proc_table prtbl;
858 	struct kvmppc_gs_buff *gsb;
859 	size_t size;
860 	int rc;
861 
862 	size = kvmppc_gse_total_size(
863 		       kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
864 	       kvmppc_gse_total_size(
865 		       kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
866 	       sizeof(struct kvmppc_gs_header);
867 	gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
868 	if (!gsb)
869 		return -ENOMEM;
870 
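	/* Unpack PATE dword 0: radix base, tree size (RTS) and root page directory size */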
871 	patbl.address = dw0 & RPDB_MASK;
872 	patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
873 			  ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
874 			 31);
875 	patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
876 	rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
877 	if (rc < 0)
878 		goto free_gsb;
879 
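	/* Unpack PATE dword 1: process table base and size */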
880 	prtbl.address = dw1 & PRTB_MASK;
881 	prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
882 	rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
883 	if (rc < 0)
884 		goto free_gsb;
885 
886 	rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
887 	if (rc < 0) {
888 		pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
889 		goto free_gsb;
890 	}
891 
892 	kvmppc_gsb_free(gsb);
893 	return 0;
894 
895 free_gsb:
896 	kvmppc_gsb_free(gsb);
897 	return rc;
898 }
899 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
900 
901 /**
902  * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0
903  * @vcpu: vcpu
904  * @vpa: L1 logical real address
905  */
906 int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa)
907 {
908 	struct kvmhv_nestedv2_io *io;
909 	struct kvmppc_gs_buff *gsb;
910 	int rc = 0;
911 
912 	io = &vcpu->arch.nestedv2_io;
913 	gsb = io->vcpu_run_input;
914 
915 	kvmppc_gsb_reset(gsb);
916 	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa);
917 	if (rc < 0)
918 		goto out;
919 
920 	rc = kvmppc_gsb_send(gsb, 0);
921 	if (rc < 0)
922 		pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc);
923 
924 out:
925 	kvmppc_gsb_reset(gsb);
926 	return rc;
927 }
928 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa);
929 
930 /**
931  * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
932  * @vcpu: vcpu
933  *
934  * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
935  */
936 int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
937 {
938 	struct kvmhv_nestedv2_io *io;
939 	struct kvmppc_gs_buff *gsb;
940 	struct kvmppc_gs_msg gsm;
941 
942 	io = &vcpu->arch.nestedv2_io;
943 	gsb = io->vcpu_run_output;
944 
945 	vcpu->arch.fault_dar = 0;
946 	vcpu->arch.fault_dsisr = 0;
947 	vcpu->arch.fault_gpa = 0;
948 	vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
949 
950 	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
951 	return kvmppc_gsm_refresh_info(&gsm, gsb);
952 }
953 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
954 
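/* Free the messages and run buffers allocated by kvmhv_nestedv2_host_create() */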
955 static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
956 				     struct kvmhv_nestedv2_io *io)
957 {
958 	kvmppc_gsm_free(io->vcpu_message);
959 	kvmppc_gsm_free(io->vcore_message);
960 	kvmppc_gsb_free(io->vcpu_run_input);
961 	kvmppc_gsb_free(io->vcpu_run_output);
962 }
963 
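/*
 * Fetch from the L0 any of the pt_regs related state (GPRs, CR, XER, CTR,
 * LR, NIA) that is not already valid in the L1's cache.
 */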
964 int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
965 {
966 	struct kvmhv_nestedv2_io *io;
967 	struct kvmppc_gs_bitmap *valids;
968 	struct kvmppc_gs_buff *gsb;
969 	struct kvmppc_gs_msg gsm;
970 	int rc = 0;
971 
972 
973 	io = &vcpu->arch.nestedv2_io;
974 	valids = &io->valids;
975 
976 	gsb = io->vcpu_run_input;
977 	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
978 
979 	for (int i = 0; i < 32; i++) {
980 		if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
981 			kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
982 	}
983 
984 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
985 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
986 
987 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
988 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
989 
990 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
991 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
992 
993 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
994 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
995 
996 	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
997 		kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
998 
999 	rc = kvmppc_gsb_receive_data(gsb, &gsm);
1000 	if (rc < 0)
1001 		pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
1002 
1003 	return rc;
1004 }
1005 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
1006 
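/*
 * Mark all pt_regs related state dirty so it is pushed back to the L0 on
 * the next kvmhv_nestedv2_flush_vcpu().
 */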
1007 int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
1008 				       struct pt_regs *regs)
1009 {
1010 	for (int i = 0; i < 32; i++)
1011 		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
1012 
1013 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
1014 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
1015 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
1016 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
1017 	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
1018 
1019 	return 0;
1020 }
1021 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
1022 
1023 /**
1024  * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
1025  * @vcpu: vcpu
1026  * @io: NESTEDv2 nested io state
1027  *
1028  * Create the vcpu in the L0 host (H_GUEST_CREATE_VCPU hcall) and allocate its guest state buffers.
1029  */
1030 int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
1031 			       struct kvmhv_nestedv2_io *io)
1032 {
1033 	long rc;
1034 
1035 	rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);
1036 
1037 	if (rc != H_SUCCESS) {
1038 		pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
1039 		switch (rc) {
1040 		case H_NOT_ENOUGH_RESOURCES:
1041 		case H_ABORTED:
1042 			return -ENOMEM;
1043 		case H_AUTHORITY:
1044 			return -EPERM;
1045 		default:
1046 			return -EINVAL;
1047 		}
1048 	}
1049 
1050 	rc = kvmhv_nestedv2_host_create(vcpu, io);
1051 
1052 	return rc;
1053 }
1054 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
1055 
1056 /**
1057  * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
1058  * @vcpu: vcpu
1059  * @io: NESTEDv2 nested io state
1060  */
1061 void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
1062 			      struct kvmhv_nestedv2_io *io)
1063 {
1064 	kvmhv_nestedv2_host_free(vcpu, io);
1065 }
1066 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);
1067