// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
 *
 * Authors:
 *    Jordan Niethe <jniethe5@gmail.com>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors as a NESTEDv2 guest.
 *
 */

#include <linux/blk-mq.h>
#include <linux/console.h>
#include <linux/gfp_types.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/hvcall.h>
#include <asm/pgalloc.h>
#include <asm/reg.h>
#include <asm/plpar_wrappers.h>
#include <asm/guest-state-buffer.h>
#include "trace_hv.h"

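/*
 * Static key enabled when KVM-HV is running as a NESTEDv2 (guest state
 * buffer API) guest under an L0 hypervisor. Callers are expected to test it
 * through a helper (for example kvmhv_is_nestedv2(), assumed to be provided
 * by the kvm_book3s headers) rather than touching the key directly.
 */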
struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);

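/*
 * Guest state message ops for the vcpu run configuration elements: the
 * minimum run output size reported by the L0 host and the run input/output
 * buffer descriptors registered with it.
 */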
static size_t
gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
{
	u16 ids[] = {
		KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
		KVMPPC_GSID_RUN_INPUT,
		KVMPPC_GSID_RUN_OUTPUT,
	};
	size_t size = 0;

	for (int i = 0; i < ARRAY_SIZE(ids); i++)
		size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
	return size;
}

static int
gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
					   struct kvmppc_gs_msg *gsm)
{
	struct kvmhv_nestedv2_config *cfg;
	int rc;

	cfg = gsm->data;

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
		rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
					cfg->vcpu_run_output_size);
		if (rc < 0)
			return rc;
	}

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
					      cfg->vcpu_run_input_cfg);
		if (rc < 0)
			return rc;
	}

	if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
		rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
					      cfg->vcpu_run_output_cfg);
		if (rc < 0)
			return rc;
	}

	return 0;
}

static int
gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
					      struct kvmppc_gs_buff *gsb)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_elem *gse;
	int rc;

	cfg = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (gse)
		cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
	return 0;
}

static struct kvmppc_gs_msg_ops config_msg_ops = {
	.get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
	.fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
	.refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
};

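/*
 * Size a guest state buffer large enough for every vcpu element, skipping
 * the configuration IDs that are negotiated separately in
 * kvmhv_nestedv2_host_create().
 */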
static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
{
	struct kvmppc_gs_bitmap gsbm = { 0 };
	size_t size = 0;
	u16 iden;

	kvmppc_gsbm_fill(&gsbm);
	kvmppc_gsbm_for_each(&gsbm, iden)
	{
		switch (iden) {
		case KVMPPC_GSID_HOST_STATE_SIZE:
		case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
		case KVMPPC_GSID_PARTITION_TABLE:
		case KVMPPC_GSID_PROCESS_TABLE:
		case KVMPPC_GSID_RUN_INPUT:
		case KVMPPC_GSID_RUN_OUTPUT:
			break;
		default:
			size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
		}
	}
	return size;
}

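/*
 * Serialise the vcpu state selected in the message into guest state
 * elements so it can be sent to the L0 host. Only IDs whose wideness
 * (thread wide vs guest wide) matches the message flags are written.
 */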
static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
				     struct kvmppc_gs_msg *gsm)
{
	struct kvm_vcpu *vcpu;
	vector128 v;
	int rc, i;
	u16 iden;
	u32 arch_compat = 0;

	vcpu = gsm->data;

	kvmppc_gsm_for_each(gsm, iden)
	{
		rc = 0;

		if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
		    (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
			continue;

		switch (iden) {
		case KVMPPC_GSID_DSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
			break;
		case KVMPPC_GSID_MMCRA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
			break;
		case KVMPPC_GSID_HFSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
			break;
		case KVMPPC_GSID_PURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
			break;
		case KVMPPC_GSID_SPURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
			break;
		case KVMPPC_GSID_AMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
			break;
		case KVMPPC_GSID_UAMOR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
			break;
		case KVMPPC_GSID_SIAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
			break;
		case KVMPPC_GSID_SDAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
			break;
		case KVMPPC_GSID_IAMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
			break;
		case KVMPPC_GSID_DAWR0:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
			break;
		case KVMPPC_GSID_DAWR1:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
			break;
		case KVMPPC_GSID_DAWRX0:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
			break;
		case KVMPPC_GSID_DAWRX1:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
			break;
		case KVMPPC_GSID_CIABR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
			break;
		case KVMPPC_GSID_WORT:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
			break;
		case KVMPPC_GSID_PPR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
			break;
		case KVMPPC_GSID_PSPB:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
			break;
		case KVMPPC_GSID_TAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
			break;
		case KVMPPC_GSID_FSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
			break;
		case KVMPPC_GSID_EBBHR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
			break;
		case KVMPPC_GSID_EBBRR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
			break;
		case KVMPPC_GSID_BESCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
			break;
		case KVMPPC_GSID_IC:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
			break;
		case KVMPPC_GSID_CTRL:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
			break;
		case KVMPPC_GSID_PIDR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
			break;
		case KVMPPC_GSID_AMOR: {
			u64 amor = ~0;

			rc = kvmppc_gse_put_u64(gsb, iden, amor);
			break;
		}
		case KVMPPC_GSID_VRSAVE:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.gpr[i]);
			break;
		case KVMPPC_GSID_CR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
			break;
		case KVMPPC_GSID_XER:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
			break;
		case KVMPPC_GSID_CTR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
			break;
		case KVMPPC_GSID_LR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.link);
			break;
		case KVMPPC_GSID_NIA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
			break;
		case KVMPPC_GSID_SRR0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr0);
			break;
		case KVMPPC_GSID_SRR1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr1);
			break;
		case KVMPPC_GSID_SPRG0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg0);
			break;
		case KVMPPC_GSID_SPRG1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg1);
			break;
		case KVMPPC_GSID_SPRG2:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg2);
			break;
		case KVMPPC_GSID_SPRG3:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg3);
			break;
		case KVMPPC_GSID_DAR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.dar);
			break;
		case KVMPPC_GSID_DSISR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.shregs.dsisr);
			break;
		case KVMPPC_GSID_MSR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.msr);
			break;
		case KVMPPC_GSID_VTB:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->vtb);
			break;
		case KVMPPC_GSID_LPCR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->lpcr);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->tb_offset);
			break;
		case KVMPPC_GSID_FPSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
			break;
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&v, &vcpu->arch.fp.fpr[i],
			       sizeof(vcpu->arch.fp.fpr[i]));
			rc = kvmppc_gse_put_vector128(gsb, iden, &v);
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.vr.vscr.u[3]);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			rc = kvmppc_gse_put_vector128(gsb, iden,
						      &vcpu->arch.vr.vr[i]);
			break;
#endif
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			dw = vcpu->arch.dec_expires -
			     vcpu->arch.vcore->tb_offset;
			rc = kvmppc_gse_put_u64(gsb, iden, dw);
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			/*
			 * 'arch_compat == 0' would normally mean the default
			 * compatibility mode, but arch_compat is a Guest Wide
			 * Element and filling it with a value of 0 in the GSB
			 * would result in a kernel trap. Hence, when
			 * arch_compat == 0, default to the L1's PVR.
			 */
			if (!vcpu->arch.vcore->arch_compat) {
				if (cpu_has_feature(CPU_FTR_ARCH_31))
					arch_compat = PVR_ARCH_31;
				else if (cpu_has_feature(CPU_FTR_ARCH_300))
					arch_compat = PVR_ARCH_300;
			} else {
				arch_compat = vcpu->arch.vcore->arch_compat;
			}
			rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
			break;
		}

		if (rc < 0)
			return rc;
	}

	return 0;
}

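/*
 * Deserialise guest state elements returned by the L0 host back into the
 * vcpu struct, marking each recognised ID as valid (cached) so later reads
 * do not need another trip to the L0 host.
 */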
static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
					struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvm_vcpu *vcpu;
	struct kvmppc_gs_elem *gse;
	vector128 v;
	int rc, i;
	u16 iden;

	vcpu = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	kvmppc_gsp_for_each(&gsp, iden, gse)
	{
		switch (iden) {
		case KVMPPC_GSID_DSCR:
			vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_MMCRA:
			vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HFSCR:
			vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PURR:
			vcpu->arch.purr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPURR:
			vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_AMR:
			vcpu->arch.amr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_UAMOR:
			vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIAR:
			vcpu->arch.siar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SDAR:
			vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IAMR:
			vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR0:
			vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR1:
			vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWRX0:
			vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DAWRX1:
			vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_CIABR:
			vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_WORT:
			vcpu->arch.wort = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_PPR:
			vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PSPB:
			vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_TAR:
			vcpu->arch.tar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FSCR:
			vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBHR:
			vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBRR:
			vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_BESCR:
			vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IC:
			vcpu->arch.ic = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTRL:
			vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PIDR:
			vcpu->arch.pid = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_AMOR:
			break;
		case KVMPPC_GSID_VRSAVE:
			vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CR:
			vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_XER:
			vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTR:
			vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LR:
			vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_NIA:
			vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR0:
			vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR1:
			vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG0:
			vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG1:
			vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG2:
			vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG3:
			vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAR:
			vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DSISR:
			vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MSR:
			vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VTB:
			vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LPCR:
			vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FPSCR:
			vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			kvmppc_gse_get_vector128(gse, &v);
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&vcpu->arch.fp.fpr[i], &v,
			       sizeof(vcpu->arch.fp.fpr[i]));
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
			break;
#endif
		case KVMPPC_GSID_HDAR:
			vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HDSISR:
			vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_ASDR:
			vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HEIR:
			vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			dw = kvmppc_gse_get_u64(gse);
			vcpu->arch.dec_expires =
				dw + vcpu->arch.vcore->tb_offset;
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
			break;
		default:
			continue;
		}
		kvmppc_gsbm_set(valids, iden);
	}

	return 0;
}

static struct kvmppc_gs_msg_ops vcpu_message_ops = {
	.get_size = gs_msg_ops_vcpu_get_size,
	.fill_info = gs_msg_ops_vcpu_fill_info,
	.refresh_info = gs_msg_ops_vcpu_refresh_info,
};

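/*
 * Negotiate the run configuration with the L0 host for a new vcpu: query
 * the minimum run output buffer size, allocate and register the run
 * input/output guest state buffers, and create the vcpu (thread wide) and
 * vcore (guest wide) messages used to track which elements must be sent.
 */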
static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
				      struct kvmhv_nestedv2_io *io)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
	unsigned long guest_id, vcpu_id;
	struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
	int rc;

	cfg = &io->cfg;
	guest_id = vcpu->kvm->arch.lpid;
	vcpu_id = vcpu->vcpu_id;

	gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
			     GFP_KERNEL);
	if (!gsm) {
		rc = -ENOMEM;
		goto err;
	}

	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
			     GFP_KERNEL);
	if (!gsb) {
		rc = -ENOMEM;
		goto free_gsm;
	}

	rc = kvmppc_gsb_receive_datum(gsb, gsm,
				      KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
		goto free_gsb;
	}

	vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
					 vcpu_id, GFP_KERNEL);
	if (!vcpu_run_output) {
		rc = -ENOMEM;
		goto free_gsb;
	}

	cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
	cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
	io->vcpu_run_output = vcpu_run_output;

	gsm->flags = 0;
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
		goto free_gs_out;
	}

	vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
	if (!vcpu_message) {
		rc = -ENOMEM;
		goto free_gs_out;
	}
	kvmppc_gsm_include_all(vcpu_message);

	io->vcpu_message = vcpu_message;

	vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
					vcpu_id, GFP_KERNEL);
	if (!vcpu_run_input) {
		rc = -ENOMEM;
		goto free_vcpu_message;
	}

	io->vcpu_run_input = vcpu_run_input;
	cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
	cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
		goto free_vcpu_run_input;
	}

	vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
				       KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
	if (!vcore_message) {
		rc = -ENOMEM;
		goto free_vcpu_run_input;
	}

	kvmppc_gsm_include_all(vcore_message);
	kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
	io->vcore_message = vcore_message;

	kvmppc_gsbm_fill(&io->valids);
	kvmppc_gsm_free(gsm);
	kvmppc_gsb_free(gsb);
	return 0;

free_vcpu_run_input:
	kvmppc_gsb_free(vcpu_run_input);
free_vcpu_message:
	kvmppc_gsm_free(vcpu_message);
free_gs_out:
	kvmppc_gsb_free(vcpu_run_output);
free_gsb:
	kvmppc_gsb_free(gsb);
free_gsm:
	kvmppc_gsm_free(gsm);
err:
	return rc;
}

/**
 * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
 * @vcpu: vcpu
 * @iden: guest state ID
 *
 * Mark a guest state ID as having been changed by the L1 host so that its
 * new value is sent to the L0 hypervisor on the next flush. See
 * kvmhv_nestedv2_flush_vcpu() and the usage sketch after this function.
 */
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_msg *gsm;

	if (!iden)
		return 0;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;
	gsm = io->vcpu_message;
	kvmppc_gsm_include(gsm, iden);
	gsm = io->vcore_message;
	kvmppc_gsm_include(gsm, iden);
	kvmppc_gsbm_set(valids, iden);
	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
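
/*
 * Illustrative sketch only (not part of this file): after the L1 host
 * changes a register mirrored in the vcpu struct, it marks the matching
 * guest state ID dirty so the next kvmhv_nestedv2_flush_vcpu() pushes the
 * value to the L0 host. kvmhv_nestedv2_mark_dirty() is the wrapper already
 * used later in this file; new_dscr is a placeholder:
 *
 *	vcpu->arch.dscr = new_dscr;
 *	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_DSCR);
 */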

/**
 * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
 * @vcpu: vcpu
 * @iden: guest state ID
 *
 * Reload the value of a guest state ID from the L0 host into the L1 host.
 * The value is cached, so the L0 host is only contacted when the cached
 * copy is not valid. See the usage sketch after this function.
 */
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;
	int rc;

	if (!iden)
		return 0;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;
	if (kvmppc_gsbm_test(valids, iden))
		return 0;

	gsb = io->vcpu_run_input;
	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
	rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
		return rc;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
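
/*
 * Illustrative sketch only (not part of this file): before the L1 host
 * reads a register that may only be current on the L0 host, it reloads the
 * cached copy first. The error handling shown is an assumption:
 *
 *	if (__kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_DSCR) < 0)
 *		return -EINVAL;
 *	dscr = vcpu->arch.dscr;
 */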

/**
 * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
 * @vcpu: vcpu
 * @time_limit: hdec expiry tb
 *
 * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
 * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
 * wide values need to be sent with H_GUEST_SET first.
 *
 * The hdec expiry tb is always sent to the L0 host.
 */
int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg *gsm;
	int rc;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_input;
	gsm = io->vcore_message;
	rc = kvmppc_gsb_send_data(gsb, gsm);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
		return rc;
	}

	gsm = io->vcpu_message;
	kvmppc_gsb_reset(gsb);
	rc = kvmppc_gsm_fill_info(gsm, gsb);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
		return rc;
	}

	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
	if (rc < 0)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
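
/*
 * Illustrative sketch only (not part of this file): the expected ordering in
 * the HV entry path is to flush dirty state, run the vcpu on the L0 host via
 * H_GUEST_RUN_VCPU, then parse the run output buffer. The run hcall wrapper
 * named below is an assumption about its name and signature; the real entry
 * path lives in book3s_hv.c:
 *
 *	rc = kvmhv_nestedv2_flush_vcpu(vcpu, time_limit);
 *	if (rc < 0)
 *		return rc;
 *	rc = plpar_guest_run_vcpu(0, lpid, vcpu_id, ...);  (assumed wrapper)
 *	if (rc == H_SUCCESS)
 *		rc = kvmhv_nestedv2_parse_output(vcpu);
 */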

/**
 * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state
 * to the L0 host
 * @lpid: guest id
 * @dw0: partition table double word
 * @dw1: process table double word
 */
int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
{
	struct kvmppc_gs_part_table patbl;
	struct kvmppc_gs_proc_table prtbl;
	struct kvmppc_gs_buff *gsb;
	size_t size;
	int rc;

	size = kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
	       kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
	       sizeof(struct kvmppc_gs_header);
	gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
	if (!gsb)
		return -ENOMEM;

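	/*
	 * Decode the radix partition table entry in dw0: root page directory
	 * base (RPDB), effective address bits from the split RTS field
	 * (biased by 31) and the root directory size from RPDS. dw1 carries
	 * the process table base (PRTB) and its size encoding (PRTS).
	 */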
	patbl.address = dw0 & RPDB_MASK;
	patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
			  ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
			 31);
	patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
	rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
	if (rc < 0)
		goto free_gsb;

	prtbl.address = dw1 & PRTB_MASK;
	prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
	rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
	if (rc < 0)
		goto free_gsb;

	rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
		goto free_gsb;
	}

	kvmppc_gsb_free(gsb);
	return 0;

free_gsb:
	kvmppc_gsb_free(gsb);
	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);

/**
 * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0
 * @vcpu: vcpu
 * @vpa: L1 logical real address
 */
int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	int rc = 0;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_input;

	kvmppc_gsb_reset(gsb);
	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa);
	if (rc < 0)
		goto out;

	rc = kvmppc_gsb_send(gsb, 0);
	if (rc < 0)
		pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc);

out:
	kvmppc_gsb_reset(gsb);
	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa);

/**
 * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
 * @vcpu: vcpu
 *
 * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
 */
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_output;

	vcpu->arch.fault_dar = 0;
	vcpu->arch.fault_dsisr = 0;
	vcpu->arch.fault_gpa = 0;
	vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;

	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
	return kvmppc_gsm_refresh_info(&gsm, gsb);
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);

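/*
 * Free the messages and guest state buffers allocated by
 * kvmhv_nestedv2_host_create().
 */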
static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
				     struct kvmhv_nestedv2_io *io)
{
	kvmppc_gsm_free(io->vcpu_message);
	kvmppc_gsm_free(io->vcore_message);
	kvmppc_gsb_free(io->vcpu_run_input);
	kvmppc_gsb_free(io->vcpu_run_output);
}

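/*
 * Pull any pt_regs related state (GPRs, CR, XER, CTR, LR, NIA) that is not
 * already cached as valid back from the L0 host in a single guest state
 * buffer receive.
 */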
int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;
	int rc = 0;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	gsb = io->vcpu_run_input;
	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);

	for (int i = 0; i < 32; i++) {
		if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
			kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
	}

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);

	rc = kvmppc_gsb_receive_data(gsb, &gsm);
	if (rc < 0)
		pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");

	return rc;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);

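/*
 * Mark all pt_regs related guest state IDs dirty so the whole register set
 * is pushed to the L0 host on the next flush.
 */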
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
				       struct pt_regs *regs)
{
	for (int i = 0; i < 32; i++)
		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));

	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);

	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);

/**
 * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
 * @vcpu: vcpu
 * @io: NESTEDv2 nested io state
 *
 * Create a nested vcpu in the L0 host with the H_GUEST_CREATE_VCPU hcall and
 * set up the guest state buffers used to communicate with it.
 */
int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
			       struct kvmhv_nestedv2_io *io)
{
	long rc;

	rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);

	if (rc != H_SUCCESS) {
		pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
		switch (rc) {
		case H_NOT_ENOUGH_RESOURCES:
		case H_ABORTED:
			return -ENOMEM;
		case H_AUTHORITY:
			return -EPERM;
		default:
			return -EINVAL;
		}
	}

	rc = kvmhv_nestedv2_host_create(vcpu, io);

	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);

/**
 * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
 * @vcpu: vcpu
 * @io: NESTEDv2 nested io state
 */
void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
			      struct kvmhv_nestedv2_io *io)
{
	kvmhv_nestedv2_host_free(vcpu, io);
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);