1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com>
4 *
5 * Authors:
6 * Jordan Niethe <jniethe5@gmail.com>
7 *
8 * Description: KVM functions specific to running on Book 3S
9 * processors as a NESTEDv2 guest.
10 *
11 */
12
13 #include "linux/blk-mq.h"
14 #include "linux/console.h"
15 #include "linux/gfp_types.h"
16 #include "linux/signal.h"
17 #include <linux/kernel.h>
18 #include <linux/kvm_host.h>
19 #include <linux/pgtable.h>
20
21 #include <asm/kvm_ppc.h>
22 #include <asm/kvm_book3s.h>
23 #include <asm/hvcall.h>
24 #include <asm/pgalloc.h>
25 #include <asm/reg.h>
26 #include <asm/plpar_wrappers.h>
27 #include <asm/guest-state-buffer.h>
28 #include "trace_hv.h"
29
/*
 * Static key gating NESTEDv2-only code paths. Presumably enabled during
 * early boot when running as a PAPR NESTEDv2 guest hypervisor — it is not
 * written anywhere in this file; confirm against the init code that sets it.
 */
struct static_key_false __kvmhv_is_nestedv2 __read_mostly;
EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2);
32
33
34 static size_t
gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg * gsm)35 gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm)
36 {
37 u16 ids[] = {
38 KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
39 KVMPPC_GSID_RUN_INPUT,
40 KVMPPC_GSID_RUN_OUTPUT,
41
42 };
43 size_t size = 0;
44
45 for (int i = 0; i < ARRAY_SIZE(ids); i++)
46 size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i]));
47 return size;
48 }
49
50 static int
gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff * gsb,struct kvmppc_gs_msg * gsm)51 gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb,
52 struct kvmppc_gs_msg *gsm)
53 {
54 struct kvmhv_nestedv2_config *cfg;
55 int rc;
56
57 cfg = gsm->data;
58
59 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) {
60 rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE,
61 cfg->vcpu_run_output_size);
62 if (rc < 0)
63 return rc;
64 }
65
66 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) {
67 rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT,
68 cfg->vcpu_run_input_cfg);
69 if (rc < 0)
70 return rc;
71 }
72
73 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) {
74 rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT,
75 cfg->vcpu_run_output_cfg);
76 if (rc < 0)
77 return rc;
78 }
79
80 return 0;
81 }
82
83 static int
gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg * gsm,struct kvmppc_gs_buff * gsb)84 gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
85 struct kvmppc_gs_buff *gsb)
86 {
87 struct kvmhv_nestedv2_config *cfg;
88 struct kvmppc_gs_parser gsp = { 0 };
89 struct kvmppc_gs_elem *gse;
90 int rc;
91
92 cfg = gsm->data;
93
94 rc = kvmppc_gse_parse(&gsp, gsb);
95 if (rc < 0)
96 return rc;
97
98 gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
99 if (gse)
100 cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
101 return 0;
102 }
103
/* Message ops for the run input/output buffer configuration exchange. */
static struct kvmppc_gs_msg_ops config_msg_ops = {
	.get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
	.fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
	.refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
};
109
gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg * gsm)110 static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
111 {
112 struct kvmppc_gs_bitmap gsbm = { 0 };
113 size_t size = 0;
114 u16 iden;
115
116 kvmppc_gsbm_fill(&gsbm);
117 kvmppc_gsbm_for_each(&gsbm, iden)
118 {
119 switch (iden) {
120 case KVMPPC_GSID_HOST_STATE_SIZE:
121 case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
122 case KVMPPC_GSID_PARTITION_TABLE:
123 case KVMPPC_GSID_PROCESS_TABLE:
124 case KVMPPC_GSID_RUN_INPUT:
125 case KVMPPC_GSID_RUN_OUTPUT:
126 break;
127 default:
128 size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
129 }
130 }
131 return size;
132 }
133
/*
 * Serialize the vcpu register state selected by @gsm into @gsb so it can be
 * sent to the L0 hypervisor. Each guest-state ID maps to one vcpu/vcore
 * field; elements whose wideness does not match the message flags are
 * skipped (guest-wide and thread-wide state travel in separate messages).
 */
static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
				     struct kvmppc_gs_msg *gsm)
{
	struct kvm_vcpu *vcpu;
	vector128 v;
	int rc, i;
	u16 iden;
	u32 arch_compat = 0;

	vcpu = gsm->data;

	kvmppc_gsm_for_each(gsm, iden)
	{
		rc = 0;

		/* Only serialize IDs matching this message's wideness. */
		if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
		    (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
			continue;

		switch (iden) {
		case KVMPPC_GSID_DSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
			break;
		case KVMPPC_GSID_MMCRA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
			break;
		case KVMPPC_GSID_HFSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
			break;
		case KVMPPC_GSID_PURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
			break;
		case KVMPPC_GSID_SPURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
			break;
		case KVMPPC_GSID_AMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
			break;
		case KVMPPC_GSID_UAMOR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
			break;
		case KVMPPC_GSID_SIAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
			break;
		case KVMPPC_GSID_SDAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
			break;
		case KVMPPC_GSID_IAMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
			break;
		case KVMPPC_GSID_DAWR0:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
			break;
		case KVMPPC_GSID_DAWR1:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
			break;
		case KVMPPC_GSID_DAWRX0:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
			break;
		case KVMPPC_GSID_DAWRX1:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
			break;
		case KVMPPC_GSID_DEXCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dexcr);
			break;
		case KVMPPC_GSID_HASHKEYR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashkeyr);
			break;
		case KVMPPC_GSID_HASHPKEYR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hashpkeyr);
			break;
		case KVMPPC_GSID_CIABR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
			break;
		case KVMPPC_GSID_WORT:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
			break;
		case KVMPPC_GSID_PPR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
			break;
		case KVMPPC_GSID_PSPB:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
			break;
		case KVMPPC_GSID_TAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
			break;
		case KVMPPC_GSID_FSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
			break;
		case KVMPPC_GSID_EBBHR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
			break;
		case KVMPPC_GSID_EBBRR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
			break;
		case KVMPPC_GSID_BESCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
			break;
		case KVMPPC_GSID_IC:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
			break;
		case KVMPPC_GSID_CTRL:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
			break;
		case KVMPPC_GSID_PIDR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
			break;
		case KVMPPC_GSID_AMOR: {
			/* AMOR is always presented to the L0 as all-ones. */
			u64 amor = ~0;

			rc = kvmppc_gse_put_u64(gsb, iden, amor);
			break;
		}
		case KVMPPC_GSID_VRSAVE:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.gpr[i]);
			break;
		case KVMPPC_GSID_CR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
			break;
		case KVMPPC_GSID_XER:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
			break;
		case KVMPPC_GSID_CTR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
			break;
		case KVMPPC_GSID_LR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.link);
			break;
		case KVMPPC_GSID_NIA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
			break;
		case KVMPPC_GSID_SRR0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr0);
			break;
		case KVMPPC_GSID_SRR1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr1);
			break;
		case KVMPPC_GSID_SPRG0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg0);
			break;
		case KVMPPC_GSID_SPRG1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg1);
			break;
		case KVMPPC_GSID_SPRG2:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg2);
			break;
		case KVMPPC_GSID_SPRG3:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg3);
			break;
		case KVMPPC_GSID_DAR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.dar);
			break;
		case KVMPPC_GSID_DSISR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.shregs.dsisr);
			break;
		case KVMPPC_GSID_MSR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.msr);
			break;
		case KVMPPC_GSID_VTB:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->vtb);
			break;
		case KVMPPC_GSID_DPDES:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->dpdes);
			break;
		case KVMPPC_GSID_LPCR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->lpcr);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->tb_offset);
			break;
		case KVMPPC_GSID_FPSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
			break;
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			/* FP regs are sent widened into a vector128. */
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&v, &vcpu->arch.fp.fpr[i],
			       sizeof(vcpu->arch.fp.fpr[i]));
			rc = kvmppc_gse_put_vector128(gsb, iden, &v);
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.vr.vscr.u[3]);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			rc = kvmppc_gse_put_vector128(gsb, iden,
						      &vcpu->arch.vr.vr[i]);
			break;
#endif
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			/* Convert from guest to L0 timebase before sending. */
			dw = vcpu->arch.dec_expires -
				vcpu->arch.vcore->tb_offset;
			rc = kvmppc_gse_put_u64(gsb, iden, dw);
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			/*
			 * Though 'arch_compat == 0' would mean the default
			 * compatibility, arch_compat, being a Guest Wide
			 * Element, cannot be filled with a value of 0 in GSB
			 * as this would result into a kernel trap.
			 * Hence, when `arch_compat == 0`, arch_compat should
			 * default to L1's PVR.
			 */
			if (!vcpu->arch.vcore->arch_compat) {
				if (cpu_has_feature(CPU_FTR_ARCH_31))
					arch_compat = PVR_ARCH_31;
				else if (cpu_has_feature(CPU_FTR_ARCH_300))
					arch_compat = PVR_ARCH_300;
			} else {
				arch_compat = vcpu->arch.vcore->arch_compat;
			}
			rc = kvmppc_gse_put_u32(gsb, iden, arch_compat);
			break;
		}

		if (rc < 0)
			return rc;
	}

	return 0;
}
390
/*
 * Deserialize vcpu register state received from the L0 hypervisor. Each
 * element found in @gsb is copied into the matching vcpu/vcore field and
 * then marked valid in the nestedv2 cache; unrecognized IDs are skipped
 * without being marked valid.
 */
static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
					struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvm_vcpu *vcpu;
	struct kvmppc_gs_elem *gse;
	vector128 v;
	int rc, i;
	u16 iden;

	vcpu = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	kvmppc_gsp_for_each(&gsp, iden, gse)
	{
		switch (iden) {
		case KVMPPC_GSID_DSCR:
			vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_MMCRA:
			vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HFSCR:
			vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PURR:
			vcpu->arch.purr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPURR:
			vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_AMR:
			vcpu->arch.amr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_UAMOR:
			vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIAR:
			vcpu->arch.siar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SDAR:
			vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IAMR:
			vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR0:
			vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR1:
			vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWRX0:
			vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DAWRX1:
			vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DEXCR:
			vcpu->arch.dexcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HASHKEYR:
			vcpu->arch.hashkeyr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HASHPKEYR:
			vcpu->arch.hashpkeyr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CIABR:
			vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_WORT:
			vcpu->arch.wort = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_PPR:
			vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PSPB:
			vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_TAR:
			vcpu->arch.tar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FSCR:
			vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBHR:
			vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBRR:
			vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_BESCR:
			vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IC:
			vcpu->arch.ic = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTRL:
			vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PIDR:
			vcpu->arch.pid = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_AMOR:
			/* AMOR is write-only from L1's view; nothing to keep. */
			break;
		case KVMPPC_GSID_VRSAVE:
			vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CR:
			vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_XER:
			vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTR:
			vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LR:
			vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_NIA:
			vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR0:
			vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR1:
			vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG0:
			vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG1:
			vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG2:
			vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG3:
			vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAR:
			vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DSISR:
			vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MSR:
			vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VTB:
			vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DPDES:
			vcpu->arch.vcore->dpdes = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LPCR:
			vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FPSCR:
			vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			/* FP regs arrive widened in a vector128; narrow back. */
			kvmppc_gse_get_vector128(gse, &v);
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&vcpu->arch.fp.fpr[i], &v,
			       sizeof(vcpu->arch.fp.fpr[i]));
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
			break;
#endif
		case KVMPPC_GSID_HDAR:
			vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HDSISR:
			vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_ASDR:
			vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HEIR:
			vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			/* Convert from L0 back to guest timebase. */
			dw = kvmppc_gse_get_u64(gse);
			vcpu->arch.dec_expires =
				dw + vcpu->arch.vcore->tb_offset;
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
			break;
		default:
			/* Unknown ID: skip without marking it valid. */
			continue;
		}
		kvmppc_gsbm_set(valids, iden);
	}

	return 0;
}
626
/* Message ops for vcpu (and guest-wide vcore) register state exchange. */
static struct kvmppc_gs_msg_ops vcpu_message_ops = {
	.get_size = gs_msg_ops_vcpu_get_size,
	.fill_info = gs_msg_ops_vcpu_fill_info,
	.refresh_info = gs_msg_ops_vcpu_refresh_info,
};
632
/*
 * Allocate and register the L1-side NESTEDv2 state for one vcpu:
 * query the L0's minimum run-output size, allocate the run output/input
 * buffers and tell the L0 about them, and build the vcpu (thread-wide)
 * and vcore (guest-wide) messages used by later flush/reload calls.
 * On success @io owns the allocated buffers/messages; on failure all
 * allocations made so far are released via the goto-cleanup chain.
 */
static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
				      struct kvmhv_nestedv2_io *io)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
	unsigned long guest_id, vcpu_id;
	struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
	int rc;

	cfg = &io->cfg;
	guest_id = vcpu->kvm->arch.lpid;
	vcpu_id = vcpu->vcpu_id;

	/* Scratch message/buffer for the config exchange with the L0. */
	gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
			     GFP_KERNEL);
	if (!gsm) {
		rc = -ENOMEM;
		goto err;
	}

	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
			     GFP_KERNEL);
	if (!gsb) {
		rc = -ENOMEM;
		goto free_gsm;
	}

	/* Ask the L0 how large the run-vcpu output buffer must be. */
	rc = kvmppc_gsb_receive_datum(gsb, gsm,
				      KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
		goto free_gsb;
	}

	vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
					 vcpu_id, GFP_KERNEL);
	if (!vcpu_run_output) {
		rc = -ENOMEM;
		goto free_gsb;
	}

	cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
	cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
	io->vcpu_run_output = vcpu_run_output;

	/* Buffer registrations below are thread-wide, not guest-wide. */
	gsm->flags = 0;
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
		goto free_gs_out;
	}

	vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
	if (!vcpu_message) {
		rc = -ENOMEM;
		goto free_gs_out;
	}
	kvmppc_gsm_include_all(vcpu_message);

	io->vcpu_message = vcpu_message;

	/* Input buffer must be able to hold the full vcpu message. */
	vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
					vcpu_id, GFP_KERNEL);
	if (!vcpu_run_input) {
		rc = -ENOMEM;
		goto free_vcpu_message;
	}

	io->vcpu_run_input = vcpu_run_input;
	cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
	cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
		goto free_vcpu_run_input;
	}

	/* Guest-wide message; logical PVR is handled separately elsewhere. */
	vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
				       KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
	if (!vcore_message) {
		rc = -ENOMEM;
		goto free_vcpu_run_input;
	}

	kvmppc_gsm_include_all(vcore_message);
	kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
	io->vcore_message = vcore_message;

	/* All local state starts out in sync with the L0. */
	kvmppc_gsbm_fill(&io->valids);
	kvmppc_gsm_free(gsm);
	kvmppc_gsb_free(gsb);
	return 0;

free_vcpu_run_input:
	kvmppc_gsb_free(vcpu_run_input);
free_vcpu_message:
	kvmppc_gsm_free(vcpu_message);
free_gs_out:
	kvmppc_gsb_free(vcpu_run_output);
free_gsb:
	kvmppc_gsb_free(gsb);
free_gsm:
	kvmppc_gsm_free(gsm);
err:
	return rc;
}
739
740 /**
741 * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
742 * @vcpu: vcpu
743 * @iden: guest state ID
744 *
745 * Mark a guest state ID as having been changed by the L1 host and thus
746 * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu()
747 */
__kvmhv_nestedv2_mark_dirty(struct kvm_vcpu * vcpu,u16 iden)748 int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
749 {
750 struct kvmhv_nestedv2_io *io;
751 struct kvmppc_gs_bitmap *valids;
752 struct kvmppc_gs_msg *gsm;
753
754 if (!iden)
755 return 0;
756
757 io = &vcpu->arch.nestedv2_io;
758 valids = &io->valids;
759 gsm = io->vcpu_message;
760 kvmppc_gsm_include(gsm, iden);
761 gsm = io->vcore_message;
762 kvmppc_gsm_include(gsm, iden);
763 kvmppc_gsbm_set(valids, iden);
764 return 0;
765 }
766 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);
767
768 /**
769 * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
770 * @vcpu: vcpu
771 * @iden: guest state ID
772 *
773 * Reload the value for the guest state ID from the L0 host into the L1 host.
774 * This is cached so that going out to the L0 host only happens if necessary.
775 */
__kvmhv_nestedv2_cached_reload(struct kvm_vcpu * vcpu,u16 iden)776 int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
777 {
778 struct kvmhv_nestedv2_io *io;
779 struct kvmppc_gs_bitmap *valids;
780 struct kvmppc_gs_buff *gsb;
781 struct kvmppc_gs_msg gsm;
782 int rc;
783
784 if (!iden)
785 return 0;
786
787 io = &vcpu->arch.nestedv2_io;
788 valids = &io->valids;
789 if (kvmppc_gsbm_test(valids, iden))
790 return 0;
791
792 gsb = io->vcpu_run_input;
793 kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
794 rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
795 if (rc < 0) {
796 pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
797 return rc;
798 }
799 return 0;
800 }
801 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);
802
/**
 * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
 * @vcpu: vcpu
 * @time_limit: hdec expiry tb
 *
 * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
 * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
 * wide values need to be sent with H_GUEST_SET first.
 *
 * The hdec tb offset is always sent to L0 host.
 */
int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg *gsm;
	int rc;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_input;
	gsm = io->vcore_message;
	/* Guest-wide (vcore) elements must go out first, via H_GUEST_SET. */
	rc = kvmppc_gsb_send_data(gsb, gsm);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
		return rc;
	}

	/* Thread-wide elements are only staged into the run input buffer. */
	gsm = io->vcpu_message;
	kvmppc_gsb_reset(gsb);
	rc = kvmppc_gsm_fill_info(gsm, gsb);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
		return rc;
	}

	/* The HDEC expiry timebase is appended unconditionally. */
	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
	if (rc < 0)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);
844
/**
 * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
 * L0 host
 * @lpid: guest id
 * @dw0: partition table double word
 * @dw1: process table double word
 */
int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
{
	struct kvmppc_gs_part_table patbl;
	struct kvmppc_gs_proc_table prtbl;
	struct kvmppc_gs_buff *gsb;
	size_t size;
	int rc;

	/* Exactly two elements (PATB + PRTB) plus the buffer header. */
	size = kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
	       kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
	       sizeof(struct kvmppc_gs_header);
	gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
	if (!gsb)
		return -ENOMEM;

	/*
	 * Decode the radix partition-table doubleword: base address, the
	 * radix tree size (RTS, split across two fields in dw0; effective
	 * address bits = RTS + 31) and the root page-directory size.
	 */
	patbl.address = dw0 & RPDB_MASK;
	patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
			  ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
			 31);
	patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
	rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
	if (rc < 0)
		goto free_gsb;

	/* Process-table doubleword: base address and table size. */
	prtbl.address = dw1 & PRTB_MASK;
	prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
	rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
	if (rc < 0)
		goto free_gsb;

	/* Both tables are guest-wide state. */
	rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
		goto free_gsb;
	}

	kvmppc_gsb_free(gsb);
	return 0;

free_gsb:
	kvmppc_gsb_free(gsb);
	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);
898
899 /**
900 * kvmhv_nestedv2_set_vpa() - register L2 VPA with L0
901 * @vcpu: vcpu
902 * @vpa: L1 logical real address
903 */
kvmhv_nestedv2_set_vpa(struct kvm_vcpu * vcpu,unsigned long vpa)904 int kvmhv_nestedv2_set_vpa(struct kvm_vcpu *vcpu, unsigned long vpa)
905 {
906 struct kvmhv_nestedv2_io *io;
907 struct kvmppc_gs_buff *gsb;
908 int rc = 0;
909
910 io = &vcpu->arch.nestedv2_io;
911 gsb = io->vcpu_run_input;
912
913 kvmppc_gsb_reset(gsb);
914 rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_VPA, vpa);
915 if (rc < 0)
916 goto out;
917
918 rc = kvmppc_gsb_send(gsb, 0);
919 if (rc < 0)
920 pr_err("KVM-NESTEDv2: couldn't register the L2 VPA (rc=%d)\n", rc);
921
922 out:
923 kvmppc_gsb_reset(gsb);
924 return rc;
925 }
926 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_vpa);
927
928 /**
929 * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
930 * @vcpu: vcpu
931 *
932 * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
933 */
kvmhv_nestedv2_parse_output(struct kvm_vcpu * vcpu)934 int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
935 {
936 struct kvmhv_nestedv2_io *io;
937 struct kvmppc_gs_buff *gsb;
938 struct kvmppc_gs_msg gsm;
939
940 io = &vcpu->arch.nestedv2_io;
941 gsb = io->vcpu_run_output;
942
943 vcpu->arch.fault_dar = 0;
944 vcpu->arch.fault_dsisr = 0;
945 vcpu->arch.fault_gpa = 0;
946 vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;
947
948 kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
949 return kvmppc_gsm_refresh_info(&gsm, gsb);
950 }
951 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);
952
kvmhv_nestedv2_host_free(struct kvm_vcpu * vcpu,struct kvmhv_nestedv2_io * io)953 static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
954 struct kvmhv_nestedv2_io *io)
955 {
956 kvmppc_gsm_free(io->vcpu_message);
957 kvmppc_gsm_free(io->vcore_message);
958 kvmppc_gsb_free(io->vcpu_run_input);
959 kvmppc_gsb_free(io->vcpu_run_output);
960 }
961
__kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu * vcpu,struct pt_regs * regs)962 int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
963 {
964 struct kvmhv_nestedv2_io *io;
965 struct kvmppc_gs_bitmap *valids;
966 struct kvmppc_gs_buff *gsb;
967 struct kvmppc_gs_msg gsm;
968 int rc = 0;
969
970
971 io = &vcpu->arch.nestedv2_io;
972 valids = &io->valids;
973
974 gsb = io->vcpu_run_input;
975 kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
976
977 for (int i = 0; i < 32; i++) {
978 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
979 kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
980 }
981
982 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
983 kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);
984
985 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
986 kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);
987
988 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
989 kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);
990
991 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
992 kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);
993
994 if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
995 kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);
996
997 rc = kvmppc_gsb_receive_data(gsb, &gsm);
998 if (rc < 0)
999 pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");
1000
1001 return rc;
1002 }
1003 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);
1004
__kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu * vcpu,struct pt_regs * regs)1005 int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
1006 struct pt_regs *regs)
1007 {
1008 for (int i = 0; i < 32; i++)
1009 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));
1010
1011 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
1012 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
1013 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
1014 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
1015 kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);
1016
1017 return 0;
1018 }
1019 EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);
1020
1021 /**
1022 * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
1023 * @vcpu: vcpu
1024 * @io: NESTEDv2 nested io state
1025 *
1026 * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
1027 */
kvmhv_nestedv2_vcpu_create(struct kvm_vcpu * vcpu,struct kvmhv_nestedv2_io * io)1028 int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu,
1029 struct kvmhv_nestedv2_io *io)
1030 {
1031 long rc;
1032
1033 rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id);
1034
1035 if (rc != H_SUCCESS) {
1036 pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc);
1037 switch (rc) {
1038 case H_NOT_ENOUGH_RESOURCES:
1039 case H_ABORTED:
1040 return -ENOMEM;
1041 case H_AUTHORITY:
1042 return -EPERM;
1043 default:
1044 return -EINVAL;
1045 }
1046 }
1047
1048 rc = kvmhv_nestedv2_host_create(vcpu, io);
1049
1050 return rc;
1051 }
1052 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create);
1053
/**
 * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state
 * @vcpu: vcpu
 * @io: NESTEDv2 nested io state
 *
 * Releases the buffers and messages allocated by kvmhv_nestedv2_vcpu_create().
 */
void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu,
			      struct kvmhv_nestedv2_io *io)
{
	kvmhv_nestedv2_host_free(vcpu, io);
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free);
1065