1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright 2023 Jordan Niethe, IBM Corp. <jniethe5@gmail.com> 4 * 5 * Authors: 6 * Jordan Niethe <jniethe5@gmail.com> 7 * 8 * Description: KVM functions specific to running on Book 3S 9 * processors as a NESTEDv2 guest. 10 * 11 */ 12 13 #include "linux/blk-mq.h" 14 #include "linux/console.h" 15 #include "linux/gfp_types.h" 16 #include "linux/signal.h" 17 #include <linux/kernel.h> 18 #include <linux/kvm_host.h> 19 #include <linux/pgtable.h> 20 21 #include <asm/kvm_ppc.h> 22 #include <asm/kvm_book3s.h> 23 #include <asm/hvcall.h> 24 #include <asm/pgalloc.h> 25 #include <asm/reg.h> 26 #include <asm/plpar_wrappers.h> 27 #include <asm/guest-state-buffer.h> 28 #include "trace_hv.h" 29 30 struct static_key_false __kvmhv_is_nestedv2 __read_mostly; 31 EXPORT_SYMBOL_GPL(__kvmhv_is_nestedv2); 32 33 34 static size_t 35 gs_msg_ops_kvmhv_nestedv2_config_get_size(struct kvmppc_gs_msg *gsm) 36 { 37 u16 ids[] = { 38 KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE, 39 KVMPPC_GSID_RUN_INPUT, 40 KVMPPC_GSID_RUN_OUTPUT, 41 42 }; 43 size_t size = 0; 44 45 for (int i = 0; i < ARRAY_SIZE(ids); i++) 46 size += kvmppc_gse_total_size(kvmppc_gsid_size(ids[i])); 47 return size; 48 } 49 50 static int 51 gs_msg_ops_kvmhv_nestedv2_config_fill_info(struct kvmppc_gs_buff *gsb, 52 struct kvmppc_gs_msg *gsm) 53 { 54 struct kvmhv_nestedv2_config *cfg; 55 int rc; 56 57 cfg = gsm->data; 58 59 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE)) { 60 rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE, 61 cfg->vcpu_run_output_size); 62 if (rc < 0) 63 return rc; 64 } 65 66 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_INPUT)) { 67 rc = kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_INPUT, 68 cfg->vcpu_run_input_cfg); 69 if (rc < 0) 70 return rc; 71 } 72 73 if (kvmppc_gsm_includes(gsm, KVMPPC_GSID_RUN_OUTPUT)) { 74 kvmppc_gse_put_buff_info(gsb, KVMPPC_GSID_RUN_OUTPUT, 75 cfg->vcpu_run_output_cfg); 76 if (rc < 0) 77 return rc; 78 } 79 80 return 0; 81 } 82 
/*
 * Read back the configuration elements the L0 returned in @gsb and update
 * the cached config. Only the run-output minimum size is consumed here.
 */
static int
gs_msg_ops_kvmhv_nestedv2_config_refresh_info(struct kvmppc_gs_msg *gsm,
					      struct kvmppc_gs_buff *gsb)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmppc_gs_elem *gse;
	int rc;

	cfg = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	gse = kvmppc_gsp_lookup(&gsp, KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (gse)
		cfg->vcpu_run_output_size = kvmppc_gse_get_u64(gse);
	return 0;
}

static struct kvmppc_gs_msg_ops config_msg_ops = {
	.get_size = gs_msg_ops_kvmhv_nestedv2_config_get_size,
	.fill_info = gs_msg_ops_kvmhv_nestedv2_config_fill_info,
	.refresh_info = gs_msg_ops_kvmhv_nestedv2_config_refresh_info,
};

/*
 * Worst-case buffer size for a vcpu state message: every known guest state
 * ID except the config/table IDs handled separately below.
 */
static size_t gs_msg_ops_vcpu_get_size(struct kvmppc_gs_msg *gsm)
{
	struct kvmppc_gs_bitmap gsbm = { 0 };
	size_t size = 0;
	u16 iden;

	kvmppc_gsbm_fill(&gsbm);
	kvmppc_gsbm_for_each(&gsbm, iden)
	{
		switch (iden) {
		/* Excluded: handled via the config message / ptbl paths. */
		case KVMPPC_GSID_HOST_STATE_SIZE:
		case KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE:
		case KVMPPC_GSID_PARTITION_TABLE:
		case KVMPPC_GSID_PROCESS_TABLE:
		case KVMPPC_GSID_RUN_INPUT:
		case KVMPPC_GSID_RUN_OUTPUT:
			break;
		default:
			size += kvmppc_gse_total_size(kvmppc_gsid_size(iden));
		}
	}
	return size;
}

/*
 * Serialize every guest state ID included in @gsm from the vcpu into the
 * guest state buffer @gsb. IDs whose wide/thread scope does not match the
 * message's flags are skipped; unknown IDs fall through with rc == 0.
 */
static int gs_msg_ops_vcpu_fill_info(struct kvmppc_gs_buff *gsb,
				     struct kvmppc_gs_msg *gsm)
{
	struct kvm_vcpu *vcpu;
	vector128 v;
	int rc, i;
	u16 iden;

	vcpu = gsm->data;

	kvmppc_gsm_for_each(gsm, iden)
	{
		rc = 0;

		/* Only emit IDs matching the message's guest/thread scope. */
		if ((gsm->flags & KVMPPC_GS_FLAGS_WIDE) !=
		    (kvmppc_gsid_flags(iden) & KVMPPC_GS_FLAGS_WIDE))
			continue;

		switch (iden) {
		case KVMPPC_GSID_DSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dscr);
			break;
		case KVMPPC_GSID_MMCRA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcra);
			break;
		case KVMPPC_GSID_HFSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.hfscr);
			break;
		case KVMPPC_GSID_PURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.purr);
			break;
		case KVMPPC_GSID_SPURR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.spurr);
			break;
		case KVMPPC_GSID_AMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.amr);
			break;
		case KVMPPC_GSID_UAMOR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.uamor);
			break;
		case KVMPPC_GSID_SIAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.siar);
			break;
		case KVMPPC_GSID_SDAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sdar);
			break;
		case KVMPPC_GSID_IAMR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.iamr);
			break;
		case KVMPPC_GSID_DAWR0:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr0);
			break;
		case KVMPPC_GSID_DAWR1:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.dawr1);
			break;
		case KVMPPC_GSID_DAWRX0:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx0);
			break;
		case KVMPPC_GSID_DAWRX1:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.dawrx1);
			break;
		case KVMPPC_GSID_CIABR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ciabr);
			break;
		case KVMPPC_GSID_WORT:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.wort);
			break;
		case KVMPPC_GSID_PPR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ppr);
			break;
		case KVMPPC_GSID_PSPB:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pspb);
			break;
		case KVMPPC_GSID_TAR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.tar);
			break;
		case KVMPPC_GSID_FSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fscr);
			break;
		case KVMPPC_GSID_EBBHR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbhr);
			break;
		case KVMPPC_GSID_EBBRR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ebbrr);
			break;
		case KVMPPC_GSID_BESCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.bescr);
			break;
		case KVMPPC_GSID_IC:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ic);
			break;
		case KVMPPC_GSID_CTRL:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.ctrl);
			break;
		case KVMPPC_GSID_PIDR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pid);
			break;
		case KVMPPC_GSID_AMOR: {
			/* AMOR is always presented to the guest as all-ones. */
			u64 amor = ~0;

			rc = kvmppc_gse_put_u64(gsb, iden, amor);
			break;
		}
		case KVMPPC_GSID_VRSAVE:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.vrsave);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.mmcr[i]);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.sier[i]);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.pmc[i]);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.gpr[i]);
			break;
		case KVMPPC_GSID_CR:
			rc = kvmppc_gse_put_u32(gsb, iden, vcpu->arch.regs.ccr);
			break;
		case KVMPPC_GSID_XER:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.xer);
			break;
		case KVMPPC_GSID_CTR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.ctr);
			break;
		case KVMPPC_GSID_LR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.regs.link);
			break;
		case KVMPPC_GSID_NIA:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.regs.nip);
			break;
		case KVMPPC_GSID_SRR0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr0);
			break;
		case KVMPPC_GSID_SRR1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.srr1);
			break;
		case KVMPPC_GSID_SPRG0:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg0);
			break;
		case KVMPPC_GSID_SPRG1:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg1);
			break;
		case KVMPPC_GSID_SPRG2:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg2);
			break;
		case KVMPPC_GSID_SPRG3:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.sprg3);
			break;
		case KVMPPC_GSID_DAR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.dar);
			break;
		case KVMPPC_GSID_DSISR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.shregs.dsisr);
			break;
		case KVMPPC_GSID_MSR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.shregs.msr);
			break;
		case KVMPPC_GSID_VTB:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->vtb);
			break;
		case KVMPPC_GSID_LPCR:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->lpcr);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			rc = kvmppc_gse_put_u64(gsb, iden,
						vcpu->arch.vcore->tb_offset);
			break;
		case KVMPPC_GSID_FPSCR:
			rc = kvmppc_gse_put_u64(gsb, iden, vcpu->arch.fp.fpscr);
			break;
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			/* FP regs are transferred as full vector128 elements. */
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&v, &vcpu->arch.fp.fpr[i],
			       sizeof(vcpu->arch.fp.fpr[i]));
			rc = kvmppc_gse_put_vector128(gsb, iden, &v);
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.vr.vscr.u[3]);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			rc = kvmppc_gse_put_vector128(gsb, iden,
						      &vcpu->arch.vr.vr[i]);
			break;
#endif
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			/* Convert from L1 timebase to the guest's timebase. */
			dw = vcpu->arch.dec_expires -
				vcpu->arch.vcore->tb_offset;
			rc = kvmppc_gse_put_u64(gsb, iden, dw);
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			rc = kvmppc_gse_put_u32(gsb, iden,
						vcpu->arch.vcore->arch_compat);
			break;
		}

		if (rc < 0)
			return rc;
	}

	return 0;
}

/*
 * Parse a guest state buffer returned by the L0 and copy each recognized
 * element back into the vcpu. Every element successfully consumed is also
 * marked valid in io->valids so later cached reloads can skip it; unknown
 * IDs are skipped without being marked.
 */
static int gs_msg_ops_vcpu_refresh_info(struct kvmppc_gs_msg *gsm,
					struct kvmppc_gs_buff *gsb)
{
	struct kvmppc_gs_parser gsp = { 0 };
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvm_vcpu *vcpu;
	struct kvmppc_gs_elem *gse;
	vector128 v;
	int rc, i;
	u16 iden;

	vcpu = gsm->data;

	rc = kvmppc_gse_parse(&gsp, gsb);
	if (rc < 0)
		return rc;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	kvmppc_gsp_for_each(&gsp, iden, gse)
	{
		switch (iden) {
		case KVMPPC_GSID_DSCR:
			vcpu->arch.dscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_MMCRA:
			vcpu->arch.mmcra = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HFSCR:
			vcpu->arch.hfscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PURR:
			vcpu->arch.purr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPURR:
			vcpu->arch.spurr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_AMR:
			vcpu->arch.amr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_UAMOR:
			vcpu->arch.uamor = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIAR:
			vcpu->arch.siar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SDAR:
			vcpu->arch.sdar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IAMR:
			vcpu->arch.iamr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR0:
			vcpu->arch.dawr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWR1:
			vcpu->arch.dawr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAWRX0:
			vcpu->arch.dawrx0 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_DAWRX1:
			vcpu->arch.dawrx1 = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_CIABR:
			vcpu->arch.ciabr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_WORT:
			vcpu->arch.wort = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_PPR:
			vcpu->arch.ppr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PSPB:
			vcpu->arch.pspb = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_TAR:
			vcpu->arch.tar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FSCR:
			vcpu->arch.fscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBHR:
			vcpu->arch.ebbhr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_EBBRR:
			vcpu->arch.ebbrr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_BESCR:
			vcpu->arch.bescr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_IC:
			vcpu->arch.ic = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTRL:
			vcpu->arch.ctrl = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PIDR:
			vcpu->arch.pid = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_AMOR:
			/* AMOR is fixed (~0); nothing to store. */
			break;
		case KVMPPC_GSID_VRSAVE:
			vcpu->arch.vrsave = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MMCR(0)... KVMPPC_GSID_MMCR(3):
			i = iden - KVMPPC_GSID_MMCR(0);
			vcpu->arch.mmcr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SIER(0)... KVMPPC_GSID_SIER(2):
			i = iden - KVMPPC_GSID_SIER(0);
			vcpu->arch.sier[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_PMC(0)... KVMPPC_GSID_PMC(5):
			i = iden - KVMPPC_GSID_PMC(0);
			vcpu->arch.pmc[i] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_GPR(0)... KVMPPC_GSID_GPR(31):
			i = iden - KVMPPC_GSID_GPR(0);
			vcpu->arch.regs.gpr[i] = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CR:
			vcpu->arch.regs.ccr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_XER:
			vcpu->arch.regs.xer = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_CTR:
			vcpu->arch.regs.ctr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LR:
			vcpu->arch.regs.link = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_NIA:
			vcpu->arch.regs.nip = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR0:
			vcpu->arch.shregs.srr0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SRR1:
			vcpu->arch.shregs.srr1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG0:
			vcpu->arch.shregs.sprg0 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG1:
			vcpu->arch.shregs.sprg1 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG2:
			vcpu->arch.shregs.sprg2 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_SPRG3:
			vcpu->arch.shregs.sprg3 = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DAR:
			vcpu->arch.shregs.dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DSISR:
			vcpu->arch.shregs.dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_MSR:
			vcpu->arch.shregs.msr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VTB:
			vcpu->arch.vcore->vtb = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_LPCR:
			vcpu->arch.vcore->lpcr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_TB_OFFSET:
			vcpu->arch.vcore->tb_offset = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_FPSCR:
			vcpu->arch.fp.fpscr = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_VSRS(0)... KVMPPC_GSID_VSRS(31):
			kvmppc_gse_get_vector128(gse, &v);
			i = iden - KVMPPC_GSID_VSRS(0);
			memcpy(&vcpu->arch.fp.fpr[i], &v,
			       sizeof(vcpu->arch.fp.fpr[i]));
			break;
#ifdef CONFIG_VSX
		case KVMPPC_GSID_VSCR:
			vcpu->arch.vr.vscr.u[3] = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_VSRS(32)... KVMPPC_GSID_VSRS(63):
			i = iden - KVMPPC_GSID_VSRS(32);
			kvmppc_gse_get_vector128(gse, &vcpu->arch.vr.vr[i]);
			break;
#endif
		/* Fault state reported by the L0 after a guest exit. */
		case KVMPPC_GSID_HDAR:
			vcpu->arch.fault_dar = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HDSISR:
			vcpu->arch.fault_dsisr = kvmppc_gse_get_u32(gse);
			break;
		case KVMPPC_GSID_ASDR:
			vcpu->arch.fault_gpa = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_HEIR:
			vcpu->arch.emul_inst = kvmppc_gse_get_u64(gse);
			break;
		case KVMPPC_GSID_DEC_EXPIRY_TB: {
			u64 dw;

			/* Convert from the guest's timebase back to L1's. */
			dw = kvmppc_gse_get_u64(gse);
			vcpu->arch.dec_expires =
				dw + vcpu->arch.vcore->tb_offset;
			break;
		}
		case KVMPPC_GSID_LOGICAL_PVR:
			vcpu->arch.vcore->arch_compat = kvmppc_gse_get_u32(gse);
			break;
		default:
			/* Unrecognized ID: skip without marking it valid. */
			continue;
		}
		kvmppc_gsbm_set(valids, iden);
	}

	return 0;
}

static struct kvmppc_gs_msg_ops vcpu_message_ops = {
	.get_size = gs_msg_ops_vcpu_get_size,
	.fill_info = gs_msg_ops_vcpu_fill_info,
	.refresh_info = gs_msg_ops_vcpu_refresh_info,
};

/*
 * Allocate and register the NESTEDv2 io state for a vcpu with the L0:
 * query the minimum run-output size, allocate the run output/input buffers,
 * tell the L0 where they are, and create the vcpu/vcore messages used for
 * the dirty-tracking protocol. The temporary config gsm/gsb are freed on
 * success; unwinding on error is via the goto chain below.
 */
static int kvmhv_nestedv2_host_create(struct kvm_vcpu *vcpu,
				      struct kvmhv_nestedv2_io *io)
{
	struct kvmhv_nestedv2_config *cfg;
	struct kvmppc_gs_buff *gsb, *vcpu_run_output, *vcpu_run_input;
	unsigned long guest_id, vcpu_id;
	struct kvmppc_gs_msg *gsm, *vcpu_message, *vcore_message;
	int rc;

	cfg = &io->cfg;
	guest_id = vcpu->kvm->arch.lpid;
	vcpu_id = vcpu->vcpu_id;

	gsm = kvmppc_gsm_new(&config_msg_ops, cfg, KVMPPC_GS_FLAGS_WIDE,
			     GFP_KERNEL);
	if (!gsm) {
		rc = -ENOMEM;
		goto err;
	}

	gsb = kvmppc_gsb_new(kvmppc_gsm_size(gsm), guest_id, vcpu_id,
			     GFP_KERNEL);
	if (!gsb) {
		rc = -ENOMEM;
		goto free_gsm;
	}

	rc = kvmppc_gsb_receive_datum(gsb, gsm,
				      KVMPPC_GSID_RUN_OUTPUT_MIN_SIZE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get vcpu run output buffer minimum size\n");
		goto free_gsb;
	}

	vcpu_run_output = kvmppc_gsb_new(cfg->vcpu_run_output_size, guest_id,
					 vcpu_id, GFP_KERNEL);
	if (!vcpu_run_output) {
		rc = -ENOMEM;
		goto free_gsb;
	}

	cfg->vcpu_run_output_cfg.address = kvmppc_gsb_paddress(vcpu_run_output);
	cfg->vcpu_run_output_cfg.size = kvmppc_gsb_capacity(vcpu_run_output);
	io->vcpu_run_output = vcpu_run_output;

	/* The buffer registrations below are per-thread, not guest wide. */
	gsm->flags = 0;
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_OUTPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run output buffer\n");
		goto free_gs_out;
	}

	vcpu_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu, 0, GFP_KERNEL);
	if (!vcpu_message) {
		rc = -ENOMEM;
		goto free_gs_out;
	}
	kvmppc_gsm_include_all(vcpu_message);

	io->vcpu_message = vcpu_message;

	vcpu_run_input = kvmppc_gsb_new(kvmppc_gsm_size(vcpu_message), guest_id,
					vcpu_id, GFP_KERNEL);
	if (!vcpu_run_input) {
		rc = -ENOMEM;
		goto free_vcpu_message;
	}

	io->vcpu_run_input = vcpu_run_input;
	cfg->vcpu_run_input_cfg.address = kvmppc_gsb_paddress(vcpu_run_input);
	cfg->vcpu_run_input_cfg.size = kvmppc_gsb_capacity(vcpu_run_input);
	rc = kvmppc_gsb_send_datum(gsb, gsm, KVMPPC_GSID_RUN_INPUT);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set vcpu run input buffer\n");
		goto free_vcpu_run_input;
	}

	vcore_message = kvmppc_gsm_new(&vcpu_message_ops, vcpu,
				       KVMPPC_GS_FLAGS_WIDE, GFP_KERNEL);
	if (!vcore_message) {
		rc = -ENOMEM;
		goto free_vcpu_run_input;
	}

	kvmppc_gsm_include_all(vcore_message);
	/* LOGICAL_PVR is set explicitly elsewhere, not via this message. */
	kvmppc_gsbm_clear(&vcore_message->bitmap, KVMPPC_GSID_LOGICAL_PVR);
	io->vcore_message = vcore_message;

	kvmppc_gsbm_fill(&io->valids);
	kvmppc_gsm_free(gsm);
	kvmppc_gsb_free(gsb);
	return 0;

free_vcpu_run_input:
	kvmppc_gsb_free(vcpu_run_input);
free_vcpu_message:
	kvmppc_gsm_free(vcpu_message);
free_gs_out:
	kvmppc_gsb_free(vcpu_run_output);
free_gsb:
	kvmppc_gsb_free(gsb);
free_gsm:
	kvmppc_gsm_free(gsm);
err:
	return rc;
}

/**
 * __kvmhv_nestedv2_mark_dirty() - mark a Guest State ID to be sent to the host
 * @vcpu: vcpu
 * @iden: guest state ID
 *
 * Mark a guest state ID as having been changed by the L1 host and thus
 * the new value must be sent to the L0 hypervisor. See kvmhv_nestedv2_flush_vcpu()
 */
int __kvmhv_nestedv2_mark_dirty(struct kvm_vcpu *vcpu, u16 iden)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_msg *gsm;

	/* iden == 0 means "no guest state ID"; nothing to mark. */
	if (!iden)
		return 0;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;
	/* Include in both the per-thread and guest-wide messages. */
	gsm = io->vcpu_message;
	kvmppc_gsm_include(gsm, iden);
	gsm = io->vcore_message;
	kvmppc_gsm_include(gsm, iden);
	/* The L1 copy is now authoritative; no reload needed. */
	kvmppc_gsbm_set(valids, iden);
	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty);

/**
 * __kvmhv_nestedv2_cached_reload() - reload a Guest State ID from the host
 * @vcpu: vcpu
 * @iden: guest state ID
 *
 * Reload the value for the guest state ID from the L0 host into the L1 host.
 * This is cached so that going out to the L0 host only happens if necessary.
 */
int __kvmhv_nestedv2_cached_reload(struct kvm_vcpu *vcpu, u16 iden)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;
	int rc;

	if (!iden)
		return 0;

	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;
	/* Cache hit: the L1 copy is already up to date. */
	if (kvmppc_gsbm_test(valids, iden))
		return 0;

	gsb = io->vcpu_run_input;
	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, kvmppc_gsid_flags(iden));
	rc = kvmppc_gsb_receive_datum(gsb, &gsm, iden);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't get GSID: 0x%x\n", iden);
		return rc;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_cached_reload);

/**
 * kvmhv_nestedv2_flush_vcpu() - send modified Guest State IDs to the host
 * @vcpu: vcpu
 * @time_limit: hdec expiry tb
 *
 * Send the values marked by __kvmhv_nestedv2_mark_dirty() to the L0 host.
 * Thread wide values are copied to the H_GUEST_RUN_VCPU input buffer. Guest
 * wide values need to be sent with H_GUEST_SET first.
 *
 * The hdec tb offset is always sent to L0 host.
 */
int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg *gsm;
	int rc;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_input;
	/* Guest-wide values must go out first, via H_GUEST_SET. */
	gsm = io->vcore_message;
	rc = kvmppc_gsb_send_data(gsb, gsm);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set guest wide elements\n");
		return rc;
	}

	/* Thread-wide values are staged in the H_GUEST_RUN_VCPU input. */
	gsm = io->vcpu_message;
	kvmppc_gsb_reset(gsb);
	rc = kvmppc_gsm_fill_info(gsm, gsb);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't fill vcpu run input buffer\n");
		return rc;
	}

	/* The hdec expiry is always sent, regardless of dirty state. */
	rc = kvmppc_gse_put_u64(gsb, KVMPPC_GSID_HDEC_EXPIRY_TB, time_limit);
	if (rc < 0)
		return rc;
	return 0;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_flush_vcpu);

/**
 * kvmhv_nestedv2_set_ptbl_entry() - send partition and process table state to
 * L0 host
 * @lpid: guest id
 * @dw0: partition table double word
 * @dw1: process table double word
 */
int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1)
{
	struct kvmppc_gs_part_table patbl;
	struct kvmppc_gs_proc_table prtbl;
	struct kvmppc_gs_buff *gsb;
	size_t size;
	int rc;

	size = kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PARTITION_TABLE)) +
	       kvmppc_gse_total_size(
		       kvmppc_gsid_size(KVMPPC_GSID_PROCESS_TABLE)) +
	       sizeof(struct kvmppc_gs_header);
	gsb = kvmppc_gsb_new(size, lpid, 0, GFP_KERNEL);
	if (!gsb)
		return -ENOMEM;

	/* Decode the radix partition-table double word fields. */
	patbl.address = dw0 & RPDB_MASK;
	patbl.ea_bits = ((((dw0 & RTS1_MASK) >> (RTS1_SHIFT - 3)) |
			  ((dw0 & RTS2_MASK) >> RTS2_SHIFT)) +
			 31);
	patbl.gpd_size = 1ul << ((dw0 & RPDS_MASK) + 3);
	rc = kvmppc_gse_put_part_table(gsb, KVMPPC_GSID_PARTITION_TABLE, patbl);
	if (rc < 0)
		goto free_gsb;

	/* Decode the process-table double word fields. */
	prtbl.address = dw1 & PRTB_MASK;
	prtbl.gpd_size = 1ul << ((dw1 & PRTS_MASK) + 12);
	rc = kvmppc_gse_put_proc_table(gsb, KVMPPC_GSID_PROCESS_TABLE, prtbl);
	if (rc < 0)
		goto free_gsb;

	rc = kvmppc_gsb_send(gsb, KVMPPC_GS_FLAGS_WIDE);
	if (rc < 0) {
		pr_err("KVM-NESTEDv2: couldn't set the PATE\n");
		goto free_gsb;
	}

	kvmppc_gsb_free(gsb);
	return 0;

free_gsb:
	kvmppc_gsb_free(gsb);
	return rc;
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_set_ptbl_entry);

/**
 * kvmhv_nestedv2_parse_output() - receive values from H_GUEST_RUN_VCPU output
 * @vcpu: vcpu
 *
 * Parse the output buffer from H_GUEST_RUN_VCPU to update vcpu.
 */
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;

	io = &vcpu->arch.nestedv2_io;
	gsb = io->vcpu_run_output;

	/*
	 * Reset fault state first; the refresh below only overwrites fields
	 * the L0 actually reported.
	 */
	vcpu->arch.fault_dar = 0;
	vcpu->arch.fault_dsisr = 0;
	vcpu->arch.fault_gpa = 0;
	vcpu->arch.emul_inst = KVM_INST_FETCH_FAILED;

	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);
	return kvmppc_gsm_refresh_info(&gsm, gsb);
}
EXPORT_SYMBOL_GPL(kvmhv_nestedv2_parse_output);

/* Free the buffers and messages allocated by kvmhv_nestedv2_host_create(). */
static void kvmhv_nestedv2_host_free(struct kvm_vcpu *vcpu,
				     struct kvmhv_nestedv2_io *io)
{
	kvmppc_gsm_free(io->vcpu_message);
	kvmppc_gsm_free(io->vcore_message);
	kvmppc_gsb_free(io->vcpu_run_input);
	kvmppc_gsb_free(io->vcpu_run_output);
}

/**
 * __kvmhv_nestedv2_reload_ptregs() - reload the pt_regs state from the L0
 * @vcpu: vcpu
 * @regs: pt_regs (currently unused here; state lands in vcpu->arch.regs)
 *
 * Fetch from the L0 every GPR/CR/XER/CTR/LR/NIA value not already cached
 * as valid in the L1.
 */
int __kvmhv_nestedv2_reload_ptregs(struct kvm_vcpu *vcpu, struct pt_regs *regs)
{
	struct kvmhv_nestedv2_io *io;
	struct kvmppc_gs_bitmap *valids;
	struct kvmppc_gs_buff *gsb;
	struct kvmppc_gs_msg gsm;
	int rc = 0;


	io = &vcpu->arch.nestedv2_io;
	valids = &io->valids;

	gsb = io->vcpu_run_input;
	kvmppc_gsm_init(&gsm, &vcpu_message_ops, vcpu, 0);

	/* Only request values whose cached copy is stale. */
	for (int i = 0; i < 32; i++) {
		if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_GPR(i)))
			kvmppc_gsm_include(&gsm, KVMPPC_GSID_GPR(i));
	}

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_XER))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_XER);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_CTR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_CTR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_LR))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_LR);

	if (!kvmppc_gsbm_test(valids, KVMPPC_GSID_NIA))
		kvmppc_gsm_include(&gsm, KVMPPC_GSID_NIA);

	rc = kvmppc_gsb_receive_data(gsb, &gsm);
	if (rc < 0)
		pr_err("KVM-NESTEDv2: couldn't reload ptregs\n");

	return rc;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_reload_ptregs);

/**
 * __kvmhv_nestedv2_mark_dirty_ptregs() - mark pt_regs state as dirty
 * @vcpu: vcpu
 * @regs: pt_regs (currently unused here; dirtiness is tracked per GSID)
 *
 * Mark all GPR/CR/XER/CTR/LR/NIA guest state IDs for sending to the L0.
 */
int __kvmhv_nestedv2_mark_dirty_ptregs(struct kvm_vcpu *vcpu,
				       struct pt_regs *regs)
{
	for (int i = 0; i < 32; i++)
		kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_GPR(i));

	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_XER);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_CTR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_LR);
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_NIA);

	return 0;
}
EXPORT_SYMBOL_GPL(__kvmhv_nestedv2_mark_dirty_ptregs);

/**
 * kvmhv_nestedv2_vcpu_create() - create nested vcpu for the NESTEDv2 API
 * @vcpu: vcpu
 * @io: NESTEDv2 nested io state
 *
 * Create the vcpu on the L0 host via H_GUEST_CREATE_VCPU and set up the
 * NESTEDv2 io state (run input/output buffers and messages).
957 */ 958 int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, 959 struct kvmhv_nestedv2_io *io) 960 { 961 long rc; 962 963 rc = plpar_guest_create_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id); 964 965 if (rc != H_SUCCESS) { 966 pr_err("KVM: Create Guest vcpu hcall failed, rc=%ld\n", rc); 967 switch (rc) { 968 case H_NOT_ENOUGH_RESOURCES: 969 case H_ABORTED: 970 return -ENOMEM; 971 case H_AUTHORITY: 972 return -EPERM; 973 default: 974 return -EINVAL; 975 } 976 } 977 978 rc = kvmhv_nestedv2_host_create(vcpu, io); 979 980 return rc; 981 } 982 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_create); 983 984 /** 985 * kvmhv_nestedv2_vcpu_free() - free the NESTEDv2 state 986 * @vcpu: vcpu 987 * @io: NESTEDv2 nested io state 988 */ 989 void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, 990 struct kvmhv_nestedv2_io *io) 991 { 992 kvmhv_nestedv2_host_free(vcpu, io); 993 } 994 EXPORT_SYMBOL_GPL(kvmhv_nestedv2_vcpu_free); 995