// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, <yu.liu@freescale.com>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/kvm_ppc.h>

#include "../mm/mmu_decl.h"
#include "booke.h"
#include "e500.h"

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS,guestTID,guestPR) --> ID of physical cpu
 * guestAS	[0..1]
 * guestTID	[0..255]
 * guestPR	[0..1]
 * ID		[1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255] */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);

/*
 * Allocate a free shadow ID and set up a valid SID mapping in the given entry.
 * A mapping is only valid when the vcpu_id_table and pcpu_id_table entries
 * match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = __this_cpu_inc_return(pcpu_last_used_sid);
	if (sid < NUM_TIDS) {
		__this_cpu_write(pcpu_sids.entry[sid], entry);
		entry->val = sid;
		entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

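/*
 * Illustrative example of the scheme above: suppose the guest runs in AS0,
 * supervisor mode (PR=0) with guest PID 5.  The first time a shadow ID is
 * needed for that context, local_sid_setup_one() is called on
 * idt->id[0][5][0]; it hands out the next unused shadow ID on this core
 * (say 7) and records the entry both in pcpu_sids.entry[7] and in
 * entry->pentry.  As long as the two sides still agree, local_sid_lookup()
 * keeps returning 7.  Once the core runs out of shadow IDs, the host TLB is
 * flushed, all per-core mappings are destroyed and allocation restarts at 1.
 */
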
/*
 * Check if the given entry contains a valid shadow ID mapping.
 * An ID mapping is considered valid only if
 * both vcpu and pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
	    entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
		return entry->val;
	return -1;
}

/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__this_cpu_write(pcpu_last_used_sid, 0);
	memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
	vcpu_e500->idt = NULL;
}

/* Map the guest PID to shadow PIDs.
 * We use PID to hold the shadow of the current non-zero guest PID,
 * and PID1 to hold the shadow of guest PID 0,
 * so that guest TLB entries with TID=0 can be accessed at any time. */
static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
			       struct kvmppc_vcpu_e500 *vcpu_e500,
			       int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

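/*
 * Worked example of the PID/PID1 split (illustrative, assuming the guest
 * is in AS0, PR=0 with guest PID 5): kvmppc_e500_recalc_shadow_pid() sets
 *   shadow_pid  = shadow ID for (AS0, TID 5, PR 0)
 *   shadow_pid1 = shadow ID for (AS0, TID 0, PR 0)
 * Guest TLB entries with TID=5 are then shadowed under the first ID and
 * global (TID=0) guest entries under the second, so both kinds of mapping
 * stay usable while the guest runs with PID 5.
 */
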
/*
 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
 * This function first looks up whether a valid mapping exists,
 * and if not, creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}

unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe)
{
	return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
}

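/*
 * The guest wrote a new value to its PID register (e.g. via an emulated
 * mtspr).  Record it and, if it actually changed, recompute the shadow
 * PIDs so subsequent translations use the right shadow IDs.
 */
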
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

/* gtlbe must not be mapped by more than one host tlbe */
void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts;
	int pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU.  In the common case, it will be valid on this
		 * CPU, in which case we do a local invalidation of the
		 * specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * we invalidate the entire shadow PID.
		 */
		pid = local_sid_lookup(&idt->id[ts][tid][pr]);
		if (pid <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a 4K entry which is in a PID
		 * that has a valid shadow mapping on this host CPU.  We
		 * search the host TLB to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kvmppc_e500_id_table_reset_all(vcpu_e500);
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_load_e500(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_booke_vcpu_load(vcpu, cpu);

	/* Shadow PID may have expired on the local core */
	kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
}

static void kvmppc_core_vcpu_put_e500(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_SPE
	if (vcpu->arch.shadow_msr & MSR_SPE)
		kvmppc_vcpu_disable_spe(vcpu);
#endif

	kvmppc_booke_vcpu_put(vcpu);
}

int kvmppc_core_check_processor_compat(void)
{
	int r;

	if (strcmp(cur_cpu_spec->cpu_name, "e500v2") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	return r;
}

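/*
 * Seed the guest's TLB1 with initial mappings so a freshly created guest
 * has something to run from: a large 256M mapping at effective address 0
 * and a 4K mapping for the serial port used by the kernel boot wrapper.
 */
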
static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct kvm_book3e_206_tlb_entry *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = get_entry(vcpu_e500, 1, 0);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = get_entry(vcpu_e500, 1, 1);
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
}

int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	kvmppc_e500_tlb_setup(vcpu_e500);

	/* Registers init */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu_e500->svr = mfspr(SPRN_SVR);

	vcpu->arch.cpu_type = KVM_CPU_E500V2;

	return 0;
}

static int kvmppc_core_get_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE |
			       KVM_SREGS_E_PM;
	sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;

	sregs->u.e.impl.fsl.features = 0;
	sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
	sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
	sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;

	sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
	sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
	sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
	sregs->u.e.ivor_high[3] =
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];

	kvmppc_get_sregs_ivor(vcpu, sregs);
	kvmppc_get_sregs_e500_tlb(vcpu, sregs);
	return 0;
}

static int kvmppc_core_set_sregs_e500(struct kvm_vcpu *vcpu,
				      struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int ret;

	if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
		vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
		vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
		vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
	}

	ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
	if (ret < 0)
		return ret;

	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	if (sregs->u.e.features & KVM_SREGS_E_SPE) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] =
			sregs->u.e.ivor_high[0];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] =
			sregs->u.e.ivor_high[1];
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] =
			sregs->u.e.ivor_high[2];
	}

	if (sregs->u.e.features & KVM_SREGS_E_PM) {
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
			sregs->u.e.ivor_high[3];
	}

	return kvmppc_set_sregs_ivor(vcpu, sregs);
}

static int kvmppc_get_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_get_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static int kvmppc_set_one_reg_e500(struct kvm_vcpu *vcpu, u64 id,
				   union kvmppc_one_reg *val)
{
	int r = kvmppc_set_one_reg_e500_tlb(vcpu, id, val);
	return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_e500(struct kvm *kvm,
						     unsigned int id)
{
	struct kvmppc_vcpu_e500 *vcpu_e500;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu_e500) {
		err = -ENOMEM;
		goto out;
	}

	vcpu = &vcpu_e500->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) {
		err = -ENOMEM;
		goto uninit_vcpu;
	}

	err = kvmppc_e500_tlb_init(vcpu_e500);
	if (err)
		goto uninit_id;

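	/*
	 * Page of state shared with the guest (vcpu->arch.shared); the
	 * guest may map it as the paravirtual "magic page".
	 */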
	vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	if (!vcpu->arch.shared) {
		err = -ENOMEM;
		goto uninit_tlb;
	}

	return vcpu;

uninit_tlb:
	kvmppc_e500_tlb_uninit(vcpu_e500);
uninit_id:
	kvmppc_e500_id_table_free(vcpu_e500);
uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_e500(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	free_page((unsigned long)vcpu->arch.shared);
	kvmppc_e500_tlb_uninit(vcpu_e500);
	kvmppc_e500_id_table_free(vcpu_e500);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
}

static int kvmppc_core_init_vm_e500(struct kvm *kvm)
{
	return 0;
}

static void kvmppc_core_destroy_vm_e500(struct kvm *kvm)
{
}

static struct kvmppc_ops kvm_ops_e500 = {
	.get_sregs = kvmppc_core_get_sregs_e500,
	.set_sregs = kvmppc_core_set_sregs_e500,
	.get_one_reg = kvmppc_get_one_reg_e500,
	.set_one_reg = kvmppc_set_one_reg_e500,
	.vcpu_load = kvmppc_core_vcpu_load_e500,
	.vcpu_put = kvmppc_core_vcpu_put_e500,
	.vcpu_create = kvmppc_core_vcpu_create_e500,
	.vcpu_free = kvmppc_core_vcpu_free_e500,
	.mmu_destroy = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500,
	.destroy_vm = kvmppc_core_destroy_vm_e500,
	.emulate_op = kvmppc_core_emulate_op_e500,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_e500,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_e500,
};

static int __init kvmppc_e500_init(void)
{
	int r, i;
	unsigned long ivor[3];
	/* Process remaining handlers above the generic first 16 */
	unsigned long *handler = &kvmppc_booke_handler_addr[16];
	unsigned long handler_len;
	unsigned long max_ivor = 0;

	r = kvmppc_core_check_processor_compat();
	if (r)
		goto err_out;

	r = kvmppc_booke_init();
	if (r)
		goto err_out;

	/* copy extra E500 exception handlers */
	ivor[0] = mfspr(SPRN_IVOR32);
	ivor[1] = mfspr(SPRN_IVOR33);
	ivor[2] = mfspr(SPRN_IVOR34);
	for (i = 0; i < 3; i++) {
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}
	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
	if (r)
		goto err_out;
	kvm_ops_e500.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_e500;

err_out:
	return r;
}

static void __exit kvmppc_e500_exit(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_booke_exit();
}

module_init(kvmppc_e500_init);
module_exit(kvmppc_e500_exit);
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");