--- book3s_hv.c (3102f7843c75014fa15d3e6fda3b49f61bc467b4)
+++ book3s_hv.c (5deb8e7ad8ac7e3fcdfa042acff617f461b361c2)
 /*
  * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
  * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
  *
  * Authors:
  *    Paul Mackerras <paulus@au1.ibm.com>
  *    Alexander Graf <agraf@suse.de>
  *    Kevin Wolf <mail@kevin-wolf.de>

--- 1252 unchanged lines hidden ---

 static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
                                                    unsigned int id)
 {
         struct kvm_vcpu *vcpu;
         int err = -EINVAL;
         int core;
         struct kvmppc_vcore *vcore;
 
-        core = id / threads_per_subcore;
+        core = id / threads_per_core;
         if (core >= KVM_MAX_VCORES)
                 goto out;
 
         err = -ENOMEM;
         vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
         if (!vcpu)
                 goto out;
 
         err = kvm_vcpu_init(vcpu, kvm, id);
         if (err)
                 goto free_vcpu;
 
         vcpu->arch.shared = &vcpu->arch.shregs;
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+        /*
+         * The shared struct is never shared on HV,
+         * so we can always use host endianness
+         */
+#ifdef __BIG_ENDIAN__
+        vcpu->arch.shared_big_endian = true;
+#else
+        vcpu->arch.shared_big_endian = false;
+#endif
+#endif
         vcpu->arch.mmcr[0] = MMCR0_FC;
         vcpu->arch.ctrl = CTRL_RUNLATCH;
         /* default to host PVR, since we can't spoof it */
         kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
         spin_lock_init(&vcpu->arch.vpa_update_lock);
         spin_lock_init(&vcpu->arch.tbacct_lock);
         vcpu->arch.busy_preempt = TB_NIL;
         vcpu->arch.intr_msr = MSR_SF | MSR_ME;

--- 9 unchanged lines hidden ---
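
The 5deb8e7a side also records, at vcpu creation, which byte order the per-vcpu shared struct uses. As the added comment notes, the shared struct is never actually shared with an HV guest, so the host's own endianness is always safe. A stand-alone sketch of that decision, using a runtime byte-order probe in place of the kernel's compile-time __BIG_ENDIAN__ test (illustrative only, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Probe the host byte order at runtime; the kernel decides this at
     * compile time with __BIG_ENDIAN__, the probe is just for illustration. */
    static bool host_is_big_endian(void)
    {
        unsigned int probe = 1;
        return *(const unsigned char *)&probe == 0;
    }

    int main(void)
    {
        /* On HV the shared struct stays private to the host, so mirroring
         * the host byte order is always correct. */
        bool shared_big_endian = host_is_big_endian();

        printf("shared_big_endian = %s\n", shared_big_endian ? "true" : "false");
        return 0;
    }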

         if (!vcore) {
                 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
                 if (vcore) {
                         INIT_LIST_HEAD(&vcore->runnable_threads);
                         spin_lock_init(&vcore->lock);
                         init_waitqueue_head(&vcore->wq);
                         vcore->preempt_tb = TB_NIL;
                         vcore->lpcr = kvm->arch.lpcr;
-                        vcore->first_vcpuid = core * threads_per_subcore;
+                        vcore->first_vcpuid = core * threads_per_core;
                         vcore->kvm = kvm;
                 }
                 kvm->arch.vcores[core] = vcore;
                 kvm->arch.online_vcores++;
         }
         mutex_unlock(&kvm->lock);
 
         if (!vcore)

--- 173 unchanged lines hidden ---
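
Both changes in kvmppc_core_vcpu_create_hv() above are the same substitution: the 3102f784 side sizes virtual cores by threads_per_subcore while the 5deb8e7a side uses threads_per_core, both when deriving the vcore index from the vcpu id and when computing the vcore's first_vcpuid. A stand-alone sketch of that arithmetic (the thread counts and the vcore limit are illustrative placeholders, not values probed from hardware):

    #include <stdio.h>

    /* Illustrative placeholders: a core with 8 hardware threads that can be
     * split into 4-thread subcores; nothing here is read from real hardware. */
    #define THREADS_PER_CORE    8
    #define THREADS_PER_SUBCORE 4
    #define MAX_VCORES          16  /* placeholder limit for the sketch */

    /* Map a vcpu id to a virtual-core index the way both versions do; only
     * the divisor differs. Returns -1 when the id falls outside the limit. */
    static int vcore_index(unsigned int id, unsigned int threads_per_unit)
    {
        unsigned int core = id / threads_per_unit;

        return core < MAX_VCORES ? (int)core : -1;
    }

    int main(void)
    {
        unsigned int id = 6;

        printf("vcpu %u -> vcore %d when dividing by threads_per_core\n",
               id, vcore_index(id, THREADS_PER_CORE));
        printf("vcpu %u -> vcore %d when dividing by threads_per_subcore\n",
               id, vcore_index(id, THREADS_PER_SUBCORE));
        return 0;
    }

With the subcore divisor, vcpu 6 lands in vcore 1 and that vcore's first_vcpuid becomes 4, so each virtual core holds at most one subcore's worth of threads.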

 /*
  * Check that we are on thread 0 and that any other threads in
  * this core are off-line.  Then grab the threads so they can't
  * enter the kernel.
  */
 static int on_primary_thread(void)
 {
         int cpu = smp_processor_id();
-        int thr;
+        int thr = cpu_thread_in_core(cpu);
 
-        /* Are we on a primary subcore? */
-        if (cpu_thread_in_subcore(cpu))
+        if (thr)
                 return 0;
-
-        thr = 0;
-        while (++thr < threads_per_subcore)
+        while (++thr < threads_per_core)
                 if (cpu_online(cpu + thr))
                         return 0;
 
         /* Grab all hw threads so they can't go into the kernel */
-        for (thr = 1; thr < threads_per_subcore; ++thr) {
+        for (thr = 1; thr < threads_per_core; ++thr) {
                 if (kvmppc_grab_hwthread(cpu + thr)) {
                         /* Couldn't grab one; let the others go */
                         do {
                                 kvmppc_release_hwthread(cpu + thr);
                         } while (--thr > 0);
                         return 0;
                 }
         }

--- 42 unchanged lines hidden ---
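
The two versions of on_primary_thread() differ in which set of sibling threads they police before the guest is dispatched: the 5deb8e7a side requires the caller to be thread 0 of its core and scans threads_per_core siblings, while the 3102f784 side requires only the primary thread of a subcore and scans threads_per_subcore siblings before grabbing them. A stand-alone model of the subcore-based check, with hypothetical stand-ins for cpu_thread_in_subcore() and cpu_online() (the real function also grabs the sibling hardware threads afterwards):

    #include <stdbool.h>
    #include <stdio.h>

    #define THREADS_PER_SUBCORE 4   /* illustrative placeholder */

    /* Hypothetical stand-in: a CPU's thread position within its subcore. */
    static int thread_in_subcore(int cpu)
    {
        return cpu % THREADS_PER_SUBCORE;
    }

    /* Hypothetical stand-in for cpu_online(): pretend only CPU 0 is online. */
    static bool cpu_is_online(int cpu)
    {
        return cpu == 0;
    }

    /* Succeed only on the primary thread of a subcore whose sibling threads
     * are all offline, mirroring the subcore-based check above. */
    static int on_primary_thread_model(int cpu)
    {
        int thr = 0;

        if (thread_in_subcore(cpu))
            return 0;
        while (++thr < THREADS_PER_SUBCORE)
            if (cpu_is_online(cpu + thr))
                return 0;
        return 1;
    }

    int main(void)
    {
        printf("cpu 0 -> %d, cpu 1 -> %d\n",
               on_primary_thread_model(0), on_primary_thread_model(1));
        return 0;
    }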

         if (need_vpa_update) {
                 spin_unlock(&vc->lock);
                 for (i = 0; i < need_vpa_update; ++i)
                         kvmppc_update_vpas(vcpus_to_update[i]);
                 spin_lock(&vc->lock);
         }
 
         /*
-         * Make sure we are running on primary threads, and that secondary
-         * threads are offline.  Also check if the number of threads in this
-         * guest are greater than the current system threads per guest.
+         * Make sure we are running on thread 0, and that
+         * secondary threads are offline.
          */
-        if ((threads_per_core > 1) &&
-            ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
+        if (threads_per_core > 1 && !on_primary_thread()) {
                 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
                         vcpu->arch.ret = -EBUSY;
                 goto out;
         }
 
-
         vc->pcpu = smp_processor_id();
         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
                 kvmppc_start_thread(vcpu);
                 kvmppc_create_dtl_entry(vcpu, vc);
         }
 
         /* Set this explicitly in case thread 0 doesn't have a vcpu */
         get_paca()->kvm_hstate.kvm_vcore = vc;

--- 11 unchanged lines hidden ---

         spin_lock(&vc->lock);
         /* disable sending of IPIs on virtual external irqs */
         list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
                 vcpu->cpu = -1;
         /* wait for secondary threads to finish writing their state to memory */
         if (vc->nap_count < vc->n_woken)
                 kvmppc_wait_for_nap(vc);
-        for (i = 0; i < threads_per_subcore; ++i)
+        for (i = 0; i < threads_per_core; ++i)
                 kvmppc_release_hwthread(vc->pcpu + i);
         /* prevent other vcpu threads from doing kvmppc_start_thread() now */
         vc->vcore_state = VCORE_EXITING;
         spin_unlock(&vc->lock);
 
         srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
 
         /* make sure updates to secondary vcpu structs are visible now */

--- 701 unchanged lines hidden ---
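
In kvmppc_run_core() the substitution shows up twice: in the guard run before dispatching the virtual core and in the loop that releases the grabbed hardware threads after the guest exits. The 3102f784 side additionally refuses to run a virtual core whose runnable thread count exceeds what a single subcore offers, since a subcore has fewer sibling threads than a whole core. A stand-alone sketch of that guard (the constants and helper are illustrative, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative placeholders, not probed from hardware. */
    #define THREADS_PER_CORE    8
    #define THREADS_PER_SUBCORE 4

    /* Model of the subcore-side guard: refuse to run a vcore that has more
     * runnable threads than one subcore can hold, or when the caller is not
     * a suitable primary thread. */
    static bool vcore_can_run(int num_threads, bool on_primary)
    {
        if (THREADS_PER_CORE > 1 &&
            (num_threads > THREADS_PER_SUBCORE || !on_primary))
            return false;
        return true;
    }

    int main(void)
    {
        printf("4 threads, primary: %d\n", vcore_can_run(4, true));
        printf("6 threads, primary: %d\n", vcore_can_run(6, true));
        printf("2 threads, not primary: %d\n", vcore_can_run(2, false));
        return 0;
    }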

                 lpcr |= LPCR_ONL;
         }
         kvm->arch.lpcr = lpcr;
 
         kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
         spin_lock_init(&kvm->arch.slot_phys_lock);
 
         /*
-         * Track that we now have a HV mode VM active. This blocks secondary
-         * CPU threads from coming online.
+         * Don't allow secondary CPU threads to come online
+         * while any KVM VMs exist.
          */
-        kvm_hv_vm_activated();
+        inhibit_secondary_onlining();
 
         return 0;
 }
 
 static void kvmppc_free_vcores(struct kvm *kvm)
 {
         long int i;
 
         for (i = 0; i < KVM_MAX_VCORES; ++i)
                 kfree(kvm->arch.vcores[i]);
         kvm->arch.online_vcores = 0;
 }
 
 static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
 {
-        kvm_hv_vm_deactivated();
+        uninhibit_secondary_onlining();
 
         kvmppc_free_vcores(kvm);
         if (kvm->arch.rma) {
                 kvm_release_rma(kvm->arch.rma);
                 kvm->arch.rma = NULL;
         }
 
         kvmppc_free_hpt(kvm);

--- 141 unchanged lines hidden ---
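
The last two hunks are a matching pair around the VM lifecycle: creation blocks secondary CPU threads from coming online and destruction lifts the block, with the 5deb8e7a side calling inhibit_secondary_onlining()/uninhibit_secondary_onlining() and the 3102f784 side the renamed kvm_hv_vm_activated()/kvm_hv_vm_deactivated(). A minimal sketch of the idea, assuming the calls boil down to a count of live HV VMs that the CPU-hotplug path consults (the real kernel mechanism is more involved):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical counter of live HV-mode VMs. */
    static atomic_int hv_vm_count;

    static void hv_vm_activated(void)
    {
        atomic_fetch_add(&hv_vm_count, 1);
    }

    static void hv_vm_deactivated(void)
    {
        atomic_fetch_sub(&hv_vm_count, 1);
    }

    /* Hotplug side: a secondary thread may come online only while no HV VM
     * exists. */
    static bool secondary_may_online(void)
    {
        return atomic_load(&hv_vm_count) == 0;
    }

    int main(void)
    {
        hv_vm_activated();              /* VM created */
        printf("secondary online allowed: %d\n", secondary_may_online());
        hv_vm_deactivated();            /* VM destroyed */
        printf("secondary online allowed: %d\n", secondary_may_online());
        return 0;
    }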