/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions);
}

int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
{
	/* do real check here */
	return 1;
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE);
}
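
/*
 * Emulate the instruction that caused the current exit.  Accesses to
 * emulated MMIO or DCR registers cannot be completed in the kernel, so
 * the details are recorded in the kvm_run structure and control
 * returns to userspace to perform the device access.
 */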
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       vcpu->arch.last_inst);
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

void kvm_arch_hardware_enable(void *garbage)
{
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	/* The core may hand back an ERR_PTR on failure; don't create
	 * debugfs entries for a vcpu that was never allocated. */
	if (!IS_ERR(vcpu))
		kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
	            (unsigned long)vcpu);

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
	*gpr = run->dcr.data;
}
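
/*
 * Userspace has completed an MMIO read on the guest's behalf.  Copy
 * the result from run->mmio.data into the GPR recorded in io_gpr when
 * the access originally trapped, byte-swapping if the guest access was
 * little-endian.
 */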
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

	if (run->mmio.len > sizeof(*gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 4: *gpr = *(u32 *)run->mmio.data; break;
		case 2: *gpr = *(u16 *)run->mmio.data; break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;

	return EMULATE_DO_MMIO;
}
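
/*
 * Mirror image of kvmppc_handle_load: describe the pending store in
 * kvm_run, place the value to be written in run->mmio.data, and return
 * EMULATE_DO_MMIO so the exit path drops back to userspace to perform
 * the device access.
 */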
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u32 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	/* If we returned from userspace with a pending MMIO or DCR read,
	 * complete it before re-entering the guest. */
	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -EINVAL;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}