/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>

/* Hypercall entry point. Will be patched with device tree instructions. */

.global kvm_hypercall_start
kvm_hypercall_start:
	li	r3, -1
	nop
	nop
	nop
	blr

#define KVM_MAGIC_PAGE		(-4096)

#ifdef CONFIG_64BIT
#define LL64(reg, offs, reg2)	ld	reg, (offs)(reg2)
#define STL64(reg, offs, reg2)	std	reg, (offs)(reg2)
#else
#define LL64(reg, offs, reg2)	lwz	reg, (offs + 4)(reg2)
#define STL64(reg, offs, reg2)	stw	reg, (offs + 4)(reg2)
#endif

#define SCRATCH_SAVE						\
	/* Enable critical section. We are critical if		\
	   shared->critical == r1 */				\
	STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);	\
								\
	/* Save state */					\
	PPC_STL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	PPC_STL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
	mfcr	r31;						\
	stw	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);

#define SCRATCH_RESTORE						\
	/* Restore state */					\
	PPC_LL	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0);	\
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0);	\
	mtcr	r30;						\
	PPC_LL	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0);	\
								\
	/* Disable critical section. We are critical if		\
	   shared->critical == r1 and r2 is always != r1 */	\
	STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);

.global kvm_template_start
kvm_template_start:

.global kvm_emulate_mtmsrd
kvm_emulate_mtmsrd:

	SCRATCH_SAVE

	/* Put MSR & ~(MSR_EE|MSR_RI) in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	lis	r30, (~(MSR_EE | MSR_RI))@h
	ori	r30, r30, (~(MSR_EE | MSR_RI))@l
	and	r31, r31, r30

	/* OR the register's (MSR_EE|MSR_RI) on MSR */
kvm_emulate_mtmsrd_reg:
	ori	r30, r0, 0
	andi.	r30, r30, (MSR_EE|MSR_RI)
	or	r31, r31, r30

	/* Put MSR back into magic page */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_check

	/* Check if we may trigger an interrupt */
	andi.	r30, r30, MSR_EE
	beq	no_check

	SCRATCH_RESTORE

	/* Nag hypervisor */
kvm_emulate_mtmsrd_orig_ins:
	tlbsync

	b	kvm_emulate_mtmsrd_branch

no_check:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsrd_branch:
	b	.
kvm_emulate_mtmsrd_end:

.global kvm_emulate_mtmsrd_branch_offs
kvm_emulate_mtmsrd_branch_offs:
	.long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_reg_offs
kvm_emulate_mtmsrd_reg_offs:
	.long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_orig_ins_offs
kvm_emulate_mtmsrd_orig_ins_offs:
	.long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4

.global kvm_emulate_mtmsrd_len
kvm_emulate_mtmsrd_len:
	.long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4


#define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI)
#define MSR_CRITICAL_BITS ~MSR_SAFE_BITS

.global kvm_emulate_mtmsr
kvm_emulate_mtmsr:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Find the changed bits between old and new MSR */
kvm_emulate_mtmsr_reg1:
	ori	r30, r0, 0
	xor	r31, r30, r31

	/* Check if we need to really do mtmsr */
	LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS)
	and.	r31, r31, r30

	/* No critical bits changed? Maybe we can stay in the guest. */
	beq	maybe_stay_in_guest

do_mtmsr:

	SCRATCH_RESTORE

	/* Just fire off the mtmsr if it's critical */
kvm_emulate_mtmsr_orig_ins:
	mtmsr	r0

	b	kvm_emulate_mtmsr_branch

maybe_stay_in_guest:

	/* Get the target register in r30 */
kvm_emulate_mtmsr_reg2:
	ori	r30, r0, 0

	/* Put MSR into magic page because we don't call mtmsr */
	STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Check if we have to fetch an interrupt */
	lwz	r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r31, 0
	beq+	no_mtmsr

	/* Check if we may trigger an interrupt */
	andi.	r31, r30, MSR_EE
	bne	do_mtmsr

no_mtmsr:

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtmsr_branch:
	b	.
kvm_emulate_mtmsr_end:

.global kvm_emulate_mtmsr_branch_offs
kvm_emulate_mtmsr_branch_offs:
	.long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg1_offs
kvm_emulate_mtmsr_reg1_offs:
	.long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_reg2_offs
kvm_emulate_mtmsr_reg2_offs:
	.long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_orig_ins_offs
kvm_emulate_mtmsr_orig_ins_offs:
	.long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4

.global kvm_emulate_mtmsr_len
kvm_emulate_mtmsr_len:
	.long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4

/* also used for wrteei 1 */
.global kvm_emulate_wrtee
kvm_emulate_wrtee:

	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Insert new MSR[EE] */
kvm_emulate_wrtee_reg:
	ori	r30, r0, 0
	rlwimi	r31, r30, 0, MSR_EE

	/*
	 * If MSR[EE] is now set, check for a pending interrupt.
	 * We could skip this if MSR[EE] was already on, but that
	 * should be rare, so don't bother.
	 */
	andi.	r30, r30, MSR_EE

	/* Put MSR into magic page because we don't call wrtee */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	beq	no_wrtee

	/* Check if we have to fetch an interrupt */
	lwz	r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
	cmpwi	r30, 0
	bne	do_wrtee

no_wrtee:
	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrtee_branch:
	b	.

do_wrtee:
	SCRATCH_RESTORE

	/* Just fire off the wrtee if it's critical */
kvm_emulate_wrtee_orig_ins:
	wrtee	r0

	b	kvm_emulate_wrtee_branch

kvm_emulate_wrtee_end:

.global kvm_emulate_wrtee_branch_offs
kvm_emulate_wrtee_branch_offs:
	.long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_reg_offs
kvm_emulate_wrtee_reg_offs:
	.long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_orig_ins_offs
kvm_emulate_wrtee_orig_ins_offs:
	.long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrtee_len
kvm_emulate_wrtee_len:
	.long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4

.global kvm_emulate_wrteei_0
kvm_emulate_wrteei_0:
	SCRATCH_SAVE

	/* Fetch old MSR in r31 */
	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	/* Remove MSR_EE from old MSR */
	rlwinm	r31, r31, 0, ~MSR_EE

	/* Write new MSR value back */
	STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_wrteei_0_branch:
	b	.
kvm_emulate_wrteei_0_end:

.global kvm_emulate_wrteei_0_branch_offs
kvm_emulate_wrteei_0_branch_offs:
	.long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_wrteei_0_len
kvm_emulate_wrteei_0_len:
	.long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4

.global kvm_emulate_mtsrin
kvm_emulate_mtsrin:

	SCRATCH_SAVE

	LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
	andi.	r31, r31, MSR_DR | MSR_IR
	beq	kvm_emulate_mtsrin_reg1

	SCRATCH_RESTORE

kvm_emulate_mtsrin_orig_ins:
	nop
	b	kvm_emulate_mtsrin_branch

kvm_emulate_mtsrin_reg1:
	/* rX >> 26 */
	rlwinm	r30,r0,6,26,29

kvm_emulate_mtsrin_reg2:
	stw	r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30)

	SCRATCH_RESTORE

	/* Go back to caller */
kvm_emulate_mtsrin_branch:
	b	.
kvm_emulate_mtsrin_end:

.global kvm_emulate_mtsrin_branch_offs
kvm_emulate_mtsrin_branch_offs:
	.long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg1_offs
kvm_emulate_mtsrin_reg1_offs:
	.long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_reg2_offs
kvm_emulate_mtsrin_reg2_offs:
	.long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_orig_ins_offs
kvm_emulate_mtsrin_orig_ins_offs:
	.long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4

.global kvm_emulate_mtsrin_len
kvm_emulate_mtsrin_len:
	.long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4

.global kvm_template_end
kvm_template_end:
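
/*
 * Illustrative note: the code between kvm_template_start and
 * kvm_template_end is not run in place; a patching consumer is expected
 * to copy a template and fix up the instruction slots named by the
 * exported *_offs words, with *_len giving the template size in words.
 * A rough sketch of such a consumer, using the wrtee template (the
 * names template_copy, rS_field() and branch_back() are hypothetical,
 * shown only to document how the offsets are meant to be used):
 *
 *	u32 *p = template_copy;
 *	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
 *	p[kvm_emulate_wrtee_reg_offs] |= rS_field(orig_inst);
 *	p[kvm_emulate_wrtee_orig_ins_offs] = orig_inst;
 *	p[kvm_emulate_wrtee_branch_offs] |= branch_back(p, orig_inst_addr);
 *
 * followed by an icache flush and a branch from the original site to p.
 */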