/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

#include "book3s_xics.h"

#define DEBUG_PASSUP

/*
 * Real mode cannot go through an ioremapped MMIO mapping, so poke the
 * device with a sync followed by a cache-inhibited byte store (stbcix)
 * to the physical address directly.
 */
static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("sync; stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}

static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	unsigned long xics_phys;
	int cpu;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/* Check if the core is loaded, if not, too hard */
	cpu = vcpu->cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		this_icp->rm_action |= XICS_RM_KICK_VCPU;
		this_icp->rm_kick_target = vcpu;
		return;
	}
	/* In SMT cpu will always point to thread 0, we adjust it */
	cpu += vcpu->arch.ptid;

	/* Not too hard, then poke the target */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

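/*
 * Try to atomically switch the shared ICP state from @old to @new.
 * The EE output line is recomputed from the new state first; if the
 * cmpxchg succeeds and the output is asserted, the target vCPU is
 * poked via icp_rm_set_vcpu_irq(). Returns false if another update
 * raced with us, in which case the caller re-reads the state and
 * retries.
 */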
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}

static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * an H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent.
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->rm_action |= XICS_RM_CHECK_RESEND;
		icp->rm_resend_icp = icp;
	}
}

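/*
 * Real-mode handler for the H_XIRR hypercall (ICP Accept_Interrupt).
 * Returns the XIRR value in GPR4 and either H_SUCCESS or H_TOO_HARD
 * when the remaining work has to be completed in virtual mode.
 */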
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

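/*
 * Real-mode handler for the H_IPI hypercall (ICP Set_MFRR). Any reject
 * or resend that cannot be completed here is recorded in rm_action and
 * deferred to virtual mode via the H_TOO_HARD return from
 * check_too_hard().
 */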
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Pass rejects to virtual mode */
	if (reject && reject != XICS_IPI) {
		this_icp->rm_action |= XICS_RM_REJECT;
		this_icp->rm_reject = reject;
	}

	/* Pass resends to virtual mode */
	if (resend) {
		this_icp->rm_action |= XICS_RM_CHECK_RESEND;
		this_icp->rm_resend_icp = icp;
	}

	return check_too_hard(xics, this_icp);
}

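/*
 * Real-mode handler for the H_CPPR hypercall (ICP Set_CPPR). Lowering
 * the priority goes through icp_rm_down_cppr(); raising it may reject
 * the currently pending interrupt, which is then passed to virtual
 * mode for delivery back to the ICS.
 */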
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = ACCESS_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Pass rejects to virtual mode */
	if (reject && reject != XICS_IPI) {
		icp->rm_action |= XICS_RM_REJECT;
		icp->rm_reject = reject;
	}
 bail:
	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update.
	 *
	 * ICP State: Down_CPPR, which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it, we make it look like a reject */
	if (state->asserted) {
		icp->rm_action |= XICS_RM_REJECT;
		icp->rm_reject = irq;
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}
 bail:
	return check_too_hard(xics, icp);
}