/**************************************************************************
 *
 * Copyright © 2009-2015 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)

irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;

	spin_lock(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	masked_status = status & dev_priv->irq_mask;
	spin_unlock(&dev_priv->irq_lock);

	if (likely(status))
		outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

	if (!masked_status)
		return IRQ_NONE;

	if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			     SVGA_IRQFLAG_FENCE_GOAL)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
	}

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if (masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			     SVGA_IRQFLAG_ERROR))
		vmw_cmdbuf_tasklet_schedule(dev_priv->cman);

	return IRQ_HANDLED;
}

/*
 * Matches the wait_condition signature used by vmw_fallback_wait();
 * @seqno is unused here, idleness is read from SVGA_REG_BUSY.
 */
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

void vmw_update_seqno(struct vmw_private *dev_priv,
		      struct vmw_fifo_state *fifo_state)
{
	u32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);

	if (dev_priv->last_read_seqno != seqno) {
		dev_priv->last_read_seqno = seqno;
		vmw_marker_pull(&fifo_state->marker_queue, seqno);
		vmw_fences_update(dev_priv->fman);
	}
}

bool vmw_seqno_passed(struct vmw_private *dev_priv,
		      uint32_t seqno)
{
	struct vmw_fifo_state *fifo_state;
	bool ret;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	fifo_state = &dev_priv->fifo;
	vmw_update_seqno(dev_priv, fifo_state);
	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return true;

	if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
	    vmw_fifo_idle(dev_priv, seqno))
		return true;

	/**
	 * Then check if the seqno is higher than what we've actually
	 * emitted; in that case the fence is stale and signaled.
	 */

	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}

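/**
 * vmw_fallback_wait - Polling-based wait for a seqno to pass or for the
 * fifo to go idle, used when fence interrupts are not available.
 *
 * @dev_priv: Pointer to the device private structure.
 * @lazy: Sleep between polls of the wait condition if true.
 * @fifo_idle: Wait for the fifo to go idle rather than for @seqno to pass.
 * @seqno: The seqno to wait for (ignored when @fifo_idle is true).
 * @interruptible: Whether the wait may be interrupted by signals.
 * @timeout: Timeout in jiffies after which the device is declared locked up.
 *
 * Returns 0 when the wait condition was met or the device was declared
 * locked up, -ERESTARTSYS if interrupted by a signal, or an error returned
 * by vmw_cmdbuf_idle() while blocking command submission for an idle wait.
 */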
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/**
	 * Block command submission while waiting for idle.
	 */

	if (fifo_idle) {
		down_read(&fifo_state->rwsem);
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/**
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */

			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle) {
		u32 __iomem *fifo_mem = dev_priv->mmio_virt;
		iowrite32(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
	}
	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_idle)
		up_read(&fifo_state->rwsem);

	return ret;
}

void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->waiter_lock);
	if (dev_priv->fence_queue_waiters++ == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_ANY_FENCE,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_ANY_FENCE;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->waiter_lock);
	if (--dev_priv->fence_queue_waiters == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_ANY_FENCE;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->waiter_lock);
	if (dev_priv->goal_queue_waiters++ == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		outl(SVGA_IRQFLAG_FENCE_GOAL,
		     dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= SVGA_IRQFLAG_FENCE_GOAL;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);
}

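/**
 * vmw_goal_waiter_remove - Drop a fence-goal waiter; when the last waiter
 * goes away, mask off the SVGA_IRQFLAG_FENCE_GOAL interrupt.
 *
 * @dev_priv: Pointer to the device private structure.
 */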
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	spin_lock(&dev_priv->waiter_lock);
	if (--dev_priv->goal_queue_waiters == 0) {
		unsigned long irq_flags;

		spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
		dev_priv->irq_mask &= ~SVGA_IRQFLAG_FENCE_GOAL;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
	}
	spin_unlock(&dev_priv->waiter_lock);
}

int vmw_wait_seqno(struct vmw_private *dev_priv,
		   bool lazy, uint32_t seqno,
		   bool interruptible, unsigned long timeout)
{
	long ret;
	struct vmw_fifo_state *fifo = &dev_priv->fifo;

	if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
		return 0;

	if (likely(vmw_seqno_passed(dev_priv, seqno)))
		return 0;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

	if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
		return vmw_fallback_wait(dev_priv, lazy, true, seqno,
					 interruptible, timeout);

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return vmw_fallback_wait(dev_priv, lazy, false, seqno,
					 interruptible, timeout);

	vmw_seqno_waiter_add(dev_priv);

	if (interruptible)
		ret = wait_event_interruptible_timeout
			(dev_priv->fence_queue,
			 vmw_seqno_passed(dev_priv, seqno),
			 timeout);
	else
		ret = wait_event_timeout
			(dev_priv->fence_queue,
			 vmw_seqno_passed(dev_priv, seqno),
			 timeout);

	vmw_seqno_waiter_remove(dev_priv);

	if (unlikely(ret == 0))
		ret = -EBUSY;
	else if (likely(ret > 0))
		ret = 0;

	return ret;
}

void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	spin_lock_init(&dev_priv->irq_lock);
	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

int vmw_irq_postinstall(struct drm_device *dev)
{
	return 0;
}

void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
	outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

void vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
	if ((*waiter_count)++ == 0) {
		outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}

void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}