// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <linux/pci.h>
#include <linux/sched/signal.h>

#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 24)
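
/**
 * vmw_irqflag_fence_goal - Select the fence goal irq flag for this device
 *
 * @vmw: Pointer to the vmw_private device.
 *
 * Devices with the SVGA_CAP2_EXTRA_REGS capability signal fence goal
 * updates with SVGA_IRQFLAG_REG_FENCE_GOAL; older devices use
 * SVGA_IRQFLAG_FENCE_GOAL.
 */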
static u32 vmw_irqflag_fence_goal(struct vmw_private *vmw)
{
	if ((vmw->capabilities2 & SVGA_CAP2_EXTRA_REGS) != 0)
		return SVGA_IRQFLAG_REG_FENCE_GOAL;
	else
		return SVGA_IRQFLAG_FENCE_GOAL;
}

/**
 * vmw_thread_fn - Deferred (process context) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the deferred part of irq processing.
 * The function is guaranteed to run at least once after
 * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
 */
static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	irqreturn_t ret = IRQ_NONE;

	if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
			       dev_priv->irqthread_pending)) {
		vmw_fences_update(dev_priv->fman);
		wake_up_all(&dev_priv->fence_queue);
		ret = IRQ_HANDLED;
	}

	if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
			       dev_priv->irqthread_pending)) {
		vmw_cmdbuf_irqthread(dev_priv->cman);
		ret = IRQ_HANDLED;
	}

	return ret;
}

/**
 * vmw_irq_handler - irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the quick part of irq processing.
 * The function performs fast actions like clearing the device interrupt
 * flags and also reasonably quick actions like waking processes waiting for
 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status, masked_status;
	irqreturn_t ret = IRQ_HANDLED;

	status = vmw_irq_status_read(dev_priv);
	masked_status = status & READ_ONCE(dev_priv->irq_mask);

	if (likely(status))
		vmw_irq_status_write(dev_priv, status);

	if (!status)
		return IRQ_NONE;

	if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
		wake_up_all(&dev_priv->fifo_queue);

	if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
			      vmw_irqflag_fence_goal(dev_priv))) &&
	    !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
			      SVGA_IRQFLAG_ERROR)) &&
	    !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
			      dev_priv->irqthread_pending))
		ret = IRQ_WAKE_THREAD;

	return ret;
}

static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
	/* @seqno is unused; the signature matches vmw_fallback_wait()'s
	 * wait_condition callback.
	 */
	return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

bool vmw_seqno_passed(struct vmw_private *dev_priv,
		      uint32_t seqno)
{
	bool ret;
	u32 last_read_seqno = atomic_read_acquire(&dev_priv->last_read_seqno);

	if (last_read_seqno - seqno < VMW_FENCE_WRAP)
		return true;

	last_read_seqno = vmw_fences_update(dev_priv->fman);
	if (last_read_seqno - seqno < VMW_FENCE_WRAP)
		return true;

	if (!vmw_has_fences(dev_priv) && vmw_fifo_idle(dev_priv, seqno))
		return true;

	/*
	 * Then check if the seqno is higher than what we've actually
	 * emitted. If so, the fence is stale and signaled.
	 */
	ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
	       > VMW_FENCE_WRAP);

	return ret;
}
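
/**
 * vmw_fallback_wait - Wait for a seqno or for fifo idle by polling
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @lazy: Whether to sleep between polls rather than busy-wait.
 * @fifo_idle: Wait for the fifo to idle instead of for @seqno to pass.
 * @seqno: The seqno to wait for, unless @fifo_idle is set.
 * @interruptible: Whether the wait may be cut short by a signal.
 * @timeout: Maximum time to wait, in jiffies.
 *
 * Polling fallback used when the wait cannot rely on fence irqs. With
 * @fifo_idle set, command submission is blocked while waiting.
 *
 * Return: Zero if the wait condition was met or the wait timed out
 * (a timeout logs an SVGA device lockup error), -ERESTARTSYS if the
 * wait was interrupted by a signal, or an error from idling the
 * command buffer manager.
 */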
int vmw_fallback_wait(struct vmw_private *dev_priv,
		      bool lazy,
		      bool fifo_idle,
		      uint32_t seqno,
		      bool interruptible,
		      unsigned long timeout)
{
	struct vmw_fifo_state *fifo_state = dev_priv->fifo;
	bool fifo_down = false;
	uint32_t count = 0;
	uint32_t signal_seq;
	int ret;
	unsigned long end_jiffies = jiffies + timeout;
	bool (*wait_condition)(struct vmw_private *, uint32_t);
	DEFINE_WAIT(__wait);

	wait_condition = (fifo_idle) ? &vmw_fifo_idle :
		&vmw_seqno_passed;

	/*
	 * Block command submission while waiting for idle.
	 */
	if (fifo_idle) {
		if (dev_priv->cman) {
			ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
					      10*HZ);
			if (ret)
				goto out_err;
		} else if (fifo_state) {
			down_read(&fifo_state->rwsem);
			fifo_down = true;
		}
	}

	signal_seq = atomic_read(&dev_priv->marker_seq);
	ret = 0;

	for (;;) {
		prepare_to_wait(&dev_priv->fence_queue, &__wait,
				(interruptible) ?
				TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
		if (wait_condition(dev_priv, seqno))
			break;
		if (time_after_eq(jiffies, end_jiffies)) {
			DRM_ERROR("SVGA device lockup.\n");
			break;
		}
		if (lazy)
			schedule_timeout(1);
		else if ((++count & 0x0F) == 0) {
			/*
			 * FIXME: Use schedule_hr_timeout here for
			 * newer kernels and lower CPU utilization.
			 */
			__set_current_state(TASK_RUNNING);
			schedule();
			__set_current_state((interruptible) ?
					    TASK_INTERRUPTIBLE :
					    TASK_UNINTERRUPTIBLE);
		}
		if (interruptible && signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
	}
	finish_wait(&dev_priv->fence_queue, &__wait);
	if (ret == 0 && fifo_idle && fifo_state)
		vmw_fence_write(dev_priv, signal_seq);

	wake_up_all(&dev_priv->fence_queue);
out_err:
	if (fifo_down)
		up_read(&fifo_state->rwsem);

	return ret;
}
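
/**
 * vmw_generic_waiter_add - Account for a new waiter on an irq flag
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @flag: The SVGA_IRQFLAG_* flag being waited on.
 * @waiter_count: Counter of current waiters on @flag.
 *
 * The first waiter clears any pending instance of @flag from the irq
 * status register and unmasks the flag in the device irq mask.
 *
 * Return: Whether the device irq mask was written.
 */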
bool vmw_generic_waiter_add(struct vmw_private *dev_priv,
			    u32 flag, int *waiter_count)
{
	bool hw_programmed = false;

	spin_lock(&dev_priv->waiter_lock);
	if ((*waiter_count)++ == 0) {
		vmw_irq_status_write(dev_priv, flag);
		dev_priv->irq_mask |= flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		hw_programmed = true;
	}
	spin_unlock(&dev_priv->waiter_lock);
	return hw_programmed;
}

/**
 * vmw_generic_waiter_remove - Account for a removed waiter on an irq flag
 *
 * @dev_priv: Pointer to the vmw_private device.
 * @flag: The SVGA_IRQFLAG_* flag that was waited on.
 * @waiter_count: Counter of current waiters on @flag.
 *
 * The last waiter masks @flag in the device irq mask.
 *
 * Return: Whether the device irq mask was written.
 */
bool vmw_generic_waiter_remove(struct vmw_private *dev_priv,
			       u32 flag, int *waiter_count)
{
	bool hw_programmed = false;

	spin_lock(&dev_priv->waiter_lock);
	if (--(*waiter_count) == 0) {
		dev_priv->irq_mask &= ~flag;
		vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
		hw_programmed = true;
	}
	spin_unlock(&dev_priv->waiter_lock);
	return hw_programmed;
}

bool vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
	return vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
				      &dev_priv->fence_queue_waiters);
}

bool vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
	return vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
					 &dev_priv->fence_queue_waiters);
}

bool vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
	return vmw_generic_waiter_add(dev_priv, vmw_irqflag_fence_goal(dev_priv),
				      &dev_priv->goal_queue_waiters);
}

bool vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
	return vmw_generic_waiter_remove(dev_priv, vmw_irqflag_fence_goal(dev_priv),
					 &dev_priv->goal_queue_waiters);
}

static void vmw_irq_preinstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	uint32_t status;

	/* Clear all pending irq flags before installing handlers. */
	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);
}

void vmw_irq_uninstall(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	uint32_t status;
	u32 i;

	if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
		return;

	vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

	status = vmw_irq_status_read(dev_priv);
	vmw_irq_status_write(dev_priv, status);

	for (i = 0; i < dev_priv->num_irq_vectors; ++i)
		free_irq(dev_priv->irqs[i], dev);

	pci_free_irq_vectors(pdev);
	dev_priv->num_irq_vectors = 0;
}

/**
 * vmw_irq_install - Install the irq handlers
 *
 * @dev_priv: Pointer to the vmw_private device.
 * Return: Zero if successful. Negative number otherwise.
 */
int vmw_irq_install(struct vmw_private *dev_priv)
{
	struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
	struct drm_device *dev = &dev_priv->drm;
	int ret;
	int nvec;
	int i = 0;

	BUILD_BUG_ON((SVGA_IRQFLAG_MAX >> VMWGFX_MAX_NUM_IRQS) != 1);
	BUG_ON(VMWGFX_MAX_NUM_IRQS != get_count_order(SVGA_IRQFLAG_MAX));

	nvec = pci_alloc_irq_vectors(pdev, 1, VMWGFX_MAX_NUM_IRQS,
				     PCI_IRQ_ALL_TYPES);

	if (nvec <= 0) {
		drm_err(&dev_priv->drm,
			"IRQs are unavailable, nvec: %d\n", nvec);
		ret = nvec;
		goto done;
	}

	vmw_irq_preinstall(dev);

	for (i = 0; i < nvec; ++i) {
		ret = pci_irq_vector(pdev, i);
		if (ret < 0) {
			drm_err(&dev_priv->drm,
				"failed getting irq vector: %d\n", ret);
			goto done;
		}
		dev_priv->irqs[i] = ret;

		ret = request_threaded_irq(dev_priv->irqs[i], vmw_irq_handler,
					   vmw_thread_fn, IRQF_SHARED,
					   VMWGFX_DRIVER_NAME, dev);
		if (ret != 0) {
			drm_err(&dev_priv->drm,
				"Failed installing irq(%d): %d\n",
				dev_priv->irqs[i], ret);
			goto done;
		}
	}

done:
	dev_priv->num_irq_vectors = i;
	return ret;
}