/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
	int num_fence_objects;
	struct vmw_private *dev_priv;
	spinlock_t lock;
	struct list_head fence_list;
	struct work_struct work, ping_work;
	u32 user_fence_size;
	u32 fence_size;
	u32 event_fence_action_size;
	bool fifo_down;
	struct list_head cleanup_list;
	uint32_t pending_actions[VMW_ACTION_MAX];
	struct mutex goal_irq_mutex;
	bool goal_irq_on; /* Protected by @goal_irq_mutex */
	bool seqno_valid; /* Protected by @lock, and may not be set to true
			     without the @goal_irq_mutex held. */
	unsigned ctx;
};

struct vmw_user_fence {
	struct ttm_base_object base;
	struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @fpriv_head: List head used to track events whose struct drm_file is
 * about to be closed.
 * @event: A pointer to the pending event that is delivered when the fence
 * signals.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
	struct vmw_fence_action action;
	struct list_head fpriv_head;

	struct drm_pending_event *event;
	struct vmw_fence_obj *fence;
	struct drm_device *dev;

	uint32_t *tv_sec;
	uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
	return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del_init(&fence->head);
	--fman->num_fence_objects;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct fence *f)
{
	return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct fence *f)
{
	return "svga";
}

static void vmw_fence_ping_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, ping_work);

	vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
}

static bool vmw_fence_enable_signaling(struct fence *f)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;

	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	u32 seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
	if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
		return false;

	if (mutex_trylock(&dev_priv->hw_mutex)) {
		vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
		mutex_unlock(&dev_priv->hw_mutex);
	} else
		schedule_work(&fman->ping_work);

	return true;
}

struct vmwgfx_wait_cb {
	struct fence_cb base;
	struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct fence *fence, struct fence_cb *cb)
{
	struct vmwgfx_wait_cb *wait =
		container_of(cb, struct vmwgfx_wait_cb, base);

	wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

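/**
 * vmw_fence_wait - The struct fence_ops wait callback.
 *
 * @f: The fence.
 * @intr: Whether the wait is interruptible.
 * @timeout: Timeout in jiffies.
 *
 * Polls the device seqno and sleeps with a callback on the fence's
 * callback list until the fence signals, the timeout expires or, for
 * interruptible waits, a signal becomes pending. Returns the remaining
 * timeout, zero on timeout, or -ERESTARTSYS if interrupted.
 */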
static long vmw_fence_wait(struct fence *f, bool intr, signed long timeout)
{
	struct vmw_fence_obj *fence =
		container_of(f, struct vmw_fence_obj, base);

	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_private *dev_priv = fman->dev_priv;
	struct vmwgfx_wait_cb cb;
	long ret = timeout;
	unsigned long irq_flags;

	if (likely(vmw_fence_obj_signaled(fence)))
		return timeout;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
	vmw_seqno_waiter_add(dev_priv);

	spin_lock_irqsave(f->lock, irq_flags);

	if (intr && signal_pending(current)) {
		ret = -ERESTARTSYS;
		goto out;
	}

	cb.base.func = vmwgfx_wait_cb;
	cb.task = current;
	list_add(&cb.base.node, &f->cb_list);

	while (ret > 0) {
		__vmw_fences_update(fman);
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &f->flags))
			break;

		if (intr)
			__set_current_state(TASK_INTERRUPTIBLE);
		else
			__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(f->lock, irq_flags);

		ret = schedule_timeout(ret);

		spin_lock_irqsave(f->lock, irq_flags);
		if (ret > 0 && intr && signal_pending(current))
			ret = -ERESTARTSYS;
	}

	if (!list_empty(&cb.base.node))
		list_del(&cb.base.node);
	__set_current_state(TASK_RUNNING);

out:
	spin_unlock_irqrestore(f->lock, irq_flags);

	vmw_seqno_waiter_remove(dev_priv);

	return ret;
}

static struct fence_ops vmw_fence_ops = {
	.get_driver_name = vmw_fence_get_driver_name,
	.get_timeline_name = vmw_fence_get_timeline_name,
	.enable_signaling = vmw_fence_enable_signaling,
	.wait = vmw_fence_wait,
	.release = vmw_fence_obj_destroy,
};


/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
	struct vmw_fence_manager *fman =
		container_of(work, struct vmw_fence_manager, work);
	struct list_head list;
	struct vmw_fence_action *action, *next_action;
	bool seqno_valid;

	do {
		INIT_LIST_HEAD(&list);
		mutex_lock(&fman->goal_irq_mutex);

		spin_lock_irq(&fman->lock);
		list_splice_init(&fman->cleanup_list, &list);
		seqno_valid = fman->seqno_valid;
		spin_unlock_irq(&fman->lock);

		if (!seqno_valid && fman->goal_irq_on) {
			fman->goal_irq_on = false;
			vmw_goal_waiter_remove(fman->dev_priv);
		}
		mutex_unlock(&fman->goal_irq_mutex);

		if (list_empty(&list))
			return;

		/*
		 * At this point, only we should be able to manipulate the
		 * list heads of the actions we have on the private list.
		 * hence fman::lock not held.
		 */

		list_for_each_entry_safe(action, next_action, &list, head) {
			list_del_init(&action->head);
			if (action->cleanup)
				action->cleanup(action);
		}
	} while (1);
}

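/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * @dev_priv: Pointer to the device private structure.
 *
 * Returns a pointer to a newly allocated struct vmw_fence_manager with
 * its lists, locks and work items set up, or NULL on allocation failure.
 * The manager starts out with fifo_down set, so vmw_fence_fifo_up() must
 * be called before fence objects can be created.
 */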
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
	struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

	if (unlikely(fman == NULL))
		return NULL;

	fman->dev_priv = dev_priv;
	spin_lock_init(&fman->lock);
	INIT_LIST_HEAD(&fman->fence_list);
	INIT_LIST_HEAD(&fman->cleanup_list);
	INIT_WORK(&fman->work, &vmw_fence_work_func);
	INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
	fman->fifo_down = true;
	fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
	fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
	fman->event_fence_action_size =
		ttm_round_pot(sizeof(struct vmw_event_fence_action));
	mutex_init(&fman->goal_irq_mutex);
	fman->ctx = fence_context_alloc(1);

	return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;
	bool lists_empty;

	(void) cancel_work_sync(&fman->work);
	(void) cancel_work_sync(&fman->ping_work);

	spin_lock_irqsave(&fman->lock, irq_flags);
	lists_empty = list_empty(&fman->fence_list) &&
		      list_empty(&fman->cleanup_list);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	BUG_ON(!lists_empty);
	kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
			      struct vmw_fence_obj *fence, u32 seqno,
			      void (*destroy) (struct vmw_fence_obj *fence))
{
	unsigned long irq_flags;
	int ret = 0;

	fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
		   fman->ctx, seqno);
	INIT_LIST_HEAD(&fence->seq_passed_actions);
	fence->destroy = destroy;

	spin_lock_irqsave(&fman->lock, irq_flags);
	if (unlikely(fman->fifo_down)) {
		ret = -EBUSY;
		goto out_unlock;
	}
	list_add_tail(&fence->head, &fman->fence_list);
	++fman->num_fence_objects;

out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
	return ret;

}

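/**
 * vmw_fences_perform_actions - Execute actions attached to signaled fences.
 *
 * @fman: Pointer to a fence manager.
 * @list: List of actions to execute.
 *
 * Calls each action's seq_passed callback and then moves the action to
 * the fence manager's cleanup list, so that its cleanup callback can be
 * run from the fence manager worker rather than from atomic context.
 * Called with the fence manager lock held.
 */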
static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
				       struct list_head *list)
{
	struct vmw_fence_action *action, *next_action;

	list_for_each_entry_safe(action, next_action, list, head) {
		list_del_init(&action->head);
		fman->pending_actions[action->type]--;
		if (action->seq_passed != NULL)
			action->seq_passed(action);

		/*
		 * Add the cleanup action to the cleanup list so that
		 * it will be performed by a worker task.
		 */

		list_add_tail(&action->head, &fman->cleanup_list);
	}
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
				      u32 passed_seqno)
{
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;
	struct vmw_fence_obj *fence;

	if (likely(!fman->seqno_valid))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
		return false;

	fman->seqno_valid = false;
	list_for_each_entry(fence, &fman->fence_list, head) {
		if (!list_empty(&fence->seq_passed_actions)) {
			fman->seqno_valid = true;
			iowrite32(fence->base.seqno,
				  fifo_mem + SVGA_FIFO_FENCE_GOAL);
			break;
		}
	}

	return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	u32 goal_seqno;
	__le32 __iomem *fifo_mem;

	if (fence_is_signaled_locked(&fence->base))
		return false;

	fifo_mem = fman->dev_priv->mmio_virt;
	goal_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE_GOAL);
	if (likely(fman->seqno_valid &&
		   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
		return false;

	iowrite32(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
	fman->seqno_valid = true;

	return true;
}

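/**
 * __vmw_fences_update - Signal fences whose seqno the device has passed.
 *
 * @fman: Pointer to a fence manager.
 *
 * Reads the last passed seqno from the FIFO and signals every fence on
 * the fence list that it covers, executing any attached actions. If the
 * fence goal was moved forward as a result, the seqno is re-read and the
 * scan is rerun to close the race with the hardware. Schedules the
 * cleanup worker if any actions were queued for cleanup. Must be called
 * with the fence manager lock held.
 */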
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
	struct vmw_fence_obj *fence, *next_fence;
	struct list_head action_list;
	bool needs_rerun;
	uint32_t seqno, new_seqno;
	__le32 __iomem *fifo_mem = fman->dev_priv->mmio_virt;

	seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
rerun:
	list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
		if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
			list_del_init(&fence->head);
			fence_signal_locked(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		} else
			break;
	}

	/*
	 * Rerun if the fence goal seqno was updated, and the
	 * hardware might have raced with that update, so that
	 * we missed a fence_goal irq.
	 */

	needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
	if (unlikely(needs_rerun)) {
		new_seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
		if (new_seqno != seqno) {
			seqno = new_seqno;
			goto rerun;
		}
	}

	if (!list_empty(&fman->cleanup_list))
		(void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	__vmw_fences_update(fman);
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
		return true;

	vmw_fences_update(fman);

	return fence_is_signaled(&fence->base);
}

int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
		       bool interruptible, unsigned long timeout)
{
	long ret = fence_wait_timeout(&fence->base, interruptible, timeout);

	if (likely(ret > 0))
		return 0;
	else if (ret == 0)
		return -EBUSY;
	else
		return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
	struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

	vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
	fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
		     uint32_t seqno,
		     struct vmw_fence_obj **p_fence)
{
	struct vmw_fence_obj *fence;
	int ret;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (unlikely(fence == NULL))
		return -ENOMEM;

	ret = vmw_fence_obj_init(fman, fence, seqno,
				 vmw_fence_destroy);
	if (unlikely(ret != 0))
		goto out_err_init;

	*p_fence = fence;
	return 0;

out_err_init:
	kfree(fence);
	return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
	struct vmw_user_fence *ufence =
		container_of(fence, struct vmw_user_fence, fence);
	struct vmw_fence_manager *fman = fman_from_fence(fence);

	ttm_base_object_kfree(ufence, base);
	/*
	 * Free kernel space accounting.
	 */
	ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
			    fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_fence *ufence =
		container_of(base, struct vmw_user_fence, base);
	struct vmw_fence_obj *fence = &ufence->fence;

	*p_base = NULL;
	vmw_fence_obj_unreference(&fence);
}

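/**
 * vmw_user_fence_create - Create a fence object with a user-space handle.
 *
 * @file_priv: The file the handle is created for.
 * @fman: Pointer to a fence manager.
 * @seqno: The seqno the fence object is created with.
 * @p_fence: On successful return points to the new fence object.
 * @p_handle: On successful return contains the user-space handle.
 *
 * Accounts the object against the TTM global memory limit, embeds the
 * fence in a TTM base object and registers it with the caller's object
 * file so that user-space can look it up by handle.
 */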
int vmw_user_fence_create(struct drm_file *file_priv,
			  struct vmw_fence_manager *fman,
			  uint32_t seqno,
			  struct vmw_fence_obj **p_fence,
			  uint32_t *p_handle)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_user_fence *ufence;
	struct vmw_fence_obj *tmp;
	struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
	int ret;

	/*
	 * Kernel memory space accounting, since this object may
	 * be created by a user-space request.
	 */

	ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
	if (unlikely(ufence == NULL)) {
		ret = -ENOMEM;
		goto out_no_object;
	}

	ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
				 vmw_user_fence_destroy);
	if (unlikely(ret != 0)) {
		kfree(ufence);
		goto out_no_object;
	}

	/*
	 * The base object holds a reference which is freed in
	 * vmw_user_fence_base_release.
	 */
	tmp = vmw_fence_obj_reference(&ufence->fence);
	ret = ttm_base_object_init(tfile, &ufence->base, false,
				   VMW_RES_FENCE,
				   &vmw_user_fence_base_release, NULL);


	if (unlikely(ret != 0)) {
		/*
		 * Free the base object's reference
		 */
		vmw_fence_obj_unreference(&tmp);
		goto out_err;
	}

	*p_fence = &ufence->fence;
	*p_handle = ufence->base.hash.key;

	return 0;
out_err:
	tmp = &ufence->fence;
	vmw_fence_obj_unreference(&tmp);
out_no_object:
	ttm_mem_global_free(mem_glob, fman->user_fence_size);
	return ret;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 *
 * @fman: Pointer to a fence manager.
 *
 * Sets fifo_down to block further fence creation, then waits for each
 * remaining fence. Fences that fail to signal within the timeout are
 * force-signaled and their actions run.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
	struct list_head action_list;
	int ret;

	/*
	 * The list may be altered while we traverse it, so always
	 * restart when we've released the fman->lock.
	 */

	spin_lock_irq(&fman->lock);
	fman->fifo_down = true;
	while (!list_empty(&fman->fence_list)) {
		struct vmw_fence_obj *fence =
			list_entry(fman->fence_list.prev, struct vmw_fence_obj,
				   head);
		fence_get(&fence->base);
		spin_unlock_irq(&fman->lock);

		ret = vmw_fence_obj_wait(fence, false, false,
					 VMW_FENCE_WAIT_TIMEOUT);

		if (unlikely(ret != 0)) {
			list_del_init(&fence->head);
			fence_signal(&fence->base);
			INIT_LIST_HEAD(&action_list);
			list_splice_init(&fence->seq_passed_actions,
					 &action_list);
			vmw_fences_perform_actions(fman, &action_list);
		}

		BUG_ON(!list_empty(&fence->head));
		fence_put(&fence->base);
		spin_lock_irq(&fman->lock);
	}
	spin_unlock_irq(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	fman->fifo_down = false;
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}

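/**
 * vmw_fence_obj_wait_ioctl - Wait for a user-space fence object to signal.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_wait_arg.
 * @file_priv: The calling file.
 *
 * Converts the user-supplied timeout from microseconds to jiffies, looks
 * up the fence object by handle and waits for it, optionally dropping the
 * user-space reference when the wait succeeds and
 * DRM_VMW_WAIT_OPTION_UNREF is set.
 */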
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct drm_vmw_fence_wait_arg *arg =
	    (struct drm_vmw_fence_wait_arg *)data;
	unsigned long timeout;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;
	uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

	/*
	 * 64-bit division not present on 32-bit systems, so do an
	 * approximation. (Divide by 1000000). The shifts below compute
	 * x * (2^-20 + 2^-24 - 2^-26) = x * 67 / 2^26, which is within
	 * about 0.2 percent of x / 1000000.
	 */

	wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
	    (wait_timeout >> 26);

	if (!arg->cookie_valid) {
		arg->cookie_valid = 1;
		arg->kernel_cookie = jiffies + wait_timeout;
	}

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Wait invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);

	timeout = jiffies;
	if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
		ret = ((vmw_fence_obj_signaled(fence)) ?
		       0 : -EBUSY);
		goto out;
	}

	timeout = (unsigned long)arg->kernel_cookie - timeout;

	ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
	ttm_base_object_unref(&base);

	/*
	 * Optionally unref the fence object.
	 */

	if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
		return ttm_ref_object_base_unref(tfile, arg->handle,
						 TTM_REF_USAGE);
	return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
				 struct drm_file *file_priv)
{
	struct drm_vmw_fence_signaled_arg *arg =
		(struct drm_vmw_fence_signaled_arg *) data;
	struct ttm_base_object *base;
	struct vmw_fence_obj *fence;
	struct vmw_fence_manager *fman;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);

	base = ttm_base_object_lookup(tfile, arg->handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Fence signaled invalid fence object handle "
		       "0x%08lx.\n",
		       (unsigned long)arg->handle);
		return -EINVAL;
	}

	fence = &(container_of(base, struct vmw_user_fence, base)->fence);
	fman = fman_from_fence(fence);

	arg->signaled = vmw_fence_obj_signaled(fence);

	arg->signaled_flags = arg->flags;
	spin_lock_irq(&fman->lock);
	arg->passed_seqno = dev_priv->last_read_seqno;
	spin_unlock_irq(&fman->lock);

	ttm_base_object_unref(&base);

	return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_fence_arg *arg =
		(struct drm_vmw_fence_arg *) data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

/**
 * vmw_event_fence_fpriv_gone - Remove references to struct drm_file objects
 *
 * @fman: Pointer to a struct vmw_fence_manager
 * @event_list: Pointer to linked list of struct vmw_event_fence_action objects
 * with pointers to a struct drm_file object about to be closed.
 *
 * This function removes all pending fence events with references to a
 * specific struct drm_file object about to be closed. The caller is required
 * to pass a list of all struct vmw_event_fence_action objects with such
 * events attached. This function is typically called before the
 * struct drm_file object's event management is taken down.
 */
void vmw_event_fence_fpriv_gone(struct vmw_fence_manager *fman,
				struct list_head *event_list)
{
	struct vmw_event_fence_action *eaction;
	struct drm_pending_event *event;
	unsigned long irq_flags;

	while (1) {
		spin_lock_irqsave(&fman->lock, irq_flags);
		if (list_empty(event_list))
			goto out_unlock;
		eaction = list_first_entry(event_list,
					   struct vmw_event_fence_action,
					   fpriv_head);
		list_del_init(&eaction->fpriv_head);
		event = eaction->event;
		eaction->event = NULL;
		spin_unlock_irqrestore(&fman->lock, irq_flags);
		event->destroy(event);
	}
out_unlock:
	spin_unlock_irqrestore(&fman->lock, irq_flags);
}


/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context, and may be called
 * from irq context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct drm_device *dev = eaction->dev;
	struct drm_pending_event *event = eaction->event;
	struct drm_file *file_priv;
	unsigned long irq_flags;

	if (unlikely(event == NULL))
		return;

	file_priv = event->file_priv;
	spin_lock_irqsave(&dev->event_lock, irq_flags);

	if (likely(eaction->tv_sec != NULL)) {
		struct timeval tv;

		do_gettimeofday(&tv);
		*eaction->tv_sec = tv.tv_sec;
		*eaction->tv_usec = tv.tv_usec;
	}

	list_del_init(&eaction->fpriv_head);
	list_add_tail(&eaction->event->link, &file_priv->event_list);
	eaction->event = NULL;
	wake_up_all(&file_priv->event_wait);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
	struct vmw_event_fence_action *eaction =
		container_of(action, struct vmw_event_fence_action, action);
	struct vmw_fence_manager *fman = fman_from_fence(eaction->fence);
	unsigned long irq_flags;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_del(&eaction->fpriv_head);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_unreference(&eaction->fence);
	kfree(eaction);
}

/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
				     struct vmw_fence_action *action)
{
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	unsigned long irq_flags;
	bool run_update = false;

	mutex_lock(&fman->goal_irq_mutex);
	spin_lock_irqsave(&fman->lock, irq_flags);

	fman->pending_actions[action->type]++;
	if (fence_is_signaled_locked(&fence->base)) {
		struct list_head action_list;

		INIT_LIST_HEAD(&action_list);
		list_add_tail(&action->head, &action_list);
		vmw_fences_perform_actions(fman, &action_list);
	} else {
		list_add_tail(&action->head, &fence->seq_passed_actions);

		/*
		 * This function may set fman::seqno_valid, so it must
		 * be run with the goal_irq_mutex held.
		 */
		run_update = vmw_fence_goal_check_locked(fence);
	}

	spin_unlock_irqrestore(&fman->lock, irq_flags);

	if (run_update) {
		if (!fman->goal_irq_on) {
			fman->goal_irq_on = true;
			vmw_goal_waiter_add(fman->dev_priv);
		}
		vmw_fences_update(fman);
	}
	mutex_unlock(&fman->goal_irq_mutex);

}

/**
 * vmw_event_fence_action_queue - Post an event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been alloced
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, points to the variable that receives the tv_sec
 * timestamp when the fence signals.
 * @tv_usec: If non-null, points to the variable that receives the tv_usec
 * timestamp when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
				 struct vmw_fence_obj *fence,
				 struct drm_pending_event *event,
				 uint32_t *tv_sec,
				 uint32_t *tv_usec,
				 bool interruptible)
{
	struct vmw_event_fence_action *eaction;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	unsigned long irq_flags;

	eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
	if (unlikely(eaction == NULL))
		return -ENOMEM;

	eaction->event = event;

	eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
	eaction->action.cleanup = vmw_event_fence_action_cleanup;
	eaction->action.type = VMW_ACTION_EVENT;

	eaction->fence = vmw_fence_obj_reference(fence);
	eaction->dev = fman->dev_priv->dev;
	eaction->tv_sec = tv_sec;
	eaction->tv_usec = tv_usec;

	spin_lock_irqsave(&fman->lock, irq_flags);
	list_add_tail(&eaction->fpriv_head, &vmw_fp->fence_events);
	spin_unlock_irqrestore(&fman->lock, irq_flags);

	vmw_fence_obj_add_action(fence, &eaction->action);

	return 0;
}

struct vmw_event_fence_pending {
	struct drm_pending_event base;
	struct drm_vmw_event_fence event;
};

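/**
 * vmw_event_fence_action_create - Allocate and queue a drm event for a fence.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @flags: If DRM_VMW_FE_FLAG_REQ_TIME is set, the event is timestamped
 * when the fence signals.
 * @user_data: Opaque value copied back to user-space in the event.
 * @interruptible: Interruptible waits if possible.
 *
 * Accounts the event against the file's event space, allocates a
 * struct drm_vmw_event_fence and hands it to
 * vmw_event_fence_action_queue(). On error, the reserved event space is
 * returned to the file.
 */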
static int vmw_event_fence_action_create(struct drm_file *file_priv,
					 struct vmw_fence_obj *fence,
					 uint32_t flags,
					 uint64_t user_data,
					 bool interruptible)
{
	struct vmw_event_fence_pending *event;
	struct vmw_fence_manager *fman = fman_from_fence(fence);
	struct drm_device *dev = fman->dev_priv->dev;
	unsigned long irq_flags;
	int ret;

	spin_lock_irqsave(&dev->event_lock, irq_flags);

	ret = (file_priv->event_space < sizeof(event->event)) ?
		-EBUSY : 0;
	if (likely(ret == 0))
		file_priv->event_space -= sizeof(event->event);

	spin_unlock_irqrestore(&dev->event_lock, irq_flags);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate event space for this file.\n");
		goto out_no_space;
	}


	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (unlikely(event == NULL)) {
		DRM_ERROR("Failed to allocate an event.\n");
		ret = -ENOMEM;
		goto out_no_event;
	}

	event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
	event->event.base.length = sizeof(*event);
	event->event.user_data = user_data;

	event->base.event = &event->event.base;
	event->base.file_priv = file_priv;
	event->base.destroy = (void (*) (struct drm_pending_event *)) kfree;


	if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   &event->event.tv_sec,
						   &event->event.tv_usec,
						   interruptible);
	else
		ret = vmw_event_fence_action_queue(file_priv, fence,
						   &event->base,
						   NULL,
						   NULL,
						   interruptible);
	if (ret != 0)
		goto out_no_queue;

	return 0;

out_no_queue:
	event->base.destroy(&event->base);
out_no_event:
	spin_lock_irqsave(&dev->event_lock, irq_flags);
	/* Return exactly the event space that was reserved above. */
	file_priv->event_space += sizeof(event->event);
	spin_unlock_irqrestore(&dev->event_lock, irq_flags);
out_no_space:
	return ret;
}

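/**
 * vmw_fence_event_ioctl - Attach a drm event to a fence object.
 *
 * @dev: The drm device.
 * @data: Pointer to a struct drm_vmw_fence_event_arg.
 * @file_priv: The calling file.
 *
 * Looks up the fence object identified by the handle in the argument, or,
 * if no handle is given, creates a new fence for the currently submitted
 * commands, and attaches an event that is delivered to user-space when
 * the fence signals. Optionally copies fence information back to
 * user-space.
 */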
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_fence_event_arg *arg =
		(struct drm_vmw_fence_event_arg *) data;
	struct vmw_fence_obj *fence = NULL;
	struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
	struct drm_vmw_fence_rep __user *user_fence_rep =
		(struct drm_vmw_fence_rep __user *)(unsigned long)
		arg->fence_rep;
	uint32_t handle;
	int ret;

	/*
	 * Look up an existing fence object,
	 * and if user-space wants a new reference,
	 * add one.
	 */
	if (arg->handle) {
		struct ttm_base_object *base =
			ttm_base_object_lookup_for_ref(dev_priv->tdev,
						       arg->handle);

		if (unlikely(base == NULL)) {
			DRM_ERROR("Fence event invalid fence object handle "
				  "0x%08lx.\n",
				  (unsigned long)arg->handle);
			return -EINVAL;
		}
		fence = &(container_of(base, struct vmw_user_fence,
				       base)->fence);
		(void) vmw_fence_obj_reference(fence);

		if (user_fence_rep != NULL) {
			bool existed;

			ret = ttm_ref_object_add(vmw_fp->tfile, base,
						 TTM_REF_USAGE, &existed);
			if (unlikely(ret != 0)) {
				DRM_ERROR("Failed to reference a fence "
					  "object.\n");
				goto out_no_ref_obj;
			}
			handle = base->hash.key;
		}
		ttm_base_object_unref(&base);
	}

	/*
	 * Create a new fence object.
	 */
	if (!fence) {
		ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
						 &fence,
						 (user_fence_rep) ?
						 &handle : NULL);
		if (unlikely(ret != 0)) {
			DRM_ERROR("Fence event failed to create fence.\n");
			return ret;
		}
	}

	BUG_ON(fence == NULL);

	ret = vmw_event_fence_action_create(file_priv, fence,
					    arg->flags,
					    arg->user_data,
					    true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Failed to attach event to fence.\n");
		goto out_no_create;
	}

	vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
				    handle);
	vmw_fence_obj_unreference(&fence);
	return 0;
out_no_create:
	if (user_fence_rep != NULL)
		ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					  handle, TTM_REF_USAGE);
out_no_ref_obj:
	vmw_fence_obj_unreference(&fence);
	return ret;
}