/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <dev/drm2/drmP.h>
#include <dev/drm2/ttm/ttm_bo_driver.h>
#include <dev/drm2/ttm/ttm_placement.h>
#include <sys/sf_buf.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}

int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}

int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
{
	if (likely(man->io_reserve_fastpath))
		return 0;

	if (interruptible) {
		if (sx_xlock_sig(&man->io_reserve_mutex))
			return (-EINTR);
		else
			return (0);
	}

	sx_xlock(&man->io_reserve_mutex);
	return 0;
}

void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
{
	if (likely(man->io_reserve_fastpath))
		return;

	sx_xunlock(&man->io_reserve_mutex);
}

static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
{
	struct ttm_buffer_object *bo;

	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
		return -EAGAIN;

	bo = list_first_entry(&man->io_reserve_lru,
			      struct ttm_buffer_object,
			      io_reserve_lru);
	list_del_init(&bo->io_reserve_lru);
	ttm_bo_unmap_virtual_locked(bo);

	return 0;
}
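
/*
 * Usage sketch (hypothetical driver code, not part of this file): a
 * driver's bo_move() hook typically tries a hardware copy first and
 * falls back to the helpers in this file.  The "foo_" names below are
 * illustrative only.
 *
 *	static int foo_bo_move(struct ttm_buffer_object *bo, bool evict,
 *	    bool interruptible, bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 *	{
 *		if (new_mem->mem_type == TTM_PL_SYSTEM)
 *			return (ttm_bo_move_ttm(bo, evict, no_wait_gpu,
 *			    new_mem));
 *		return (ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem));
 *	}
 */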

static int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
			      struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (!bdev->driver->io_mem_reserve)
		return 0;
	if (likely(man->io_reserve_fastpath))
		return bdev->driver->io_mem_reserve(bdev, mem);

	if (bdev->driver->io_mem_reserve &&
	    mem->bus.io_reserved_count++ == 0) {
retry:
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (ret == -EAGAIN) {
			ret = ttm_mem_io_evict(man);
			if (ret == 0)
				goto retry;
		}
	}
	return ret;
}

static void ttm_mem_io_free(struct ttm_bo_device *bdev,
			    struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (likely(man->io_reserve_fastpath))
		return;

	if (bdev->driver->io_mem_reserve &&
	    --mem->bus.io_reserved_count == 0 &&
	    bdev->driver->io_mem_free)
		bdev->driver->io_mem_free(bdev, mem);
}

int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;
	int ret;

	if (!mem->bus.io_reserved_vm) {
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[mem->mem_type];

		ret = ttm_mem_io_reserve(bo->bdev, mem);
		if (unlikely(ret != 0))
			return ret;
		mem->bus.io_reserved_vm = true;
		if (man->use_io_reserve_lru)
			list_add_tail(&bo->io_reserve_lru,
				      &man->io_reserve_lru);
	}
	return 0;
}

void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (mem->bus.io_reserved_vm) {
		mem->bus.io_reserved_vm = false;
		list_del_init(&bo->io_reserve_lru);
		ttm_mem_io_free(bo->bdev, mem);
	}
}
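
/*
 * Sketch of the reserve/free pairing assumed above (illustrative, not
 * part of the original file): every successful ttm_mem_io_reserve()
 * must eventually be balanced by a ttm_mem_io_free(), both taken under
 * the manager's io_reserve lock, as ttm_mem_reg_ioremap()/_iounmap()
 * below do:
 *
 *	(void) ttm_mem_io_lock(man, false);
 *	ret = ttm_mem_io_reserve(bdev, mem);
 *	ttm_mem_io_unlock(man);
 *	... use mem->bus.base, mem->bus.offset or mem->bus.addr ...
 *	(void) ttm_mem_io_lock(man, false);
 *	ttm_mem_io_free(bdev, mem);
 *	ttm_mem_io_unlock(man);
 */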

static
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	int ret;
	void *addr;

	*virtual = NULL;
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bdev, mem);
	ttm_mem_io_unlock(man);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		addr = pmap_mapdev_attr(mem->bus.base + mem->bus.offset,
		    mem->bus.size, (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		if (!addr) {
			(void) ttm_mem_io_lock(man, false);
			ttm_mem_io_free(bdev, mem);
			ttm_mem_io_unlock(man);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

static
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		pmap_unmapdev((vm_offset_t)virtual, mem->bus.size);
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(bdev, mem);
	ttm_mem_io_unlock(man);
}

static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));

	int i;
	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		/* iowrite32(ioread32(srcP++), dstP++); */
		*dstP++ = *srcP++;
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t d = ttm->pages[page];
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

	/* XXXKIB can't sleep ? */
	dst = pmap_mapdev_attr(VM_PAGE_TO_PHYS(d), PAGE_SIZE, prot);
	if (!dst)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)dst, PAGE_SIZE);

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				vm_memattr_t prot)
{
	vm_page_t s = ttm->pages[page];
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
	src = pmap_mapdev_attr(VM_PAGE_TO_PHYS(s), PAGE_SIZE, prot);
	if (!src)
		return -ENOMEM;

	memcpy(dst, src, PAGE_SIZE);

	pmap_unmapdev((vm_offset_t)src, PAGE_SIZE);

	return 0;
}
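
/*
 * Note on the copy direction below (explanatory sketch, not original to
 * this file): when the source and destination ranges live in the same
 * memory type and overlap, ttm_bo_move_memcpy() copies backwards, for
 * the same reason memmove(3) does.  For example, moving a 4-page BO
 * from page offset 0 to page offset 2 of the same aperture copies
 * pages 3, 2, 1, 0 (dir = -1, add = num_pages - 1) so that no source
 * page is overwritten before it has been read.
 */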

int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	/* TTM might be null for moves within the same region. */
	if (ttm != NULL && ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret) {
			/*
			 * If we fail here, don't nuke the mm node,
			 * as the bo still owns it.
			 */
			old_copy.mm_node = NULL;
			goto out1;
		}
	}

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(old_mem->placement);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			vm_memattr_t prot = ttm_io_prot(new_mem->placement);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	old_copy = *old_mem;
	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	ttm_bo_mem_put(bo, &old_copy);
	return ret;
}

MALLOC_DEFINE(M_TTM_TRANSF_OBJ, "ttm_transf_obj", "TTM Transfer Objects");

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	free(bo, M_TTM_TRANSF_OBJ);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int
ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
			   void *sync_obj, struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;

	fbo = malloc(sizeof(*fbo), M_TTM_TRANSF_OBJ, M_ZERO | M_WAITOK);
	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	INIT_LIST_HEAD(&fbo->io_reserve_lru);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);

	fbo->sync_obj = sync_obj;
	refcount_init(&fbo->list_kref, 1);
	refcount_init(&fbo->kref, 1);
	fbo->destroy = &ttm_transfered_destroy;
	fbo->acc_size = 0;

	*new_obj = fbo;
	return 0;
}

vm_memattr_t
ttm_io_prot(uint32_t caching_flags)
{
#if defined(__i386__) || defined(__amd64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		return (VM_MEMATTR_WRITE_COMBINING);
	else
		/*
		 * Unlike the Linux ttm_io_prot(), which only disables
		 * caching on CPUs newer than the 80386, we return an
		 * uncacheable mapping unconditionally; we do not
		 * support the 80386 anyway.
		 */
		return (VM_MEMATTR_UNCACHEABLE);
#else
#error Port me
#endif
}
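
/*
 * Example (illustrative, mirrors the copy helpers above): a caller that
 * wants a temporary CPU mapping honouring a BO's caching placement
 * picks the attribute with ttm_io_prot() and hands it to
 * pmap_mapdev_attr(), where "m" is a vm_page_t backing the BO:
 *
 *	vm_memattr_t prot = ttm_io_prot(bo->mem.placement);
 *	void *va = pmap_mapdev_attr(VM_PAGE_TO_PHYS(m), PAGE_SIZE, prot);
 *	...
 *	pmap_unmapdev((vm_offset_t)va, PAGE_SIZE);
 */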

static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		map->virtual = pmap_mapdev_attr(bo->mem.bus.base +
		    bo->mem.bus.offset + offset, size,
		    (mem->placement & TTM_PL_FLAG_WC) ?
		    VM_MEMATTR_WRITE_COMBINING : VM_MEMATTR_UNCACHEABLE);
		map->size = size;
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	vm_memattr_t prot;
	struct ttm_tt *ttm = bo->ttm;
	int i, ret;

	MPASS(ttm != NULL);

	if (ttm->state == tt_unpopulated) {
		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
		if (ret)
			return ret;
	}

	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm->pages[start_page];
		map->sf = sf_buf_alloc(map->page, 0);
		map->virtual = (void *)sf_buf_kva(map->sf);
	} else {
		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.  A cached
		 * placement keeps the default write-back attribute;
		 * everything else goes through ttm_io_prot().
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
		    VM_MEMATTR_WRITE_BACK :
		    ttm_io_prot(mem->placement);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->num_pages = num_pages;
		map->virtual = (void *)kmem_alloc_nofault(kernel_map,
		    num_pages * PAGE_SIZE);
		if (map->virtual != NULL) {
			for (i = 0; i < num_pages; i++) {
				/* XXXKIB hack */
				pmap_page_set_memattr(ttm->pages[start_page +
				    i], prot);
			}
			pmap_qenter((vm_offset_t)map->virtual,
			    &ttm->pages[start_page], num_pages);
		}
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];
	unsigned long offset, size;
	int ret;

	MPASS(list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	(void) ttm_mem_io_lock(man, false);
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	ttm_mem_io_unlock(man);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}

void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	struct ttm_buffer_object *bo = map->bo;
	struct ttm_mem_type_manager *man =
		&bo->bdev->man[bo->mem.mem_type];

	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		pmap_unmapdev((vm_offset_t)map->virtual, map->size);
		break;
	case ttm_bo_map_vmap:
		pmap_qremove((vm_offset_t)(map->virtual), map->num_pages);
		kmem_free(kernel_map, (vm_offset_t)map->virtual,
		    map->num_pages * PAGE_SIZE);
		break;
	case ttm_bo_map_kmap:
		sf_buf_free(map->sf);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		MPASS(0);
	}
	(void) ttm_mem_io_lock(man, false);
	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
	ttm_mem_io_unlock(man);
	map->virtual = NULL;
	map->page = NULL;
	map->sf = NULL;
}
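
/*
 * Usage sketch for the kmap interface (illustrative only): map the
 * first page of a BO for CPU access and release it again.  The caller
 * is expected to hold a reservation on "bo" across the map/unmap.
 *
 *	struct ttm_bo_kmap_obj map;
 *	int ret;
 *
 *	ret = ttm_bo_kmap(bo, 0, 1, &map);
 *	if (ret == 0) {
 *		memset(map.virtual, 0, PAGE_SIZE);
 *		ttm_bo_kunmap(&map);
 *	}
 */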

int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      bool evict,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;
	void *sync_obj_ref;

	mtx_lock(&bdev->fence_lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		mtx_unlock(&bdev->fence_lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
		ttm_bo_free_old_node(bo);
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);

		sync_obj_ref = bo->bdev->driver->sync_obj_ref(bo->sync_obj);
		mtx_unlock(&bdev->fence_lock);
		/* ttm_buffer_object_transfer accesses bo->sync_obj */
		ret = ttm_buffer_object_transfer(bo, sync_obj_ref, &ghost_obj);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
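
/*
 * Usage sketch (hypothetical driver code): after queueing a hardware
 * blit from the old to the new placement, a driver hands the resulting
 * fence to ttm_bo_move_accel_cleanup() so that the old backing store is
 * released only once the copy has completed.  "foo_" names are
 * illustrative only.
 *
 *	fence = foo_copy_buffer(bo, old_mem, new_mem);
 *	return (ttm_bo_move_accel_cleanup(bo, fence, evict, no_wait_gpu,
 *	    new_mem));
 */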