/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs   <darktama@iinet.net.au>
 *	    Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"

#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

int
nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
{
	struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
	int ret;

	if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
		return 0;

	spin_lock(&nvbo->bo.lock);
	ret = ttm_bo_wait(&nvbo->bo, false, false, false);
	spin_unlock(&nvbo->bo.lock);
	return ret;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size.  Align the size to the
	 * appropriate boundaries.  This does imply that sizes are rounded up
	 * by 3-7 pages, so be aware of this and do not waste memory by
	 * allocating many small buffers.
	 */
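	/*
	 * Worked example (illustrative only, values assumed): with 256 MiB
	 * of VRAM, block_size = 256 MiB >> 15 = 8192, which is a power of
	 * two, so the loop below tries align = 12 * i * 8192 until it hits
	 * a multiple of 64 KiB; i = 2 gives align = 192 KiB, and the buffer
	 * size is then rounded up to that boundary.
	 */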
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}

	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);

			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}
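
/*
 * Typical usage (illustrative sketch, not from the original source): a
 * caller allocates a buffer object, pins it into VRAM and maps it for
 * CPU access:
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret;
 *
 *	ret = nouveau_bo_new(dev, NULL, 0x1000, 0, TTM_PL_FLAG_VRAM,
 *			     0, 0x0000, true, true, &nvbo);
 *	if (ret == 0)
 *		ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	if (ret == 0)
 *		ret = nouveau_bo_map(nvbo);
 */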

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);
}

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}
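
/*
 * CPU accessors for a kmapped buffer.  @index is in units of the access
 * size (u16/u32 elements, not bytes), and the object must have been
 * mapped with nouveau_bo_map() first; iomem mappings go through the
 * ioread/iowrite helpers.
 */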
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}


/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
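
/*
 * Emit a fence on the copy channel and hand it to TTM so that the old
 * backing store is released only after the GPU copy has completed.
 */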
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict || (nvbo->channel &&
						  nvbo->channel != chan),
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref((void *)&fence);
	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}
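
/*
 * Copy the backing store with the M2MF engine.  Source and destination
 * ctxdmas are selected with nouveau_bo_mem_ctxdma() and the data is moved
 * in chunks of up to 2047 lines of PAGE_SIZE bytes (pitch == line length
 * == PAGE_SIZE).  M2MF can only address VRAM and GART, so moves to or
 * from system memory are bounced through a temporary TT placement by
 * nouveau_bo_move_flipd()/nouveau_bo_move_flips() below.
 */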
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1<<8)|(1<<0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
}

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}

	return ret;
}
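
/*
 * Bind the new VRAM placement into the GPU's address space: on NV50 the
 * pages are mapped linearly into the VM, on NV1x-NV4x a tiling region is
 * set up instead.  nouveau_bo_vm_cleanup() below retires the old tiling
 * region once the buffer's fence has signalled.
 */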
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}
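
/*
 * Main TTM move callback.  If no channel is up yet the move is done with
 * a CPU memcpy; a system-memory object without a populated TTM is just
 * relabelled; otherwise the copy is done on the GPU, bouncing through TT
 * for system-memory transfers, with a CPU memcpy as the final fallback.
 */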
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};