/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include <drm/drmP.h>
#include <drm/radeon_drm.h>
#include "radeon.h"
#include "radeon_trace.h"

/*
 * GPUVM
 * GPUVM is similar to the legacy gart on older asics, however
 * rather than there being a single global gart table
 * for the entire GPU, there are multiple VM page tables active
 * at any given time. The VM page tables can contain a mix of
 * vram pages and system memory pages, and system memory pages
 * can be mapped as snooped (cached system pages) or unsnooped
 * (uncached system pages).
 * Each VM has an ID associated with it and there is a page table
 * associated with each VMID. When executing a command buffer,
 * the kernel tells the ring what VMID to use for that command
 * buffer. VMIDs are allocated dynamically as commands are submitted.
 * The userspace drivers maintain their own address space and the kernel
 * sets up their page tables accordingly when they submit their
 * command buffers and a VMID is assigned.
 * Cayman/Trinity support up to 8 active VMs at any given time;
 * SI supports 16.
 */
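
/*
 * Translation is two-level: the upper bits of a GPU page frame number
 * select a page directory entry (PDE) pointing at a page table, and the
 * lower bits select a page table entry (PTE) within it, i.e. roughly
 *
 *   pde_idx = pfn >> radeon_vm_block_size;
 *   pte_idx = pfn & (RADEON_VM_PTE_COUNT - 1);
 *
 * which is the split applied in radeon_vm_update_page_directory() and
 * radeon_vm_update_ptes() below.
 */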

/**
 * radeon_vm_num_pdes - return the number of page directory entries
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the number of page directory entries (cayman+).
 */
static unsigned radeon_vm_num_pdes(struct radeon_device *rdev)
{
	return rdev->vm_manager.max_pfn >> radeon_vm_block_size;
}

/**
 * radeon_vm_directory_size - returns the size of the page directory in bytes
 *
 * @rdev: radeon_device pointer
 *
 * Calculate the size of the page directory in bytes (cayman+).
 */
static unsigned radeon_vm_directory_size(struct radeon_device *rdev)
{
	return RADEON_GPU_PAGE_ALIGN(radeon_vm_num_pdes(rdev) * 8);
}

/**
 * radeon_vm_manager_init - init the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Init the vm manager (cayman+).
 * Returns 0 for success, error for failure.
 */
int radeon_vm_manager_init(struct radeon_device *rdev)
{
	int r;

	if (!rdev->vm_manager.enabled) {
		r = radeon_asic_vm_init(rdev);
		if (r)
			return r;

		rdev->vm_manager.enabled = true;
	}
	return 0;
}

/**
 * radeon_vm_manager_fini - tear down the vm manager
 *
 * @rdev: radeon_device pointer
 *
 * Tear down the VM manager (cayman+).
 */
void radeon_vm_manager_fini(struct radeon_device *rdev)
{
	int i;

	if (!rdev->vm_manager.enabled)
		return;

	for (i = 0; i < RADEON_NUM_VM; ++i)
		radeon_fence_unref(&rdev->vm_manager.active[i]);
	radeon_asic_vm_fini(rdev);
	rdev->vm_manager.enabled = false;
}

/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					 struct radeon_vm *vm,
					 struct list_head *head)
{
	struct radeon_bo_list *list;
	unsigned i, idx;

	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct radeon_bo_list));
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.shared = true;
	list[0].tiling_flags = 0;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list[idx].tiling_flags = 0;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}

/**
 * radeon_vm_grab_id - allocate the next free VMID
 *
 * @rdev: radeon_device pointer
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 *
 * Allocate an id for the vm (cayman+).
 * Returns the fence we need to sync to (if any).
 *
 * Global and local mutex must be locked!
 */
struct radeon_fence *radeon_vm_grab_id(struct radeon_device *rdev,
				       struct radeon_vm *vm, int ring)
{
	struct radeon_fence *best[RADEON_NUM_RINGS] = {};
	struct radeon_vm_id *vm_id = &vm->ids[ring];

	unsigned choices[2] = {};
	unsigned i;

	/* check if the id is still valid */
	if (vm_id->id && vm_id->last_id_use &&
	    vm_id->last_id_use == rdev->vm_manager.active[vm_id->id])
		return NULL;

	/* we definitely need to flush */
	vm_id->pd_gpu_addr = ~0ll;

	/* skip over VMID 0, since it is the system VM */
	for (i = 1; i < rdev->vm_manager.nvm; ++i) {
		struct radeon_fence *fence = rdev->vm_manager.active[i];

		if (fence == NULL) {
			/* found a free one */
			vm_id->id = i;
			trace_radeon_vm_grab_id(i, ring);
			return NULL;
		}

		if (radeon_fence_is_earlier(fence, best[fence->ring])) {
			best[fence->ring] = fence;
			choices[fence->ring == ring ? 0 : 1] = i;
		}
	}

	for (i = 0; i < 2; ++i) {
		if (choices[i]) {
			vm_id->id = choices[i];
			trace_radeon_vm_grab_id(choices[i], ring);
			return rdev->vm_manager.active[choices[i]];
		}
	}

	/* should never happen */
	BUG();
	return NULL;
}
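
/*
 * Note that choices[0] holds an id whose previous use was on the ring we
 * are submitting to, while choices[1] holds one last used on another ring;
 * the same-ring id is tried first, which typically avoids a cross-ring
 * synchronization on the returned fence.
 */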

/**
 * radeon_vm_flush - hardware flush the vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to flush
 * @ring: ring to use for flush
 * @updates: last vm update that is waited for
 *
 * Flush the vm (cayman+).
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_flush(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     int ring, struct radeon_fence *updates)
{
	uint64_t pd_addr = radeon_bo_gpu_offset(vm->page_directory);
	struct radeon_vm_id *vm_id = &vm->ids[ring];

	if (pd_addr != vm_id->pd_gpu_addr || !vm_id->flushed_updates ||
	    radeon_fence_is_earlier(vm_id->flushed_updates, updates)) {

		trace_radeon_vm_flush(pd_addr, ring, vm->ids[ring].id);
		radeon_fence_unref(&vm_id->flushed_updates);
		vm_id->flushed_updates = radeon_fence_ref(updates);
		vm_id->pd_gpu_addr = pd_addr;
		radeon_ring_vm_flush(rdev, &rdev->ring[ring],
				     vm_id->id, vm_id->pd_gpu_addr);

	}
}

/**
 * radeon_vm_fence - remember fence for vm
 *
 * @rdev: radeon_device pointer
 * @vm: vm we want to fence
 * @fence: fence to remember
 *
 * Fence the vm (cayman+).
 * Set the fence used to protect page table and id.
 *
 * Global and local mutex must be locked!
 */
void radeon_vm_fence(struct radeon_device *rdev,
		     struct radeon_vm *vm,
		     struct radeon_fence *fence)
{
	unsigned vm_id = vm->ids[fence->ring].id;

	radeon_fence_unref(&rdev->vm_manager.active[vm_id]);
	rdev->vm_manager.active[vm_id] = radeon_fence_ref(fence);

	radeon_fence_unref(&vm->ids[fence->ring].last_id_use);
	vm->ids[fence->ring].last_id_use = radeon_fence_ref(fence);
}

/**
 * radeon_vm_bo_find - find the bo_va for a specific vm & bo
 *
 * @vm: requested vm
 * @bo: requested buffer object
 *
 * Find @bo inside the requested vm (cayman+).
 * Search inside the @bo's vm list for the requested vm.
 * Returns the found bo_va or NULL if none is found.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_find(struct radeon_vm *vm,
				       struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		if (bo_va->vm == vm) {
			return bo_va;
		}
	}
	return NULL;
}

/**
 * radeon_vm_bo_add - add a bo to a specific vm
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @bo: radeon buffer object
 *
 * Add @bo into the requested vm (cayman+).
 * Add @bo to the list of bos associated with the vm.
 * Returns newly added bo_va or NULL for failure.
 *
 * Object has to be reserved!
 */
struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
				      struct radeon_vm *vm,
				      struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	bo_va = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
	if (bo_va == NULL) {
		return NULL;
	}
	bo_va->vm = vm;
	bo_va->bo = bo;
	bo_va->it.start = 0;
	bo_va->it.last = 0;
	bo_va->flags = 0;
	bo_va->ref_count = 1;
	INIT_LIST_HEAD(&bo_va->bo_list);
	INIT_LIST_HEAD(&bo_va->vm_status);

	mutex_lock(&vm->mutex);
	list_add_tail(&bo_va->bo_list, &bo->va);
	mutex_unlock(&vm->mutex);

	return bo_va;
}

/**
 * radeon_vm_set_pages - helper to call the right asic function
 *
 * @rdev: radeon_device pointer
 * @ib: indirect buffer to fill with commands
 * @pe: addr of the page entry
 * @addr: dst addr to write into pe
 * @count: number of page entries to update
 * @incr: increase next addr by incr bytes
 * @flags: hw access flags
 *
 * Traces the parameters and calls the right asic functions
 * to setup the page table using the DMA.
 */
static void radeon_vm_set_pages(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe,
				uint64_t addr, unsigned count,
				uint32_t incr, uint32_t flags)
{
	trace_radeon_vm_set_page(pe, addr, count, incr, flags);

	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		uint64_t src = rdev->gart.table_addr + (addr >> 12) * 8;
		radeon_asic_vm_copy_pages(rdev, ib, pe, src, count);

	} else if ((flags & R600_PTE_SYSTEM) || (count < 3)) {
		radeon_asic_vm_write_pages(rdev, ib, pe, addr,
					   count, incr, flags);

	} else {
		radeon_asic_vm_set_pages(rdev, ib, pe, addr,
					 count, incr, flags);
	}
}

/**
 * radeon_vm_clear_bo - initially clear the page dir/table
 *
 * @rdev: radeon_device pointer
 * @bo: bo to clear
 */
static int radeon_vm_clear_bo(struct radeon_device *rdev,
			      struct radeon_bo *bo)
{
	struct radeon_ib ib;
	unsigned entries;
	uint64_t addr;
	int r;

	r = radeon_bo_reserve(bo, false);
	if (r)
		return r;

	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
	if (r)
		goto error_unreserve;

	addr = radeon_bo_gpu_offset(bo);
	entries = radeon_bo_size(bo) / 8;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, 256);
	if (r)
		goto error_unreserve;

	ib.length_dw = 0;

	radeon_vm_set_pages(rdev, &ib, addr, 0, entries, 0, 0);
	radeon_asic_vm_pad_ib(rdev, &ib);
	WARN_ON(ib.length_dw > 64);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r)
		goto error_free;

	ib.fence->is_vm_update = true;
	radeon_bo_fence(bo, ib.fence, false);

error_free:
	radeon_ib_free(rdev, &ib);

error_unreserve:
	radeon_bo_unreserve(bo);
	return r;
}
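
/*
 * radeon_vm_clear_bo() fills the BO with zero entries; none of them carry
 * R600_PTE_VALID, so a freshly allocated page directory or page table
 * starts with every mapping invalid rather than pointing at stale data.
 */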

/**
 * radeon_vm_bo_set_addr - set bo's virtual address inside a vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: bo_va to store the address
 * @soffset: requested offset of the buffer in the VM address space
 * @flags: attributes of pages (read/write/valid/etc.)
 *
 * Set offset of @bo_va (cayman+).
 * Validate and set the offset requested within the vm address space.
 * Returns 0 for success, error for failure.
 *
 * Object has to be reserved and gets unreserved by this function!
 */
int radeon_vm_bo_set_addr(struct radeon_device *rdev,
			  struct radeon_bo_va *bo_va,
			  uint64_t soffset,
			  uint32_t flags)
{
	uint64_t size = radeon_bo_size(bo_va->bo);
	struct radeon_vm *vm = bo_va->vm;
	unsigned last_pfn, pt_idx;
	uint64_t eoffset;
	int r;

	if (soffset) {
		/* make sure object fit at this offset */
		eoffset = soffset + size;
		if (soffset >= eoffset) {
			r = -EINVAL;
			goto error_unreserve;
		}

		last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
		if (last_pfn > rdev->vm_manager.max_pfn) {
			dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
				last_pfn, rdev->vm_manager.max_pfn);
			r = -EINVAL;
			goto error_unreserve;
		}

	} else {
		eoffset = last_pfn = 0;
	}

	mutex_lock(&vm->mutex);
	soffset /= RADEON_GPU_PAGE_SIZE;
	eoffset /= RADEON_GPU_PAGE_SIZE;
	if (soffset || eoffset) {
		struct interval_tree_node *it;

		it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
		if (it && it != &bo_va->it) {
			struct radeon_bo_va *tmp;

			tmp = container_of(it, struct radeon_bo_va, it);
			/* bo and tmp overlap, invalid offset */
			dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
				"(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
				soffset, tmp->bo, tmp->it.start, tmp->it.last);
			mutex_unlock(&vm->mutex);
			r = -EINVAL;
			goto error_unreserve;
		}
	}

	if (bo_va->it.start || bo_va->it.last) {
		spin_lock(&vm->status_lock);
		if (list_empty(&bo_va->vm_status)) {
			/* add a clone of the bo_va to clear the old address */
			struct radeon_bo_va *tmp;

			spin_unlock(&vm->status_lock);
			tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
			if (!tmp) {
				mutex_unlock(&vm->mutex);
				r = -ENOMEM;
				goto error_unreserve;
			}
			tmp->it.start = bo_va->it.start;
			tmp->it.last = bo_va->it.last;
			tmp->vm = vm;
			tmp->bo = radeon_bo_ref(bo_va->bo);
			spin_lock(&vm->status_lock);
			list_add(&tmp->vm_status, &vm->freed);
		}
		spin_unlock(&vm->status_lock);

		interval_tree_remove(&bo_va->it, &vm->va);
		bo_va->it.start = 0;
		bo_va->it.last = 0;
	}

	if (soffset || eoffset) {
		bo_va->it.start = soffset;
		bo_va->it.last = eoffset - 1;
		interval_tree_insert(&bo_va->it, &vm->va);
		spin_lock(&vm->status_lock);
		list_add(&bo_va->vm_status, &vm->cleared);
		spin_unlock(&vm->status_lock);
	}

	bo_va->flags = flags;

	soffset >>= radeon_vm_block_size;
	eoffset >>= radeon_vm_block_size;

	BUG_ON(eoffset >= radeon_vm_num_pdes(rdev));

	if (eoffset > vm->max_pde_used)
		vm->max_pde_used = eoffset;

	radeon_bo_unreserve(bo_va->bo);

	/* walk over the address space and allocate the page tables */
	for (pt_idx = soffset; pt_idx <= eoffset; ++pt_idx) {
		struct radeon_bo *pt;

		if (vm->page_tables[pt_idx].bo)
			continue;

		/* drop mutex to allocate and clear page table */
		mutex_unlock(&vm->mutex);

		r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
				     RADEON_GPU_PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, 0,
				     NULL, NULL, &pt);
		if (r)
			return r;

		r = radeon_vm_clear_bo(rdev, pt);
		if (r) {
			radeon_bo_unref(&pt);
			return r;
		}

		/* acquire mutex again */
		mutex_lock(&vm->mutex);
		if (vm->page_tables[pt_idx].bo) {
			/* someone else allocated the pt in the meantime */
			mutex_unlock(&vm->mutex);
			radeon_bo_unref(&pt);
			mutex_lock(&vm->mutex);
			continue;
		}

		vm->page_tables[pt_idx].addr = 0;
		vm->page_tables[pt_idx].bo = pt;
	}

	mutex_unlock(&vm->mutex);
	return 0;

error_unreserve:
	radeon_bo_unreserve(bo_va->bo);
	return r;
}
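
/*
 * Typical flow: a bo_va is created with radeon_vm_bo_add(), gets its
 * virtual address assigned here (e.g. from the GEM VA ioctl), and the
 * actual page table entries are only written later by
 * radeon_vm_bo_update() when the buffer is used in a command submission.
 */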

/**
 * radeon_vm_map_gart - get the physical address of a gart page
 *
 * @rdev: radeon_device pointer
 * @addr: the unmapped addr
 *
 * Look up the physical address of the page that the pte resolves
 * to (cayman+).
 * Returns the physical address of the page.
 */
uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
{
	uint64_t result;

	/* page table offset */
	result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
	result &= ~RADEON_GPU_PAGE_MASK;

	return result;
}

/**
 * radeon_vm_page_flags - translate page flags to what the hw uses
 *
 * @flags: flags coming from userspace
 *
 * Translate the flags the userspace ABI uses to hw flags.
 */
static uint32_t radeon_vm_page_flags(uint32_t flags)
{
	uint32_t hw_flags = 0;

	hw_flags |= (flags & RADEON_VM_PAGE_VALID) ? R600_PTE_VALID : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_READABLE) ? R600_PTE_READABLE : 0;
	hw_flags |= (flags & RADEON_VM_PAGE_WRITEABLE) ? R600_PTE_WRITEABLE : 0;
	if (flags & RADEON_VM_PAGE_SYSTEM) {
		hw_flags |= R600_PTE_SYSTEM;
		hw_flags |= (flags & RADEON_VM_PAGE_SNOOPED) ? R600_PTE_SNOOPED : 0;
	}
	return hw_flags;
}

/**
 * radeon_vm_update_page_directory - make sure that page directory is valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Allocates new page tables if necessary
 * and updates the page directory (cayman+).
 * Returns 0 for success, error for failure.
 *
 * Global and local mutex must be locked!
 */
int radeon_vm_update_page_directory(struct radeon_device *rdev,
				    struct radeon_vm *vm)
{
	struct radeon_bo *pd = vm->page_directory;
	uint64_t pd_addr = radeon_bo_gpu_offset(pd);
	uint32_t incr = RADEON_VM_PTE_COUNT * 8;
	uint64_t last_pde = ~0, last_pt = ~0;
	unsigned count = 0, pt_idx, ndw;
	struct radeon_ib ib;
	int r;

	/* padding, etc. */
	ndw = 64;

	/* assume the worst case */
	ndw += vm->max_pde_used * 6;

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	/* walk over the address space and update the page directory */
	for (pt_idx = 0; pt_idx <= vm->max_pde_used; ++pt_idx) {
		struct radeon_bo *bo = vm->page_tables[pt_idx].bo;
		uint64_t pde, pt;

		if (bo == NULL)
			continue;

		pt = radeon_bo_gpu_offset(bo);
		if (vm->page_tables[pt_idx].addr == pt)
			continue;
		vm->page_tables[pt_idx].addr = pt;

		pde = pd_addr + pt_idx * 8;
		if (((last_pde + 8 * count) != pde) ||
		    ((last_pt + incr * count) != pt)) {

			if (count) {
				radeon_vm_set_pages(rdev, &ib, last_pde,
						    last_pt, count, incr,
						    R600_PTE_VALID);
			}

			count = 1;
			last_pde = pde;
			last_pt = pt;
		} else {
			++count;
		}
	}

	if (count)
		radeon_vm_set_pages(rdev, &ib, last_pde, last_pt, count,
				    incr, R600_PTE_VALID);

	if (ib.length_dw != 0) {
		radeon_asic_vm_pad_ib(rdev, &ib);

		radeon_sync_resv(rdev, &ib.sync, pd->tbo.resv, true);
		WARN_ON(ib.length_dw > ndw);
		r = radeon_ib_schedule(rdev, &ib, NULL, false);
		if (r) {
			radeon_ib_free(rdev, &ib);
			return r;
		}
		ib.fence->is_vm_update = true;
		radeon_bo_fence(pd, ib.fence, false);
	}
	radeon_ib_free(rdev, &ib);

	return 0;
}

/**
 * radeon_vm_frag_ptes - add fragment information to PTEs
 *
 * @rdev: radeon_device pointer
 * @ib: IB for the update
 * @pe_start: first PTE to handle
 * @pe_end: last PTE to handle
 * @addr: addr those PTEs should point to
 * @flags: hw mapping flags
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_frag_ptes(struct radeon_device *rdev,
				struct radeon_ib *ib,
				uint64_t pe_start, uint64_t pe_end,
				uint64_t addr, uint32_t flags)
{
	/*
	 * The MC L1 TLB supports variable sized pages, based on a fragment
	 * field in the PTE. When this field is set to a non-zero value, page
	 * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
	 * flags are considered valid for all PTEs within the fragment range
	 * and corresponding mappings are assumed to be physically contiguous.
	 *
	 * The L1 TLB can store a single PTE for the whole fragment,
	 * significantly increasing the space available for translation
	 * caching. This leads to large improvements in throughput when the
	 * TLB is under pressure.
	 *
	 * The L2 TLB distributes small and large fragments into two
	 * asymmetric partitions. The large fragment cache is significantly
	 * larger. Thus, we try to use large fragments wherever possible.
	 * Userspace can support this by aligning virtual base address and
	 * allocation size to the fragment size.
	 */
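
	/*
	 * For example, with frag = 4 the granularity is 1 << (12 + 4) = 64KB,
	 * i.e. 16 pages of 4KB; the 16 corresponding 8-byte PTEs span 0x80
	 * bytes, which is the alignment used below for SI and newer parts.
	 * The 256KB fragments preferred on NI cover 64 PTEs, i.e. 0x200 bytes.
	 */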

	/* NI is optimized for 256KB fragments, SI and newer for 64KB */
	uint64_t frag_flags = ((rdev->family == CHIP_CAYMAN) ||
			       (rdev->family == CHIP_ARUBA)) ?
				R600_PTE_FRAG_256KB : R600_PTE_FRAG_64KB;
	uint64_t frag_align = ((rdev->family == CHIP_CAYMAN) ||
			       (rdev->family == CHIP_ARUBA)) ? 0x200 : 0x80;

	uint64_t frag_start = ALIGN(pe_start, frag_align);
	uint64_t frag_end = pe_end & ~(frag_align - 1);

	unsigned count;

	/* system pages are non contiguous */
	if ((flags & R600_PTE_SYSTEM) || !(flags & R600_PTE_VALID) ||
	    (frag_start >= frag_end)) {

		count = (pe_end - pe_start) / 8;
		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
		return;
	}

	/* handle the 4K area at the beginning */
	if (pe_start != frag_start) {
		count = (frag_start - pe_start) / 8;
		radeon_vm_set_pages(rdev, ib, pe_start, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
		addr += RADEON_GPU_PAGE_SIZE * count;
	}

	/* handle the area in the middle */
	count = (frag_end - frag_start) / 8;
	radeon_vm_set_pages(rdev, ib, frag_start, addr, count,
			    RADEON_GPU_PAGE_SIZE, flags | frag_flags);

	/* handle the 4K area at the end */
	if (frag_end != pe_end) {
		addr += RADEON_GPU_PAGE_SIZE * count;
		count = (pe_end - frag_end) / 8;
		radeon_vm_set_pages(rdev, ib, frag_end, addr, count,
				    RADEON_GPU_PAGE_SIZE, flags);
	}
}

/**
 * radeon_vm_update_ptes - make sure that page tables are valid
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 * @ib: indirect buffer to fill with commands
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @dst: destination address to map to
 * @flags: mapping flags
 *
 * Update the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static int radeon_vm_update_ptes(struct radeon_device *rdev,
				 struct radeon_vm *vm,
				 struct radeon_ib *ib,
				 uint64_t start, uint64_t end,
				 uint64_t dst, uint32_t flags)
{
	uint64_t mask = RADEON_VM_PTE_COUNT - 1;
	uint64_t last_pte = ~0, last_dst = ~0;
	unsigned count = 0;
	uint64_t addr;

	/* walk over the address space and update the page tables */
	for (addr = start; addr < end; ) {
		uint64_t pt_idx = addr >> radeon_vm_block_size;
		struct radeon_bo *pt = vm->page_tables[pt_idx].bo;
		unsigned nptes;
		uint64_t pte;
		int r;

		radeon_sync_resv(rdev, &ib->sync, pt->tbo.resv, true);
		r = reservation_object_reserve_shared(pt->tbo.resv);
		if (r)
			return r;

		if ((addr & ~mask) == (end & ~mask))
			nptes = end - addr;
		else
			nptes = RADEON_VM_PTE_COUNT - (addr & mask);

		pte = radeon_bo_gpu_offset(pt);
		pte += (addr & mask) * 8;

		if ((last_pte + 8 * count) != pte) {

			if (count) {
				radeon_vm_frag_ptes(rdev, ib, last_pte,
						    last_pte + 8 * count,
						    last_dst, flags);
			}

			count = nptes;
			last_pte = pte;
			last_dst = dst;
		} else {
			count += nptes;
		}

		addr += nptes;
		dst += nptes * RADEON_GPU_PAGE_SIZE;
	}

	if (count) {
		radeon_vm_frag_ptes(rdev, ib, last_pte,
				    last_pte + 8 * count,
				    last_dst, flags);
	}

	return 0;
}

/**
 * radeon_vm_fence_pts - fence page tables after an update
 *
 * @vm: requested vm
 * @start: start of GPU address range
 * @end: end of GPU address range
 * @fence: fence to use
 *
 * Fence the page tables in the range @start - @end (cayman+).
 *
 * Global and local mutex must be locked!
 */
static void radeon_vm_fence_pts(struct radeon_vm *vm,
				uint64_t start, uint64_t end,
				struct radeon_fence *fence)
{
	unsigned i;

	start >>= radeon_vm_block_size;
	end >>= radeon_vm_block_size;

	for (i = start; i <= end; ++i)
		radeon_bo_fence(vm->page_tables[i].bo, fence, true);
}

/**
 * radeon_vm_bo_update - map a bo into the vm page table
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 * @mem: ttm mem
 *
 * Fill in the page table entries for @bo_va (cayman+).
 * Returns 0 for success, -EINVAL for failure.
 *
 * Object has to be reserved and mutex must be locked!
 */
int radeon_vm_bo_update(struct radeon_device *rdev,
			struct radeon_bo_va *bo_va,
			struct ttm_mem_reg *mem)
{
	struct radeon_vm *vm = bo_va->vm;
	struct radeon_ib ib;
	unsigned nptes, ncmds, ndw;
	uint64_t addr;
	uint32_t flags;
	int r;

	if (!bo_va->it.start) {
		dev_err(rdev->dev, "bo %p doesn't have a mapping in vm %p\n",
			bo_va->bo, vm);
		return -EINVAL;
	}

	spin_lock(&vm->status_lock);
	if (mem) {
		if (list_empty(&bo_va->vm_status)) {
			spin_unlock(&vm->status_lock);
			return 0;
		}
		list_del_init(&bo_va->vm_status);
	} else {
		list_del(&bo_va->vm_status);
		list_add(&bo_va->vm_status, &vm->cleared);
	}
	spin_unlock(&vm->status_lock);

	bo_va->flags &= ~RADEON_VM_PAGE_VALID;
	bo_va->flags &= ~RADEON_VM_PAGE_SYSTEM;
	bo_va->flags &= ~RADEON_VM_PAGE_SNOOPED;
	if (bo_va->bo && radeon_ttm_tt_is_readonly(bo_va->bo->tbo.ttm))
		bo_va->flags &= ~RADEON_VM_PAGE_WRITEABLE;

	if (mem) {
		addr = mem->start << PAGE_SHIFT;
		if (mem->mem_type != TTM_PL_SYSTEM) {
			bo_va->flags |= RADEON_VM_PAGE_VALID;
		}
		if (mem->mem_type == TTM_PL_TT) {
			bo_va->flags |= RADEON_VM_PAGE_SYSTEM;
			if (!(bo_va->bo->flags & (RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC)))
				bo_va->flags |= RADEON_VM_PAGE_SNOOPED;

		} else {
			addr += rdev->vm_manager.vram_base_offset;
		}
	} else {
		addr = 0;
	}

	trace_radeon_vm_bo_update(bo_va);

	nptes = bo_va->it.last - bo_va->it.start + 1;

	/* reserve space for one command every (1 << BLOCK_SIZE) entries
	   or 2k dwords (whatever is smaller) */
	ncmds = (nptes >> min(radeon_vm_block_size, 11)) + 1;
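
	/*
	 * As an illustration (assuming a 9-bit block size): mapping a 1MB BO
	 * in VRAM gives nptes = 256 and ncmds = (256 >> 9) + 1 = 1, so the
	 * sizing below reserves ndw = 64 + 1 * 10 + 2 * 10 = 94 dwords.
	 */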

	/* padding, etc. */
	ndw = 64;

	flags = radeon_vm_page_flags(bo_va->flags);
	if ((flags & R600_PTE_GART_MASK) == R600_PTE_GART_MASK) {
		/* only copy commands needed */
		ndw += ncmds * 7;

	} else if (flags & R600_PTE_SYSTEM) {
		/* header for write data commands */
		ndw += ncmds * 4;

		/* body of write data command */
		ndw += nptes * 2;

	} else {
		/* set page commands needed */
		ndw += ncmds * 10;

		/* two extra commands for begin/end of fragment */
		ndw += 2 * 10;
	}

	/* update too big for an IB */
	if (ndw > 0xfffff)
		return -ENOMEM;

	r = radeon_ib_get(rdev, R600_RING_TYPE_DMA_INDEX, &ib, NULL, ndw * 4);
	if (r)
		return r;
	ib.length_dw = 0;

	if (!(bo_va->flags & RADEON_VM_PAGE_VALID)) {
		unsigned i;

		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_sync_fence(&ib.sync, vm->ids[i].last_id_use);
	}

	r = radeon_vm_update_ptes(rdev, vm, &ib, bo_va->it.start,
				  bo_va->it.last + 1, addr,
				  radeon_vm_page_flags(bo_va->flags));
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}

	radeon_asic_vm_pad_ib(rdev, &ib);
	WARN_ON(ib.length_dw > ndw);

	r = radeon_ib_schedule(rdev, &ib, NULL, false);
	if (r) {
		radeon_ib_free(rdev, &ib);
		return r;
	}
	ib.fence->is_vm_update = true;
	radeon_vm_fence_pts(vm, bo_va->it.start, bo_va->it.last + 1, ib.fence);
	radeon_fence_unref(&bo_va->last_pt_update);
	bo_va->last_pt_update = radeon_fence_ref(ib.fence);
	radeon_ib_free(rdev, &ib);

	return 0;
}

/**
 * radeon_vm_clear_freed - clear freed BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all freed BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_freed(struct radeon_device *rdev,
			  struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int r = 0;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->freed)) {
		bo_va = list_first_entry(&vm->freed,
					 struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		radeon_bo_unref(&bo_va->bo);
		radeon_fence_unref(&bo_va->last_pt_update);
		spin_lock(&vm->status_lock);
		list_del(&bo_va->vm_status);
		kfree(bo_va);
		if (r)
			break;
	}
	spin_unlock(&vm->status_lock);
	return r;
}

/**
 * radeon_vm_clear_invalids - clear invalidated BOs in the PT
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Make sure all invalidated BOs are cleared in the PT.
 * Returns 0 for success.
 *
 * PTs have to be reserved and mutex must be locked!
 */
int radeon_vm_clear_invalids(struct radeon_device *rdev,
			     struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va;
	int r;

	spin_lock(&vm->status_lock);
	while (!list_empty(&vm->invalidated)) {
		bo_va = list_first_entry(&vm->invalidated,
					 struct radeon_bo_va, vm_status);
		spin_unlock(&vm->status_lock);

		r = radeon_vm_bo_update(rdev, bo_va, NULL);
		if (r)
			return r;

		spin_lock(&vm->status_lock);
	}
	spin_unlock(&vm->status_lock);

	return 0;
}

/**
 * radeon_vm_bo_rmv - remove a bo from a specific vm
 *
 * @rdev: radeon_device pointer
 * @bo_va: requested bo_va
 *
 * Remove @bo_va->bo from the requested vm (cayman+).
 *
 * Object has to be reserved!
 */
void radeon_vm_bo_rmv(struct radeon_device *rdev,
		      struct radeon_bo_va *bo_va)
{
	struct radeon_vm *vm = bo_va->vm;

	list_del(&bo_va->bo_list);

	mutex_lock(&vm->mutex);
	if (bo_va->it.start || bo_va->it.last)
		interval_tree_remove(&bo_va->it, &vm->va);

	spin_lock(&vm->status_lock);
	list_del(&bo_va->vm_status);
	if (bo_va->it.start || bo_va->it.last) {
		bo_va->bo = radeon_bo_ref(bo_va->bo);
		list_add(&bo_va->vm_status, &vm->freed);
	} else {
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
	}
	spin_unlock(&vm->status_lock);

	mutex_unlock(&vm->mutex);
}

/**
 * radeon_vm_bo_invalidate - mark the bo as invalid
 *
 * @rdev: radeon_device pointer
 * @bo: radeon buffer object
 *
 * Mark @bo as invalid (cayman+).
 */
void radeon_vm_bo_invalidate(struct radeon_device *rdev,
			     struct radeon_bo *bo)
{
	struct radeon_bo_va *bo_va;

	list_for_each_entry(bo_va, &bo->va, bo_list) {
		spin_lock(&bo_va->vm->status_lock);
		if (list_empty(&bo_va->vm_status))
			list_add(&bo_va->vm_status, &bo_va->vm->invalidated);
		spin_unlock(&bo_va->vm->status_lock);
	}
}
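
/*
 * A bo_va's vm_status link puts it on at most one of three per-VM lists:
 * vm->invalidated when its BO moved and the PTEs are stale, vm->freed when
 * the mapping was removed but the old PTEs still need clearing, and
 * vm->cleared when the PTEs are currently cleared and must be rewritten
 * before use; a bo_va on no list is fully up to date.
 */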

/**
 * radeon_vm_init - initialize a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Init @vm fields (cayman+).
 */
int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
{
	const unsigned align = min(RADEON_VM_PTB_ALIGN_SIZE,
				   RADEON_VM_PTE_COUNT * 8);
	unsigned pd_size, pd_entries, pts_size;
	int i, r;

	vm->ib_bo_va = NULL;
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		vm->ids[i].id = 0;
		vm->ids[i].flushed_updates = NULL;
		vm->ids[i].last_id_use = NULL;
	}
	mutex_init(&vm->mutex);
	vm->va = RB_ROOT;
	spin_lock_init(&vm->status_lock);
	INIT_LIST_HEAD(&vm->invalidated);
	INIT_LIST_HEAD(&vm->freed);
	INIT_LIST_HEAD(&vm->cleared);

	pd_size = radeon_vm_directory_size(rdev);
	pd_entries = radeon_vm_num_pdes(rdev);

	/* allocate page table array */
	pts_size = pd_entries * sizeof(struct radeon_vm_pt);
	vm->page_tables = kzalloc(pts_size, GFP_KERNEL);
	if (vm->page_tables == NULL) {
		DRM_ERROR("Cannot allocate memory for page table array\n");
		return -ENOMEM;
	}

	r = radeon_bo_create(rdev, pd_size, align, true,
			     RADEON_GEM_DOMAIN_VRAM, 0, NULL,
			     NULL, &vm->page_directory);
	if (r)
		return r;

	r = radeon_vm_clear_bo(rdev, vm->page_directory);
	if (r) {
		radeon_bo_unref(&vm->page_directory);
		vm->page_directory = NULL;
		return r;
	}

	return 0;
}

/**
 * radeon_vm_fini - tear down a vm instance
 *
 * @rdev: radeon_device pointer
 * @vm: requested vm
 *
 * Tear down @vm (cayman+).
 * Unbind the VM and remove all bos from the vm bo list.
 */
void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
{
	struct radeon_bo_va *bo_va, *tmp;
	int i, r;

	if (!RB_EMPTY_ROOT(&vm->va)) {
		dev_err(rdev->dev, "still active bo inside vm\n");
	}
	rbtree_postorder_for_each_entry_safe(bo_va, tmp, &vm->va, it.rb) {
		interval_tree_remove(&bo_va->it, &vm->va);
		r = radeon_bo_reserve(bo_va->bo, false);
		if (!r) {
			list_del_init(&bo_va->bo_list);
			radeon_bo_unreserve(bo_va->bo);
			radeon_fence_unref(&bo_va->last_pt_update);
			kfree(bo_va);
		}
	}
	list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
		radeon_bo_unref(&bo_va->bo);
		radeon_fence_unref(&bo_va->last_pt_update);
		kfree(bo_va);
	}

	for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
		radeon_bo_unref(&vm->page_tables[i].bo);
	kfree(vm->page_tables);

	radeon_bo_unref(&vm->page_directory);

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		radeon_fence_unref(&vm->ids[i].flushed_updates);
		radeon_fence_unref(&vm->ids[i].last_id_use);
	}

	mutex_destroy(&vm->mutex);
}