/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>


#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
					(1U << bits) - 1, GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_free(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}
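
/*
 * Illustrative sketch (hypothetical caller, not code from this driver): a
 * typical PASID lifetime.  amdgpu_pasid_alloc() returns a negative errno on
 * failure, so the result must be checked before use; callers that have to
 * wait for in-flight work can use amdgpu_pasid_free_delayed() below instead
 * of amdgpu_pasid_free():
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 */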

/**
 * amdgpu_pasid_free_delayed - free pasid when fences signal
 *
 * @resv: reservation object with the fences to wait for
 * @pasid: pasid to free
 *
 * Free the pasid only after all the fences in resv are signaled.
 */
void amdgpu_pasid_free_delayed(struct dma_resv *resv,
			       u32 pasid)
{
	struct amdgpu_pasid_cb *cb;
	struct dma_fence *fence;
	int r;

	r = dma_resv_get_singleton(resv, DMA_RESV_USAGE_BOOKKEEP, &fence);
	if (r)
		goto fallback;

	if (!fence) {
		amdgpu_pasid_free(pasid);
		return;
	}

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb) {
		/* Last resort when we are OOM */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		amdgpu_pasid_free(pasid);
	} else {
		cb->pasid = pasid;
		if (dma_fence_add_callback(fence, &cb->cb,
					   amdgpu_pasid_free_cb))
			amdgpu_pasid_free_cb(fence, &cb->cb);
	}

	return;

fallback:
	/* Not enough memory for the delayed delete, as last resort
	 * block for all the fences to complete.
	 */
	dma_resv_wait_timeout(resv, DMA_RESV_USAGE_BOOKKEEP,
			      false, MAX_SCHEDULE_TIMEOUT);
	amdgpu_pasid_free(pasid);
}

/*
 * VMID manager
 *
 * VMIDs are a per VMHUB identifier for page tables handling.
 */

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if a GPU reset occurred since the last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID; if none is idle, return a fence to wait for in
 * @fence instead. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;

	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		*fence = dma_fence_get(&array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return 0;
	}
	kfree(fences);

	return 0;
}

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = id_mgr->reserved;
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Wait for the gang to be assembled before using a
		 * reserved VMID or otherwise the gang could deadlock.
		 */
		tmp = amdgpu_device_get_gang(adev);
		if (!dma_fence_is_signaled(tmp) && tmp != job->gang_submit) {
			*id = NULL;
			*fence = tmp;
			return 0;
		}
		dma_fence_put(tmp);

		/* Make sure the id is owned by the gang before proceeding */
		if (!job->gang_submit ||
		    (*id)->owner != vm->immediate.fence_context) {

			/* Don't use per engine and per process VMID at the
			 * same time
			 */
			if (adev->vm_manager.concurrent_flush)
				ring = NULL;

			/* to prevent one context starved by another context */
			(*id)->pd_gpu_addr = 0;
			tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
			if (tmp) {
				*id = NULL;
				*fence = dma_fence_get(tmp);
				return 0;
			}
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}
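
/*
 * Illustrative sketch (hypothetical caller, not code from this driver): how
 * a submission path might use amdgpu_vmid_grab() below.  When no VMID can be
 * assigned yet, *fence is set instead and the caller is expected to wait on
 * it (or schedule against it) before trying again:
 *
 *	struct dma_fence *fence = NULL;
 *	int r;
 *
 * retry:
 *	r = amdgpu_vmid_grab(vm, ring, job, &fence);
 *	if (r)
 *		return r;
 *	if (fence) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *		fence = NULL;
 *		goto retry;
 *	}
 */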

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (amdgpu_vmid_uses_reserved(adev, vm, vmhub)) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}

/*
 * amdgpu_vmid_uses_reserved - check if a VM will use a reserved VMID
 * @adev: amdgpu_device pointer
 * @vm: the VM to check
 * @vmhub: the VMHUB which will be used
 *
 * Returns: True if the VM will use a reserved VMID.
 */
bool amdgpu_vmid_uses_reserved(struct amdgpu_device *adev,
			       struct amdgpu_vm *vm, unsigned int vmhub)
{
	return vm->reserved_vmid[vmhub] ||
		(adev->enforce_isolation[(vm->root.bo->xcp_id != AMDGPU_XCP_NO_PARTITION) ?
					 vm->root.bo->xcp_id : 0] &&
		 AMDGPU_IS_GFXHUB(vmhub));
}

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);

	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}

	mutex_unlock(&id_mgr->lock);
	return 0;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (!--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
		id_mgr->reserved = NULL;
	}

	mutex_unlock(&id_mgr->lock);
}
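
/*
 * Illustrative sketch (hypothetical caller, not code from this driver): a
 * control path that reserves a VMID on the first GFX hub, e.g. to isolate
 * one process, and gives it back once it is no longer needed:
 *
 *	amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
 *	...
 *	amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
 */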

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force a switch on the next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VMID manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
	/* alloc a default reserved vmid to enforce isolation */
	for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
		if (adev->enforce_isolation[i])
			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
	}
}

/**
 * amdgpu_vmid_mgr_fini - cleanup VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VMID manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}
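
/*
 * Illustrative sketch (hypothetical call sequence, not code from this
 * driver): the VMID manager is set up once per device and torn down again on
 * driver unload; after a GPU reset a recovery path can force every VMID to
 * be flushed on its next use:
 *
 *	amdgpu_vmid_mgr_init(adev);
 *	...
 *	amdgpu_vmid_reset_all(adev);
 *	...
 *	amdgpu_vmid_mgr_fini(adev);
 */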