/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * based on nouveau_prime.c
 *
 * Authors: Alex Deucher
 */

/**
 * DOC: PRIME Buffer Sharing
 *
 * The following callback implementations are used for :ref:`sharing GEM buffer
 * objects between different devices via PRIME <prime_buffer_sharing>`.
 */
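
/*
 * A rough userspace sketch of the flow these callbacks serve (illustrative
 * only; error handling is omitted and "export_fd", "import_fd" and "handle"
 * are assumed to be an exporting DRM device fd, an importing DRM device fd
 * and a GEM handle on the exporter, respectively):
 *
 *	struct drm_prime_handle prime = {
 *		.handle = handle,
 *		.flags = DRM_CLOEXEC | DRM_RDWR,
 *	};
 *
 *	ioctl(export_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &prime);
 *	// prime.fd now refers to the shared DMA-buf
 *
 *	struct drm_prime_handle import = { .fd = prime.fd };
 *
 *	ioctl(import_fd, DRM_IOCTL_PRIME_FD_TO_HANDLE, &import);
 *	// import.handle names the same memory on the importing device
 */
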
#include "amdgpu.h"
#include "amdgpu_display.h"
#include "amdgpu_gem.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_vm.h"
#include "amdgpu_ttm.h"
#include <drm/amdgpu_drm.h>
#include <drm/ttm/ttm_tt.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence-array.h>
#include <linux/pci-p2pdma.h>

static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops;

/**
 * dma_buf_attach_adev - Helper to get adev of an attachment
 *
 * @attach: attachment
 *
 * Returns:
 * A struct amdgpu_device * if the attaching device is an amdgpu device or
 * partition, NULL otherwise.
 */
static struct amdgpu_device *dma_buf_attach_adev(struct dma_buf_attachment *attach)
{
	if (attach->importer_ops == &amdgpu_dma_buf_attach_ops) {
		struct drm_gem_object *obj = attach->importer_priv;
		struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

		return amdgpu_ttm_adev(bo->tbo.bdev);
	}

	return NULL;
}

/**
 * amdgpu_dma_buf_attach - &dma_buf_ops.attach implementation
 *
 * @dmabuf: DMA-buf we attach to
 * @attach: attachment to add
 *
 * Add the attachment as a user to the exported DMA-buf.
 */
static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	struct amdgpu_device *attach_adev = dma_buf_attach_adev(attach);
	struct drm_gem_object *obj = dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	int r;

	/*
	 * Disable peer-to-peer access for DCC-enabled VRAM surfaces on GFX12+,
	 * i.e. when the device runs GC 12.x or newer and the BO was created
	 * with the AMDGPU_GEM_CREATE_GFX12_DCC flag. Such buffers cannot be
	 * safely accessed over P2P due to device-local compression metadata;
	 * fall back to the system-memory path instead.
	 */
	if (amdgpu_ip_version(adev, GC_HWIP, 0) >= IP_VERSION(12, 0, 0) &&
	    bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC)
		attach->peer2peer = false;

	if (!amdgpu_dmabuf_is_xgmi_accessible(attach_adev, bo) &&
	    pci_p2pdma_distance(adev->pdev, attach->dev, false) < 0)
		attach->peer2peer = false;

	r = dma_resv_lock(bo->tbo.base.resv, NULL);
	if (r)
		return r;

	amdgpu_vm_bo_update_shared(bo);

	dma_resv_unlock(bo->tbo.base.resv);

	return 0;
}

/**
 * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
 *
 * @attach: attachment to pin down
 *
 * Pin the BO which is backing the DMA-buf so that it can't move any more.
 */
static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dmabuf->priv);
	u32 domains = bo->allowed_domains;

	dma_resv_assert_held(dmabuf->resv);

	/* Try pinning into VRAM to allow P2P with RDMA NICs without ODP
	 * support if all attachments can do P2P. If any attachment can't do
	 * P2P just pin into GTT instead.
	 *
	 * To avoid conflicting pinnings between GPUs and RDMA when move
	 * notifiers are disabled, only allow pinning in VRAM when move
	 * notifiers are enabled.
	 */
	if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
		domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
	} else {
		list_for_each_entry(attach, &dmabuf->attachments, node)
			if (!attach->peer2peer)
				domains &= ~AMDGPU_GEM_DOMAIN_VRAM;
	}

	if (domains & AMDGPU_GEM_DOMAIN_VRAM)
		bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;

	if (WARN_ON(!domains))
		return -EINVAL;

	return amdgpu_bo_pin(bo, domains);
}

/**
 * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
 *
 * @attach: attachment to unpin
 *
 * Unpin a previously pinned BO to make it movable again.
 */
static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	amdgpu_bo_unpin(bo);
}

/**
 * amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
 * @attach: DMA-buf attachment
 * @dir: DMA direction
 *
 * Makes sure that the shared DMA buffer can be accessed by the target device.
 * If the BO is not pinned it is first validated into GTT (or VRAM when the
 * attachment allows P2P), then an sg_table with the DMA addresses for the
 * importer is created.
 *
 * Returns:
 * sg_table filled with the DMA addresses to use or ERR_PTR with a negative
 * error code.
 */
static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
					   enum dma_data_direction dir)
{
	struct dma_buf *dma_buf = attach->dmabuf;
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct sg_table *sgt;
	long r;

	if (!bo->tbo.pin_count) {
		/* move buffer into GTT or VRAM */
		struct ttm_operation_ctx ctx = { false, false };
		unsigned int domains = AMDGPU_GEM_DOMAIN_GTT;

		if (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM &&
		    attach->peer2peer) {
			bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
			domains |= AMDGPU_GEM_DOMAIN_VRAM;
		}
		amdgpu_bo_placement_from_domain(bo, domains);
		r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
		if (r)
			return ERR_PTR(r);
	}

	switch (bo->tbo.resource->mem_type) {
	case TTM_PL_TT:
		sgt = drm_prime_pages_to_sg(obj->dev,
					    bo->tbo.ttm->pages,
					    bo->tbo.ttm->num_pages);
		if (IS_ERR(sgt))
			return sgt;

		if (dma_map_sgtable(attach->dev, sgt, dir,
				    DMA_ATTR_SKIP_CPU_SYNC))
			goto error_free;
		break;

	case TTM_PL_VRAM:
		/* XGMI-accessible memory should never be DMA-mapped */
		if (WARN_ON(amdgpu_dmabuf_is_xgmi_accessible(
				dma_buf_attach_adev(attach), bo)))
			return ERR_PTR(-EINVAL);

		r = amdgpu_vram_mgr_alloc_sgt(adev, bo->tbo.resource, 0,
					      bo->tbo.base.size, attach->dev,
					      dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;

	case AMDGPU_PL_MMIO_REMAP:
		r = amdgpu_ttm_mmio_remap_alloc_sgt(adev, bo->tbo.resource,
						    attach->dev, dir, &sgt);
		if (r)
			return ERR_PTR(r);
		break;

	default:
		return ERR_PTR(-EINVAL);
	}

	return sgt;

error_free:
	sg_free_table(sgt);
	kfree(sgt);
	return ERR_PTR(-EBUSY);
}

/**
 * amdgpu_dma_buf_unmap - &dma_buf_ops.unmap_dma_buf implementation
 * @attach: DMA-buf attachment
 * @sgt: sg_table to unmap
 * @dir: DMA direction
 *
 * This is called when a shared DMA buffer no longer needs to be accessible by
 * another device. Unmaps and frees the sg_table created by
 * amdgpu_dma_buf_map().
 */
static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
				 struct sg_table *sgt,
				 enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	if (bo->tbo.resource &&
	    bo->tbo.resource->mem_type == AMDGPU_PL_MMIO_REMAP) {
		amdgpu_ttm_mmio_remap_free_sgt(attach->dev, dir, sgt);
		return;
	}

	if (sg_page(sgt->sgl)) {
		dma_unmap_sgtable(attach->dev, sgt, dir, 0);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		amdgpu_vram_mgr_free_sgt(attach->dev, dir, sgt);
	}
}
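
/*
 * A minimal sketch of how a dynamic importer typically consumes the two
 * callbacks above (illustrative only; error handling is omitted and "attach"
 * is assumed to come from dma_buf_dynamic_attach()):
 *
 *	struct sg_table *sgt;
 *
 *	dma_resv_lock(attach->dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *	if (!IS_ERR(sgt)) {
 *		// ... program the device with the addresses in sgt ...
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	}
 *	dma_resv_unlock(attach->dmabuf->resv);
 */
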
/**
 * amdgpu_dma_buf_begin_cpu_access - &dma_buf_ops.begin_cpu_access implementation
 * @dma_buf: Shared DMA buffer
 * @direction: Direction of DMA transfer
 *
 * This is called before CPU access to the shared DMA buffer's memory. If it's
 * a read access, the buffer is moved to the GTT domain if possible, for optimal
 * CPU read performance.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
					   enum dma_data_direction direction)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(dma_buf->priv);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { true, false };
	u32 domain = amdgpu_display_supported_domains(adev, bo->flags);
	int ret;
	bool reads = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_FROM_DEVICE);

	if (!reads || !(domain & AMDGPU_GEM_DOMAIN_GTT))
		return 0;

	/* move to gtt */
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	if (!bo->tbo.pin_count &&
	    (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
		amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
		ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
	}

	amdgpu_bo_unreserve(bo);
	return ret;
}

/**
 * amdgpu_dma_buf_vmap - &dma_buf_ops.vmap implementation
 * @dma_buf: Shared DMA buffer
 * @map: Returned CPU mapping
 *
 * Pins the BO and creates a kernel virtual mapping of its backing memory.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
static int amdgpu_dma_buf_vmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	int ret;

	/*
	 * Pin to keep the buffer in place while it's vmap'ed. The actual
	 * domain is not that important as long as it's mappable. Using
	 * GTT and VRAM should be compatible with most use cases.
	 */
	ret = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT | AMDGPU_GEM_DOMAIN_VRAM);
	if (ret)
		return ret;
	ret = drm_gem_dmabuf_vmap(dma_buf, map);
	if (ret)
		amdgpu_bo_unpin(bo);

	return ret;
}

/**
 * amdgpu_dma_buf_vunmap - &dma_buf_ops.vunmap implementation
 * @dma_buf: Shared DMA buffer
 * @map: CPU mapping to tear down
 *
 * Removes the kernel virtual mapping and unpins the BO again.
 */
static void amdgpu_dma_buf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);

	drm_gem_dmabuf_vunmap(dma_buf, map);
	amdgpu_bo_unpin(bo);
}

const struct dma_buf_ops amdgpu_dmabuf_ops = {
	.attach = amdgpu_dma_buf_attach,
	.pin = amdgpu_dma_buf_pin,
	.unpin = amdgpu_dma_buf_unpin,
	.map_dma_buf = amdgpu_dma_buf_map,
	.unmap_dma_buf = amdgpu_dma_buf_unmap,
	.release = drm_gem_dmabuf_release,
	.begin_cpu_access = amdgpu_dma_buf_begin_cpu_access,
	.mmap = drm_gem_dmabuf_mmap,
	.vmap = amdgpu_dma_buf_vmap,
	.vunmap = amdgpu_dma_buf_vunmap,
};

/**
 * amdgpu_gem_prime_export - &drm_driver.gem_prime_export implementation
 * @gobj: GEM BO
 * @flags: Flags such as DRM_CLOEXEC and DRM_RDWR.
 *
 * The main work is done by the &drm_gem_prime_export helper.
 *
 * Returns:
 * Shared DMA buffer representing the GEM BO from the given device.
 */
struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = true,
		/* We opt to avoid OOM on system page allocations */
		.gfp_retry_mayfail = true,
		.allow_res_evict = false,
	};
	int ret;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	ret = ttm_bo_setup_export(&bo->tbo, &ctx);
	if (ret)
		return ERR_PTR(ret);

	buf = drm_gem_prime_export(gobj, flags);
	if (!IS_ERR(buf))
		buf->ops = &amdgpu_dmabuf_ops;

	return buf;
}

/**
 * amdgpu_dma_buf_create_obj - create BO for DMA-buf import
 *
 * @dev: DRM device
 * @dma_buf: DMA-buf
 *
 * Creates an empty SG BO for DMA-buf import.
 *
 * Returns:
 * A new GEM BO of the given DRM device, representing the memory
 * described by the given DMA-buf attachment and scatter/gather table.
 */
static struct drm_gem_object *
amdgpu_dma_buf_create_obj(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_resv *resv = dma_buf->resv;
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	uint64_t flags = 0;
	int ret;

	dma_resv_lock(resv, NULL);

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		struct amdgpu_bo *other = gem_to_amdgpu_bo(dma_buf->priv);

		flags |= other->flags & (AMDGPU_GEM_CREATE_CPU_GTT_USWC |
					 AMDGPU_GEM_CREATE_COHERENT |
					 AMDGPU_GEM_CREATE_EXT_COHERENT |
					 AMDGPU_GEM_CREATE_UNCACHED);
	}

	ret = amdgpu_gem_object_create(adev, dma_buf->size, PAGE_SIZE,
				       AMDGPU_GEM_DOMAIN_CPU, flags,
				       ttm_bo_type_sg, resv, &gobj, 0);
	if (ret)
		goto error;

	bo = gem_to_amdgpu_bo(gobj);
	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;

	dma_resv_unlock(resv);
	return gobj;

error:
	dma_resv_unlock(resv);
	return ERR_PTR(ret);
}

/**
 * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
 *
 * @attach: the DMA-buf attachment
 *
 * Invalidate the DMA-buf attachment, making sure that we re-create the
 * mapping before the next use.
 */
static void
amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
{
	struct drm_gem_object *obj = attach->importer_priv;
	struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct ttm_operation_ctx ctx = { false, false };
	struct ttm_placement placement = {};
	struct amdgpu_vm_bo_base *bo_base;
	int r;

	/* FIXME: This should be after the "if", but needs a fix to make sure
	 * DMA-buf imports are initialized in the right VM list.
	 */
	amdgpu_vm_bo_invalidate(bo, false);
	if (!bo->tbo.resource || bo->tbo.resource->mem_type == TTM_PL_SYSTEM)
		return;

	r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
	if (r) {
		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
		return;
	}

	for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
		struct amdgpu_vm *vm = bo_base->vm;
		struct dma_resv *resv = vm->root.bo->tbo.base.resv;

		if (ticket) {
			/* When we get an error here it means that somebody
			 * else is holding the VM lock and updating page tables,
			 * so we can just continue here.
			 */
			r = dma_resv_lock(resv, ticket);
			if (r)
				continue;

		} else {
			/* TODO: This is more problematic and we actually need
			 * to allow page table updates without holding the
			 * lock.
			 */
			if (!dma_resv_trylock(resv))
				continue;
		}

		/* Reserve fences for two SDMA page table updates */
		r = dma_resv_reserve_fences(resv, 2);
		if (!r)
			r = amdgpu_vm_clear_freed(adev, vm, NULL);
		if (!r)
			r = amdgpu_vm_handle_moved(adev, vm, ticket);

		if (r && r != -EBUSY)
			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
				  r);

		dma_resv_unlock(resv);
	}
}
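
/*
 * A minimal sketch of the exporter side of this contract (illustrative only;
 * "abo" is assumed to be an exported amdgpu_bo whose backing store is about
 * to move, with the reservation lock already held). The exporter calls
 * dma_buf_move_notify(), which invokes each dynamic importer's
 * &dma_buf_attach_ops.move_notify, i.e. amdgpu_dma_buf_move_notify() above
 * for amdgpu importers:
 *
 *	dma_resv_assert_held(abo->tbo.base.resv);
 *	if (abo->tbo.base.dma_buf)
 *		dma_buf_move_notify(abo->tbo.base.dma_buf);
 */
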
static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
	.allow_peer2peer = true,
	.move_notify = amdgpu_dma_buf_move_notify
};

/**
 * amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
 * @dev: DRM device
 * @dma_buf: Shared DMA buffer
 *
 * Import a dma_buf into the driver and potentially create a new GEM object.
 *
 * Returns:
 * GEM BO representing the shared DMA buffer for the given device.
 */
struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
					       struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;

	if (dma_buf->ops == &amdgpu_dmabuf_ops) {
		obj = dma_buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing a dmabuf exported from our own gem increases
			 * the refcount on the gem itself instead of the f_count
			 * of the dmabuf.
			 */
			drm_gem_object_get(obj);
			return obj;
		}
	}

	obj = amdgpu_dma_buf_create_obj(dev, dma_buf);
	if (IS_ERR(obj))
		return obj;

	attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
					&amdgpu_dma_buf_attach_ops, obj);
	if (IS_ERR(attach)) {
		drm_gem_object_put(obj);
		return ERR_CAST(attach);
	}

	get_dma_buf(dma_buf);
	obj->import_attach = attach;
	return obj;
}

/**
 * amdgpu_dmabuf_is_xgmi_accessible - Check if XGMI is available for P2P transfer
 *
 * @adev: amdgpu_device pointer of the importer
 * @bo: amdgpu buffer object
 *
 * Returns:
 * True if the dmabuf is accessible over XGMI, false otherwise.
 */
bool amdgpu_dmabuf_is_xgmi_accessible(struct amdgpu_device *adev,
				      struct amdgpu_bo *bo)
{
	struct drm_gem_object *obj = &bo->tbo.base;
	struct drm_gem_object *gobj;

	if (!adev)
		return false;

	if (drm_gem_is_imported(obj)) {
		struct dma_buf *dma_buf = obj->import_attach->dmabuf;

		if (dma_buf->ops != &amdgpu_dmabuf_ops)
			/* No XGMI with non-AMD GPUs */
			return false;

		gobj = dma_buf->priv;
		bo = gem_to_amdgpu_bo(gobj);
	}

	if (amdgpu_xgmi_same_hive(adev, amdgpu_ttm_adev(bo->tbo.bdev)) &&
	    (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM))
		return true;

	return false;
}