1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2020 Intel Corporation 4 */ 5 6 #include "xe_migrate.h" 7 8 #include <linux/bitfield.h> 9 #include <linux/sizes.h> 10 11 #include <drm/drm_managed.h> 12 #include <drm/drm_pagemap.h> 13 #include <drm/ttm/ttm_tt.h> 14 #include <uapi/drm/xe_drm.h> 15 16 #include <generated/xe_wa_oob.h> 17 18 #include "instructions/xe_gpu_commands.h" 19 #include "instructions/xe_mi_commands.h" 20 #include "regs/xe_gtt_defs.h" 21 #include "tests/xe_test.h" 22 #include "xe_assert.h" 23 #include "xe_bb.h" 24 #include "xe_bo.h" 25 #include "xe_exec_queue.h" 26 #include "xe_ggtt.h" 27 #include "xe_gt.h" 28 #include "xe_hw_engine.h" 29 #include "xe_lrc.h" 30 #include "xe_map.h" 31 #include "xe_mocs.h" 32 #include "xe_printk.h" 33 #include "xe_pt.h" 34 #include "xe_res_cursor.h" 35 #include "xe_sa.h" 36 #include "xe_sched_job.h" 37 #include "xe_sriov_vf_ccs.h" 38 #include "xe_svm.h" 39 #include "xe_sync.h" 40 #include "xe_trace_bo.h" 41 #include "xe_validation.h" 42 #include "xe_vm.h" 43 #include "xe_vram.h" 44 45 /** 46 * struct xe_migrate - migrate context. 47 */ 48 struct xe_migrate { 49 /** @q: Default exec queue used for migration */ 50 struct xe_exec_queue *q; 51 /** @tile: Backpointer to the tile this struct xe_migrate belongs to. */ 52 struct xe_tile *tile; 53 /** @job_mutex: Timeline mutex for @eng. */ 54 struct mutex job_mutex; 55 /** @pt_bo: Page-table buffer object. */ 56 struct xe_bo *pt_bo; 57 /** @batch_base_ofs: VM offset of the migration batch buffer */ 58 u64 batch_base_ofs; 59 /** @usm_batch_base_ofs: VM offset of the usm batch buffer */ 60 u64 usm_batch_base_ofs; 61 /** @cleared_mem_ofs: VM offset of @cleared_bo. */ 62 u64 cleared_mem_ofs; 63 /** @large_page_copy_ofs: VM offset of 2M pages used for large copies */ 64 u64 large_page_copy_ofs; 65 /** 66 * @large_page_copy_pdes: BO offset to writeout 2M pages (PDEs) used for 67 * large copies 68 */ 69 u64 large_page_copy_pdes; 70 /** 71 * @fence: dma-fence representing the last migration job batch. 72 * Protected by @job_mutex. 73 */ 74 struct dma_fence *fence; 75 /** 76 * @vm_update_sa: For integrated, used to suballocate page-tables 77 * out of the pt_bo. 78 */ 79 struct drm_suballoc_manager vm_update_sa; 80 /** @min_chunk_size: For dgfx, Minimum chunk size */ 81 u64 min_chunk_size; 82 }; 83 84 #define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */ 85 #define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */ 86 #define NUM_KERNEL_PDE 15 87 #define NUM_PT_SLOTS 32 88 #define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M 89 #define MAX_NUM_PTE 512 90 #define IDENTITY_OFFSET 256ULL 91 92 /* 93 * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest 94 * legal value accepted. Since that instruction field is always stored in 95 * (val-2) format, this translates to 0x400 dwords for the true maximum length 96 * of the instruction. Subtracting the instruction header (1 dword) and 97 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values. 98 */ 99 #define MAX_PTE_PER_SDI 0x1FEU 100 101 static void xe_migrate_fini(void *arg) 102 { 103 struct xe_migrate *m = arg; 104 105 xe_vm_lock(m->q->vm, false); 106 xe_bo_unpin(m->pt_bo); 107 xe_vm_unlock(m->q->vm); 108 109 dma_fence_put(m->fence); 110 xe_bo_put(m->pt_bo); 111 drm_suballoc_manager_fini(&m->vm_update_sa); 112 mutex_destroy(&m->job_mutex); 113 xe_vm_close_and_put(m->q->vm); 114 xe_exec_queue_put(m->q); 115 } 116 117 static u64 xe_migrate_vm_addr(u64 slot, u32 level) 118 { 119 XE_WARN_ON(slot >= NUM_PT_SLOTS); 120 121 /* First slot is reserved for mapping of PT bo and bb, start from 1 */ 122 return (slot + 1ULL) << xe_pt_shift(level + 1); 123 } 124 125 static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte) 126 { 127 /* 128 * Remove the DPA to get a correct offset into identity table for the 129 * migrate offset 130 */ 131 u64 identity_offset = IDENTITY_OFFSET; 132 133 if (GRAPHICS_VER(xe) >= 20 && is_comp_pte) 134 identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size 135 (xe->mem.vram), SZ_1G); 136 137 addr -= xe_vram_region_dpa_base(xe->mem.vram); 138 return addr + (identity_offset << xe_pt_shift(2)); 139 } 140 141 static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo, 142 u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs) 143 { 144 struct xe_vram_region *vram = xe->mem.vram; 145 resource_size_t dpa_base = xe_vram_region_dpa_base(vram); 146 u64 pos, ofs, flags; 147 u64 entry; 148 /* XXX: Unclear if this should be usable_size? */ 149 u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base; 150 u32 level = 2; 151 152 ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8; 153 flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, 154 true, 0); 155 156 xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M)); 157 158 /* 159 * Use 1GB pages when possible, last chunk always use 2M 160 * pages as mixing reserved memory (stolen, WOCPM) with a single 161 * mapping is not allowed on certain platforms. 162 */ 163 for (pos = dpa_base; pos < vram_limit; 164 pos += SZ_1G, ofs += 8) { 165 if (pos + SZ_1G >= vram_limit) { 166 entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs); 167 xe_map_wr(xe, &bo->vmap, ofs, u64, entry); 168 169 flags = vm->pt_ops->pte_encode_addr(xe, 0, 170 pat_index, 171 level - 1, 172 true, 0); 173 174 for (ofs = pt_2m_ofs; pos < vram_limit; 175 pos += SZ_2M, ofs += 8) 176 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); 177 break; /* Ensure pos == vram_limit assert correct */ 178 } 179 180 xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags); 181 } 182 183 xe_assert(xe, pos == vram_limit); 184 } 185 186 static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, 187 struct xe_vm *vm, struct drm_exec *exec) 188 { 189 struct xe_device *xe = tile_to_xe(tile); 190 u16 pat_index = xe->pat.idx[XE_CACHE_WB]; 191 u8 id = tile->id; 192 u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level; 193 #define VRAM_IDENTITY_MAP_COUNT 2 194 u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT; 195 #undef VRAM_IDENTITY_MAP_COUNT 196 u32 map_ofs, level, i; 197 struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo; 198 u64 entry, pt29_ofs; 199 200 /* Can't bump NUM_PT_SLOTS too high */ 201 BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M/XE_PAGE_SIZE); 202 /* Must be a multiple of 64K to support all platforms */ 203 BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K); 204 /* And one slot reserved for the 4KiB page table updates */ 205 BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1)); 206 207 /* Need to be sure everything fits in the first PT, or create more */ 208 xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M); 209 210 bo = xe_bo_create_pin_map(vm->xe, tile, vm, 211 num_entries * XE_PAGE_SIZE, 212 ttm_bo_type_kernel, 213 XE_BO_FLAG_VRAM_IF_DGFX(tile) | 214 XE_BO_FLAG_PAGETABLE, exec); 215 if (IS_ERR(bo)) 216 return PTR_ERR(bo); 217 218 /* PT30 & PT31 reserved for 2M identity map */ 219 pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE; 220 entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs); 221 xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry); 222 223 map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE; 224 225 /* Map the entire BO in our level 0 pt */ 226 for (i = 0, level = 0; i < num_entries; level++) { 227 entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE, 228 pat_index, 0); 229 230 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry); 231 232 if (vm->flags & XE_VM_FLAG_64K) 233 i += 16; 234 else 235 i += 1; 236 } 237 238 if (!IS_DGFX(xe)) { 239 /* Write out batch too */ 240 m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE; 241 for (i = 0; i < xe_bo_size(batch); 242 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : 243 XE_PAGE_SIZE) { 244 entry = vm->pt_ops->pte_encode_bo(batch, i, 245 pat_index, 0); 246 247 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, 248 entry); 249 level++; 250 } 251 if (xe->info.has_usm) { 252 xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M); 253 254 batch = tile->primary_gt->usm.bb_pool->bo; 255 m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M; 256 xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K); 257 258 for (i = 0; i < xe_bo_size(batch); 259 i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE : 260 XE_PAGE_SIZE) { 261 entry = vm->pt_ops->pte_encode_bo(batch, i, 262 pat_index, 0); 263 264 xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, 265 entry); 266 level++; 267 } 268 } 269 } else { 270 u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); 271 272 m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false); 273 274 if (xe->info.has_usm) { 275 batch = tile->primary_gt->usm.bb_pool->bo; 276 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE); 277 m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false); 278 } 279 } 280 281 for (level = 1; level < num_level; level++) { 282 u32 flags = 0; 283 284 if (vm->flags & XE_VM_FLAG_64K && level == 1) 285 flags = XE_PDE_64K; 286 287 entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) * 288 XE_PAGE_SIZE); 289 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64, 290 entry | flags); 291 } 292 293 /* Write PDE's that point to our BO. */ 294 for (i = 0; i < map_ofs / XE_PAGE_SIZE; i++) { 295 entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE); 296 297 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE + 298 (i + 1) * 8, u64, entry); 299 } 300 301 /* Reserve 2M PDEs */ 302 level = 1; 303 m->large_page_copy_ofs = NUM_PT_SLOTS << xe_pt_shift(level); 304 m->large_page_copy_pdes = map_ofs + XE_PAGE_SIZE * level + 305 NUM_PT_SLOTS * 8; 306 307 /* Set up a 1GiB NULL mapping at 255GiB offset. */ 308 level = 2; 309 xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64, 310 vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0) 311 | XE_PTE_NULL); 312 m->cleared_mem_ofs = (255ULL << xe_pt_shift(level)); 313 314 /* Identity map the entire vram at 256GiB offset */ 315 if (IS_DGFX(xe)) { 316 u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE; 317 resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram); 318 319 xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET, 320 pat_index, pt30_ofs); 321 xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G); 322 323 /* 324 * Identity map the entire vram for compressed pat_index for xe2+ 325 * if flat ccs is enabled. 326 */ 327 if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) { 328 u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION]; 329 u64 vram_offset = IDENTITY_OFFSET + 330 DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G); 331 u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE; 332 333 xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET - 334 IDENTITY_OFFSET / 2) * SZ_1G); 335 xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset, 336 comp_pat_index, pt31_ofs); 337 } 338 } 339 340 /* 341 * Example layout created above, with root level = 3: 342 * [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's 343 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's 344 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's 345 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map] 346 * 347 * This makes the lowest part of the VM point to the pagetables. 348 * Hence the lowest 2M in the vm should point to itself, with a few writes 349 * and flushes, other parts of the VM can be used either for copying and 350 * clearing. 351 * 352 * For performance, the kernel reserves PDE's, so about 20 are left 353 * for async VM updates. 354 * 355 * To make it easier to work, each scratch PT is put in slot (1 + PT #) 356 * everywhere, this allows lockless updates to scratch pages by using 357 * the different addresses in VM. 358 */ 359 #define NUM_VMUSA_UNIT_PER_PAGE 32 360 #define VM_SA_UPDATE_UNIT_SIZE (XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE) 361 #define NUM_VMUSA_WRITES_PER_UNIT (VM_SA_UPDATE_UNIT_SIZE / sizeof(u64)) 362 drm_suballoc_manager_init(&m->vm_update_sa, 363 (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) * 364 NUM_VMUSA_UNIT_PER_PAGE, 0); 365 366 m->pt_bo = bo; 367 return 0; 368 } 369 370 /* 371 * Including the reserved copy engine is required to avoid deadlocks due to 372 * migrate jobs servicing the faults gets stuck behind the job that faulted. 373 */ 374 static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt) 375 { 376 u32 logical_mask = 0; 377 struct xe_hw_engine *hwe; 378 enum xe_hw_engine_id id; 379 380 for_each_hw_engine(hwe, gt, id) { 381 if (hwe->class != XE_ENGINE_CLASS_COPY) 382 continue; 383 384 if (xe_gt_is_usm_hwe(gt, hwe)) 385 logical_mask |= BIT(hwe->logical_instance); 386 } 387 388 return logical_mask; 389 } 390 391 static bool xe_migrate_needs_ccs_emit(struct xe_device *xe) 392 { 393 return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe)); 394 } 395 396 /** 397 * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile 398 * @tile: &xe_tile 399 * 400 * Allocates a &xe_migrate for a given tile. 401 * 402 * Return: &xe_migrate on success, or NULL when out of memory. 403 */ 404 struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile) 405 { 406 struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL); 407 408 if (m) 409 m->tile = tile; 410 return m; 411 } 412 413 static int xe_migrate_lock_prepare_vm(struct xe_tile *tile, struct xe_migrate *m, struct xe_vm *vm) 414 { 415 struct xe_device *xe = tile_to_xe(tile); 416 struct xe_validation_ctx ctx; 417 struct drm_exec exec; 418 int err = 0; 419 420 xe_validation_guard(&ctx, &xe->val, &exec, (struct xe_val_flags) {}, err) { 421 err = xe_vm_drm_exec_lock(vm, &exec); 422 drm_exec_retry_on_contention(&exec); 423 err = xe_migrate_prepare_vm(tile, m, vm, &exec); 424 drm_exec_retry_on_contention(&exec); 425 xe_validation_retry_on_oom(&ctx, &err); 426 } 427 428 return err; 429 } 430 431 /** 432 * xe_migrate_init() - Initialize a migrate context 433 * @m: The migration context 434 * 435 * Return: 0 if successful, negative error code on failure 436 */ 437 int xe_migrate_init(struct xe_migrate *m) 438 { 439 struct xe_tile *tile = m->tile; 440 struct xe_gt *primary_gt = tile->primary_gt; 441 struct xe_device *xe = tile_to_xe(tile); 442 struct xe_vm *vm; 443 int err; 444 445 /* Special layout, prepared below.. */ 446 vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION | 447 XE_VM_FLAG_SET_TILE_ID(tile), NULL); 448 if (IS_ERR(vm)) 449 return PTR_ERR(vm); 450 451 err = xe_migrate_lock_prepare_vm(tile, m, vm); 452 if (err) 453 goto err_out; 454 455 if (xe->info.has_usm) { 456 struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt, 457 XE_ENGINE_CLASS_COPY, 458 primary_gt->usm.reserved_bcs_instance, 459 false); 460 u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt); 461 462 if (!hwe || !logical_mask) { 463 err = -EINVAL; 464 goto err_out; 465 } 466 467 /* 468 * XXX: Currently only reserving 1 (likely slow) BCS instance on 469 * PVC, may want to revisit if performance is needed. 470 */ 471 m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe, 472 EXEC_QUEUE_FLAG_KERNEL | 473 EXEC_QUEUE_FLAG_PERMANENT | 474 EXEC_QUEUE_FLAG_HIGH_PRIORITY | 475 EXEC_QUEUE_FLAG_MIGRATE | 476 EXEC_QUEUE_FLAG_LOW_LATENCY, 0); 477 } else { 478 m->q = xe_exec_queue_create_class(xe, primary_gt, vm, 479 XE_ENGINE_CLASS_COPY, 480 EXEC_QUEUE_FLAG_KERNEL | 481 EXEC_QUEUE_FLAG_PERMANENT | 482 EXEC_QUEUE_FLAG_MIGRATE, 0); 483 } 484 if (IS_ERR(m->q)) { 485 err = PTR_ERR(m->q); 486 goto err_out; 487 } 488 489 mutex_init(&m->job_mutex); 490 fs_reclaim_acquire(GFP_KERNEL); 491 might_lock(&m->job_mutex); 492 fs_reclaim_release(GFP_KERNEL); 493 494 err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m); 495 if (err) 496 return err; 497 498 if (IS_DGFX(xe)) { 499 if (xe_migrate_needs_ccs_emit(xe)) 500 /* min chunk size corresponds to 4K of CCS Metadata */ 501 m->min_chunk_size = SZ_4K * SZ_64K / 502 xe_device_ccs_bytes(xe, SZ_64K); 503 else 504 /* Somewhat arbitrary to avoid a huge amount of blits */ 505 m->min_chunk_size = SZ_64K; 506 m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size); 507 drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n", 508 (unsigned long long)m->min_chunk_size); 509 } 510 511 return err; 512 513 err_out: 514 xe_vm_close_and_put(vm); 515 return err; 516 517 } 518 519 static u64 max_mem_transfer_per_pass(struct xe_device *xe) 520 { 521 if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe)) 522 return MAX_CCS_LIMITED_TRANSFER; 523 524 return MAX_PREEMPTDISABLE_TRANSFER; 525 } 526 527 static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur) 528 { 529 struct xe_device *xe = tile_to_xe(m->tile); 530 u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining); 531 532 if (mem_type_is_vram(cur->mem_type)) { 533 /* 534 * VRAM we want to blit in chunks with sizes aligned to 535 * min_chunk_size in order for the offset to CCS metadata to be 536 * page-aligned. If it's the last chunk it may be smaller. 537 * 538 * Another constraint is that we need to limit the blit to 539 * the VRAM block size, unless size is smaller than 540 * min_chunk_size. 541 */ 542 u64 chunk = max_t(u64, cur->size, m->min_chunk_size); 543 544 size = min_t(u64, size, chunk); 545 if (size > m->min_chunk_size) 546 size = round_down(size, m->min_chunk_size); 547 } 548 549 return size; 550 } 551 552 static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur) 553 { 554 /* If the chunk is not fragmented, allow identity map. */ 555 return cur->size >= size; 556 } 557 558 #define PTE_UPDATE_FLAG_IS_VRAM BIT(0) 559 #define PTE_UPDATE_FLAG_IS_COMP_PTE BIT(1) 560 561 static u32 pte_update_size(struct xe_migrate *m, 562 u32 flags, 563 struct ttm_resource *res, 564 struct xe_res_cursor *cur, 565 u64 *L0, u64 *L0_ofs, u32 *L0_pt, 566 u32 cmd_size, u32 pt_ofs, u32 avail_pts) 567 { 568 u32 cmds = 0; 569 bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags; 570 bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags; 571 572 *L0_pt = pt_ofs; 573 if (is_vram && xe_migrate_allow_identity(*L0, cur)) { 574 /* Offset into identity map. */ 575 *L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile), 576 cur->start + vram_region_gpu_offset(res), 577 is_comp_pte); 578 cmds += cmd_size; 579 } else { 580 /* Clip L0 to available size */ 581 u64 size = min(*L0, (u64)avail_pts * SZ_2M); 582 u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT; 583 584 *L0 = size; 585 *L0_ofs = xe_migrate_vm_addr(pt_ofs, 0); 586 587 /* MI_STORE_DATA_IMM */ 588 cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI); 589 590 /* PDE qwords */ 591 cmds += num_4k_pages * 2; 592 593 /* Each chunk has a single blit command */ 594 cmds += cmd_size; 595 } 596 597 return cmds; 598 } 599 600 static void emit_pte(struct xe_migrate *m, 601 struct xe_bb *bb, u32 at_pt, 602 bool is_vram, bool is_comp_pte, 603 struct xe_res_cursor *cur, 604 u32 size, struct ttm_resource *res) 605 { 606 struct xe_device *xe = tile_to_xe(m->tile); 607 struct xe_vm *vm = m->q->vm; 608 u16 pat_index; 609 u32 ptes; 610 u64 ofs = (u64)at_pt * XE_PAGE_SIZE; 611 u64 cur_ofs; 612 613 /* Indirect access needs compression enabled uncached PAT index */ 614 if (GRAPHICS_VERx100(xe) >= 2000) 615 pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] : 616 xe->pat.idx[XE_CACHE_WB]; 617 else 618 pat_index = xe->pat.idx[XE_CACHE_WB]; 619 620 ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE); 621 622 while (ptes) { 623 u32 chunk = min(MAX_PTE_PER_SDI, ptes); 624 625 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); 626 bb->cs[bb->len++] = ofs; 627 bb->cs[bb->len++] = 0; 628 629 cur_ofs = ofs; 630 ofs += chunk * 8; 631 ptes -= chunk; 632 633 while (chunk--) { 634 u64 addr, flags = 0; 635 bool devmem = false; 636 637 addr = xe_res_dma(cur) & PAGE_MASK; 638 if (is_vram) { 639 if (vm->flags & XE_VM_FLAG_64K) { 640 u64 va = cur_ofs * XE_PAGE_SIZE / 8; 641 642 xe_assert(xe, (va & (SZ_64K - 1)) == 643 (addr & (SZ_64K - 1))); 644 645 flags |= XE_PTE_PS64; 646 } 647 648 addr += vram_region_gpu_offset(res); 649 devmem = true; 650 } 651 652 addr = vm->pt_ops->pte_encode_addr(m->tile->xe, 653 addr, pat_index, 654 0, devmem, flags); 655 bb->cs[bb->len++] = lower_32_bits(addr); 656 bb->cs[bb->len++] = upper_32_bits(addr); 657 658 xe_res_next(cur, min_t(u32, size, PAGE_SIZE)); 659 cur_ofs += 8; 660 } 661 } 662 } 663 664 #define EMIT_COPY_CCS_DW 5 665 static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb, 666 u64 dst_ofs, bool dst_is_indirect, 667 u64 src_ofs, bool src_is_indirect, 668 u32 size) 669 { 670 struct xe_device *xe = gt_to_xe(gt); 671 u32 *cs = bb->cs + bb->len; 672 u32 num_ccs_blks; 673 u32 num_pages; 674 u32 ccs_copy_size; 675 u32 mocs; 676 677 if (GRAPHICS_VERx100(xe) >= 2000) { 678 num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE); 679 xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1)); 680 681 ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1); 682 mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index); 683 684 } else { 685 num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size), 686 NUM_CCS_BYTES_PER_BLOCK); 687 xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1)); 688 689 ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1); 690 mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index); 691 } 692 693 *cs++ = XY_CTRL_SURF_COPY_BLT | 694 (src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT | 695 (dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT | 696 ccs_copy_size; 697 *cs++ = lower_32_bits(src_ofs); 698 *cs++ = upper_32_bits(src_ofs) | mocs; 699 *cs++ = lower_32_bits(dst_ofs); 700 *cs++ = upper_32_bits(dst_ofs) | mocs; 701 702 bb->len = cs - bb->cs; 703 } 704 705 #define EMIT_COPY_DW 10 706 static void emit_xy_fast_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, 707 u64 dst_ofs, unsigned int size, 708 unsigned int pitch) 709 { 710 struct xe_device *xe = gt_to_xe(gt); 711 u32 mocs = 0; 712 u32 tile_y = 0; 713 714 xe_gt_assert(gt, !(pitch & 3)); 715 xe_gt_assert(gt, size / pitch <= S16_MAX); 716 xe_gt_assert(gt, pitch / 4 <= S16_MAX); 717 xe_gt_assert(gt, pitch <= U16_MAX); 718 719 if (GRAPHICS_VER(xe) >= 20) 720 mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index); 721 722 if (GRAPHICS_VERx100(xe) >= 1250) 723 tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4; 724 725 bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2); 726 bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs; 727 bb->cs[bb->len++] = 0; 728 bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4; 729 bb->cs[bb->len++] = lower_32_bits(dst_ofs); 730 bb->cs[bb->len++] = upper_32_bits(dst_ofs); 731 bb->cs[bb->len++] = 0; 732 bb->cs[bb->len++] = pitch | mocs; 733 bb->cs[bb->len++] = lower_32_bits(src_ofs); 734 bb->cs[bb->len++] = upper_32_bits(src_ofs); 735 } 736 737 #define PAGE_COPY_MODE_PS SZ_256 /* hw uses 256 bytes as the page-size */ 738 static void emit_mem_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, 739 u64 dst_ofs, unsigned int size, unsigned int pitch) 740 { 741 u32 mode, copy_type, width; 742 743 xe_gt_assert(gt, IS_ALIGNED(size, pitch)); 744 xe_gt_assert(gt, pitch <= U16_MAX); 745 xe_gt_assert(gt, pitch); 746 xe_gt_assert(gt, size); 747 748 if (IS_ALIGNED(size, PAGE_COPY_MODE_PS) && 749 IS_ALIGNED(lower_32_bits(src_ofs), PAGE_COPY_MODE_PS) && 750 IS_ALIGNED(lower_32_bits(dst_ofs), PAGE_COPY_MODE_PS)) { 751 mode = MEM_COPY_PAGE_COPY_MODE; 752 copy_type = 0; /* linear copy */ 753 width = size / PAGE_COPY_MODE_PS; 754 } else if (pitch > 1) { 755 xe_gt_assert(gt, size / pitch <= U16_MAX); 756 mode = 0; /* BYTE_COPY */ 757 copy_type = MEM_COPY_MATRIX_COPY; 758 width = pitch; 759 } else { 760 mode = 0; /* BYTE_COPY */ 761 copy_type = 0; /* linear copy */ 762 width = size; 763 } 764 765 xe_gt_assert(gt, width <= U16_MAX); 766 767 bb->cs[bb->len++] = MEM_COPY_CMD | mode | copy_type; 768 bb->cs[bb->len++] = width - 1; 769 bb->cs[bb->len++] = size / pitch - 1; /* ignored by hw for page-copy/linear above */ 770 bb->cs[bb->len++] = pitch - 1; 771 bb->cs[bb->len++] = pitch - 1; 772 bb->cs[bb->len++] = lower_32_bits(src_ofs); 773 bb->cs[bb->len++] = upper_32_bits(src_ofs); 774 bb->cs[bb->len++] = lower_32_bits(dst_ofs); 775 bb->cs[bb->len++] = upper_32_bits(dst_ofs); 776 bb->cs[bb->len++] = FIELD_PREP(MEM_COPY_SRC_MOCS_INDEX_MASK, gt->mocs.uc_index) | 777 FIELD_PREP(MEM_COPY_DST_MOCS_INDEX_MASK, gt->mocs.uc_index); 778 } 779 780 static void emit_copy(struct xe_gt *gt, struct xe_bb *bb, 781 u64 src_ofs, u64 dst_ofs, unsigned int size, 782 unsigned int pitch) 783 { 784 struct xe_device *xe = gt_to_xe(gt); 785 786 if (xe->info.has_mem_copy_instr) 787 emit_mem_copy(gt, bb, src_ofs, dst_ofs, size, pitch); 788 else 789 emit_xy_fast_copy(gt, bb, src_ofs, dst_ofs, size, pitch); 790 } 791 792 static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm) 793 { 794 return usm ? m->usm_batch_base_ofs : m->batch_base_ofs; 795 } 796 797 static u32 xe_migrate_ccs_copy(struct xe_migrate *m, 798 struct xe_bb *bb, 799 u64 src_ofs, bool src_is_indirect, 800 u64 dst_ofs, bool dst_is_indirect, u32 dst_size, 801 u64 ccs_ofs, bool copy_ccs) 802 { 803 struct xe_gt *gt = m->tile->primary_gt; 804 u32 flush_flags = 0; 805 806 if (!copy_ccs && dst_is_indirect) { 807 /* 808 * If the src is already in vram, then it should already 809 * have been cleared by us, or has been populated by the 810 * user. Make sure we copy the CCS aux state as-is. 811 * 812 * Otherwise if the bo doesn't have any CCS metadata attached, 813 * we still need to clear it for security reasons. 814 */ 815 u64 ccs_src_ofs = src_is_indirect ? src_ofs : m->cleared_mem_ofs; 816 817 emit_copy_ccs(gt, bb, 818 dst_ofs, true, 819 ccs_src_ofs, src_is_indirect, dst_size); 820 821 flush_flags = MI_FLUSH_DW_CCS; 822 } else if (copy_ccs) { 823 if (!src_is_indirect) 824 src_ofs = ccs_ofs; 825 else if (!dst_is_indirect) 826 dst_ofs = ccs_ofs; 827 828 xe_gt_assert(gt, src_is_indirect || dst_is_indirect); 829 830 emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs, 831 src_is_indirect, dst_size); 832 if (dst_is_indirect) 833 flush_flags = MI_FLUSH_DW_CCS; 834 } 835 836 return flush_flags; 837 } 838 839 /** 840 * xe_migrate_copy() - Copy content of TTM resources. 841 * @m: The migration context. 842 * @src_bo: The buffer object @src is currently bound to. 843 * @dst_bo: If copying between resources created for the same bo, set this to 844 * the same value as @src_bo. If copying between buffer objects, set it to 845 * the buffer object @dst is currently bound to. 846 * @src: The source TTM resource. 847 * @dst: The dst TTM resource. 848 * @copy_only_ccs: If true copy only CCS metadata 849 * 850 * Copies the contents of @src to @dst: On flat CCS devices, 851 * the CCS metadata is copied as well if needed, or if not present, 852 * the CCS metadata of @dst is cleared for security reasons. 853 * 854 * Return: Pointer to a dma_fence representing the last copy batch, or 855 * an error pointer on failure. If there is a failure, any copy operation 856 * started by the function call has been synced. 857 */ 858 struct dma_fence *xe_migrate_copy(struct xe_migrate *m, 859 struct xe_bo *src_bo, 860 struct xe_bo *dst_bo, 861 struct ttm_resource *src, 862 struct ttm_resource *dst, 863 bool copy_only_ccs) 864 { 865 struct xe_gt *gt = m->tile->primary_gt; 866 struct xe_device *xe = gt_to_xe(gt); 867 struct dma_fence *fence = NULL; 868 u64 size = xe_bo_size(src_bo); 869 struct xe_res_cursor src_it, dst_it, ccs_it; 870 u64 src_L0_ofs, dst_L0_ofs; 871 u32 src_L0_pt, dst_L0_pt; 872 u64 src_L0, dst_L0; 873 int pass = 0; 874 int err; 875 bool src_is_pltt = src->mem_type == XE_PL_TT; 876 bool dst_is_pltt = dst->mem_type == XE_PL_TT; 877 bool src_is_vram = mem_type_is_vram(src->mem_type); 878 bool dst_is_vram = mem_type_is_vram(dst->mem_type); 879 bool type_device = src_bo->ttm.type == ttm_bo_type_device; 880 bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe); 881 bool copy_ccs = xe_device_has_flat_ccs(xe) && 882 xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo); 883 bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram); 884 bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) && 885 GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram; 886 887 /* Copying CCS between two different BOs is not supported yet. */ 888 if (XE_WARN_ON(copy_ccs && src_bo != dst_bo)) 889 return ERR_PTR(-EINVAL); 890 891 if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo))) 892 return ERR_PTR(-EINVAL); 893 894 if (!src_is_vram) 895 xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it); 896 else 897 xe_res_first(src, 0, size, &src_it); 898 if (!dst_is_vram) 899 xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it); 900 else 901 xe_res_first(dst, 0, size, &dst_it); 902 903 if (copy_system_ccs) 904 xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo), 905 PAGE_ALIGN(xe_device_ccs_bytes(xe, size)), 906 &ccs_it); 907 908 while (size) { 909 u32 batch_size = 1; /* MI_BATCH_BUFFER_END */ 910 struct xe_sched_job *job; 911 struct xe_bb *bb; 912 u32 flush_flags = 0; 913 u32 update_idx; 914 u64 ccs_ofs, ccs_size; 915 u32 ccs_pt; 916 u32 pte_flags; 917 918 bool usm = xe->info.has_usm; 919 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; 920 921 src_L0 = xe_migrate_res_sizes(m, &src_it); 922 dst_L0 = xe_migrate_res_sizes(m, &dst_it); 923 924 drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n", 925 pass++, src_L0, dst_L0); 926 927 src_L0 = min(src_L0, dst_L0); 928 929 pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; 930 pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0; 931 batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0, 932 &src_L0_ofs, &src_L0_pt, 0, 0, 933 avail_pts); 934 if (copy_only_ccs) { 935 dst_L0_ofs = src_L0_ofs; 936 } else { 937 pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; 938 batch_size += pte_update_size(m, pte_flags, dst, 939 &dst_it, &src_L0, 940 &dst_L0_ofs, &dst_L0_pt, 941 0, avail_pts, avail_pts); 942 } 943 944 if (copy_system_ccs) { 945 xe_assert(xe, type_device); 946 ccs_size = xe_device_ccs_bytes(xe, src_L0); 947 batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, 948 &ccs_ofs, &ccs_pt, 0, 949 2 * avail_pts, 950 avail_pts); 951 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE)); 952 } 953 954 /* Add copy commands size here */ 955 batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) + 956 ((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0)); 957 958 bb = xe_bb_new(gt, batch_size, usm); 959 if (IS_ERR(bb)) { 960 err = PTR_ERR(bb); 961 goto err_sync; 962 } 963 964 if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it)) 965 xe_res_next(&src_it, src_L0); 966 else 967 emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat, 968 &src_it, src_L0, src); 969 970 if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it)) 971 xe_res_next(&dst_it, src_L0); 972 else if (!copy_only_ccs) 973 emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs, 974 &dst_it, src_L0, dst); 975 976 if (copy_system_ccs) 977 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); 978 979 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; 980 update_idx = bb->len; 981 982 if (!copy_only_ccs) 983 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE); 984 985 if (needs_ccs_emit) 986 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, 987 IS_DGFX(xe) ? src_is_vram : src_is_pltt, 988 dst_L0_ofs, 989 IS_DGFX(xe) ? dst_is_vram : dst_is_pltt, 990 src_L0, ccs_ofs, copy_ccs); 991 992 job = xe_bb_create_migration_job(m->q, bb, 993 xe_migrate_batch_base(m, usm), 994 update_idx); 995 if (IS_ERR(job)) { 996 err = PTR_ERR(job); 997 goto err; 998 } 999 1000 xe_sched_job_add_migrate_flush(job, flush_flags | MI_INVALIDATE_TLB); 1001 if (!fence) { 1002 err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv, 1003 DMA_RESV_USAGE_BOOKKEEP); 1004 if (!err && src_bo->ttm.base.resv != dst_bo->ttm.base.resv) 1005 err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv, 1006 DMA_RESV_USAGE_BOOKKEEP); 1007 if (err) 1008 goto err_job; 1009 } 1010 1011 mutex_lock(&m->job_mutex); 1012 xe_sched_job_arm(job); 1013 dma_fence_put(fence); 1014 fence = dma_fence_get(&job->drm.s_fence->finished); 1015 xe_sched_job_push(job); 1016 1017 dma_fence_put(m->fence); 1018 m->fence = dma_fence_get(fence); 1019 1020 mutex_unlock(&m->job_mutex); 1021 1022 xe_bb_free(bb, fence); 1023 size -= src_L0; 1024 continue; 1025 1026 err_job: 1027 xe_sched_job_put(job); 1028 err: 1029 xe_bb_free(bb, NULL); 1030 1031 err_sync: 1032 /* Sync partial copy if any. FIXME: under job_mutex? */ 1033 if (fence) { 1034 dma_fence_wait(fence, false); 1035 dma_fence_put(fence); 1036 } 1037 1038 return ERR_PTR(err); 1039 } 1040 1041 return fence; 1042 } 1043 1044 /** 1045 * xe_migrate_lrc() - Get the LRC from migrate context. 1046 * @migrate: Migrate context. 1047 * 1048 * Return: Pointer to LRC on success, error on failure 1049 */ 1050 struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate) 1051 { 1052 return migrate->q->lrc[0]; 1053 } 1054 1055 static u64 migrate_vm_ppgtt_addr_tlb_inval(void) 1056 { 1057 /* 1058 * The migrate VM is self-referential so it can modify its own PTEs (see 1059 * pte_update_size() or emit_pte() functions). We reserve NUM_KERNEL_PDE 1060 * entries for kernel operations (copies, clears, CCS migrate), and 1061 * suballocate the rest to user operations (binds/unbinds). With 1062 * NUM_KERNEL_PDE = 15, NUM_KERNEL_PDE - 1 is already used for PTE updates, 1063 * so assign NUM_KERNEL_PDE - 2 for TLB invalidation. 1064 */ 1065 return (NUM_KERNEL_PDE - 2) * XE_PAGE_SIZE; 1066 } 1067 1068 static int emit_flush_invalidate(u32 *dw, int i, u32 flags) 1069 { 1070 u64 addr = migrate_vm_ppgtt_addr_tlb_inval(); 1071 1072 dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW | 1073 MI_FLUSH_IMM_DW | flags; 1074 dw[i++] = lower_32_bits(addr); 1075 dw[i++] = upper_32_bits(addr); 1076 dw[i++] = MI_NOOP; 1077 dw[i++] = MI_NOOP; 1078 1079 return i; 1080 } 1081 1082 /** 1083 * xe_migrate_ccs_rw_copy() - Copy content of TTM resources. 1084 * @tile: Tile whose migration context to be used. 1085 * @q : Execution to be used along with migration context. 1086 * @src_bo: The buffer object @src is currently bound to. 1087 * @read_write : Creates BB commands for CCS read/write. 1088 * 1089 * Creates batch buffer instructions to copy CCS metadata from CCS pool to 1090 * memory and vice versa. 1091 * 1092 * This function should only be called for IGPU. 1093 * 1094 * Return: 0 if successful, negative error code on failure. 1095 */ 1096 int xe_migrate_ccs_rw_copy(struct xe_tile *tile, struct xe_exec_queue *q, 1097 struct xe_bo *src_bo, 1098 enum xe_sriov_vf_ccs_rw_ctxs read_write) 1099 1100 { 1101 bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX; 1102 bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX; 1103 struct ttm_resource *src = src_bo->ttm.resource; 1104 struct xe_migrate *m = tile->migrate; 1105 struct xe_gt *gt = tile->primary_gt; 1106 u32 batch_size, batch_size_allocated; 1107 struct xe_device *xe = gt_to_xe(gt); 1108 struct xe_res_cursor src_it, ccs_it; 1109 struct xe_sriov_vf_ccs_ctx *ctx; 1110 struct xe_sa_manager *bb_pool; 1111 u64 size = xe_bo_size(src_bo); 1112 struct xe_bb *bb = NULL; 1113 u64 src_L0, src_L0_ofs; 1114 u32 src_L0_pt; 1115 int err; 1116 1117 ctx = &xe->sriov.vf.ccs.contexts[read_write]; 1118 1119 xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it); 1120 1121 xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo), 1122 PAGE_ALIGN(xe_device_ccs_bytes(xe, size)), 1123 &ccs_it); 1124 1125 /* Calculate Batch buffer size */ 1126 batch_size = 0; 1127 while (size) { 1128 batch_size += 10; /* Flush + ggtt addr + 2 NOP */ 1129 u64 ccs_ofs, ccs_size; 1130 u32 ccs_pt; 1131 1132 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; 1133 1134 src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size); 1135 1136 batch_size += pte_update_size(m, false, src, &src_it, &src_L0, 1137 &src_L0_ofs, &src_L0_pt, 0, 0, 1138 avail_pts); 1139 1140 ccs_size = xe_device_ccs_bytes(xe, src_L0); 1141 batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs, 1142 &ccs_pt, 0, avail_pts, avail_pts); 1143 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE)); 1144 1145 /* Add copy commands size here */ 1146 batch_size += EMIT_COPY_CCS_DW; 1147 1148 size -= src_L0; 1149 } 1150 1151 bb_pool = ctx->mem.ccs_bb_pool; 1152 guard(mutex) (xe_sa_bo_swap_guard(bb_pool)); 1153 xe_sa_bo_swap_shadow(bb_pool); 1154 1155 bb = xe_bb_ccs_new(gt, batch_size, read_write); 1156 if (IS_ERR(bb)) { 1157 drm_err(&xe->drm, "BB allocation failed.\n"); 1158 err = PTR_ERR(bb); 1159 return err; 1160 } 1161 1162 batch_size_allocated = batch_size; 1163 size = xe_bo_size(src_bo); 1164 batch_size = 0; 1165 1166 /* 1167 * Emit PTE and copy commands here. 1168 * The CCS copy command can only support limited size. If the size to be 1169 * copied is more than the limit, divide copy into chunks. So, calculate 1170 * sizes here again before copy command is emitted. 1171 */ 1172 while (size) { 1173 batch_size += 10; /* Flush + ggtt addr + 2 NOP */ 1174 u32 flush_flags = 0; 1175 u64 ccs_ofs, ccs_size; 1176 u32 ccs_pt; 1177 1178 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; 1179 1180 src_L0 = xe_migrate_res_sizes(m, &src_it); 1181 1182 batch_size += pte_update_size(m, false, src, &src_it, &src_L0, 1183 &src_L0_ofs, &src_L0_pt, 0, 0, 1184 avail_pts); 1185 1186 ccs_size = xe_device_ccs_bytes(xe, src_L0); 1187 batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs, 1188 &ccs_pt, 0, avail_pts, avail_pts); 1189 xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE)); 1190 batch_size += EMIT_COPY_CCS_DW; 1191 1192 emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src); 1193 1194 emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src); 1195 1196 bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags); 1197 flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt, 1198 src_L0_ofs, dst_is_pltt, 1199 src_L0, ccs_ofs, true); 1200 bb->len = emit_flush_invalidate(bb->cs, bb->len, flush_flags); 1201 1202 size -= src_L0; 1203 } 1204 1205 xe_assert(xe, (batch_size_allocated == bb->len)); 1206 src_bo->bb_ccs[read_write] = bb; 1207 1208 xe_sriov_vf_ccs_rw_update_bb_addr(ctx); 1209 xe_sa_bo_sync_shadow(bb->bo); 1210 return 0; 1211 } 1212 1213 /** 1214 * xe_migrate_ccs_rw_copy_clear() - Clear the CCS read/write batch buffer 1215 * content. 1216 * @src_bo: The buffer object @src is currently bound to. 1217 * @read_write : Creates BB commands for CCS read/write. 1218 * 1219 * Directly clearing the BB lacks atomicity and can lead to undefined 1220 * behavior if the vCPU is halted mid-operation during the clearing 1221 * process. To avoid this issue, we use a shadow buffer object approach. 1222 * 1223 * First swap the SA BO address with the shadow BO, perform the clearing 1224 * operation on the BB, update the shadow BO in the ring buffer, then 1225 * sync the shadow and the actual buffer to maintain consistency. 1226 * 1227 * Returns: None. 1228 */ 1229 void xe_migrate_ccs_rw_copy_clear(struct xe_bo *src_bo, 1230 enum xe_sriov_vf_ccs_rw_ctxs read_write) 1231 { 1232 struct xe_bb *bb = src_bo->bb_ccs[read_write]; 1233 struct xe_device *xe = xe_bo_device(src_bo); 1234 struct xe_sriov_vf_ccs_ctx *ctx; 1235 struct xe_sa_manager *bb_pool; 1236 u32 *cs; 1237 1238 xe_assert(xe, IS_SRIOV_VF(xe)); 1239 1240 ctx = &xe->sriov.vf.ccs.contexts[read_write]; 1241 bb_pool = ctx->mem.ccs_bb_pool; 1242 1243 guard(mutex) (xe_sa_bo_swap_guard(bb_pool)); 1244 xe_sa_bo_swap_shadow(bb_pool); 1245 1246 cs = xe_sa_bo_cpu_addr(bb->bo); 1247 memset(cs, MI_NOOP, bb->len * sizeof(u32)); 1248 xe_sriov_vf_ccs_rw_update_bb_addr(ctx); 1249 1250 xe_sa_bo_sync_shadow(bb->bo); 1251 1252 xe_bb_free(bb, NULL); 1253 src_bo->bb_ccs[read_write] = NULL; 1254 } 1255 1256 /** 1257 * xe_migrate_exec_queue() - Get the execution queue from migrate context. 1258 * @migrate: Migrate context. 1259 * 1260 * Return: Pointer to execution queue on success, error on failure 1261 */ 1262 struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate) 1263 { 1264 return migrate->q; 1265 } 1266 1267 /** 1268 * xe_migrate_vram_copy_chunk() - Copy a chunk of a VRAM buffer object. 1269 * @vram_bo: The VRAM buffer object. 1270 * @vram_offset: The VRAM offset. 1271 * @sysmem_bo: The sysmem buffer object. 1272 * @sysmem_offset: The sysmem offset. 1273 * @size: The size of VRAM chunk to copy. 1274 * @dir: The direction of the copy operation. 1275 * 1276 * Copies a portion of a buffer object between VRAM and system memory. 1277 * On Xe2 platforms that support flat CCS, VRAM data is decompressed when 1278 * copying to system memory. 1279 * 1280 * Return: Pointer to a dma_fence representing the last copy batch, or 1281 * an error pointer on failure. If there is a failure, any copy operation 1282 * started by the function call has been synced. 1283 */ 1284 struct dma_fence *xe_migrate_vram_copy_chunk(struct xe_bo *vram_bo, u64 vram_offset, 1285 struct xe_bo *sysmem_bo, u64 sysmem_offset, 1286 u64 size, enum xe_migrate_copy_dir dir) 1287 { 1288 struct xe_device *xe = xe_bo_device(vram_bo); 1289 struct xe_tile *tile = vram_bo->tile; 1290 struct xe_gt *gt = tile->primary_gt; 1291 struct xe_migrate *m = tile->migrate; 1292 struct dma_fence *fence = NULL; 1293 struct ttm_resource *vram = vram_bo->ttm.resource; 1294 struct ttm_resource *sysmem = sysmem_bo->ttm.resource; 1295 struct xe_res_cursor vram_it, sysmem_it; 1296 u64 vram_L0_ofs, sysmem_L0_ofs; 1297 u32 vram_L0_pt, sysmem_L0_pt; 1298 u64 vram_L0, sysmem_L0; 1299 bool to_sysmem = (dir == XE_MIGRATE_COPY_TO_SRAM); 1300 bool use_comp_pat = to_sysmem && 1301 GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe); 1302 int pass = 0; 1303 int err; 1304 1305 xe_assert(xe, IS_ALIGNED(vram_offset | sysmem_offset | size, PAGE_SIZE)); 1306 xe_assert(xe, xe_bo_is_vram(vram_bo)); 1307 xe_assert(xe, !xe_bo_is_vram(sysmem_bo)); 1308 xe_assert(xe, !range_overflows(vram_offset, size, (u64)vram_bo->ttm.base.size)); 1309 xe_assert(xe, !range_overflows(sysmem_offset, size, (u64)sysmem_bo->ttm.base.size)); 1310 1311 xe_res_first(vram, vram_offset, size, &vram_it); 1312 xe_res_first_sg(xe_bo_sg(sysmem_bo), sysmem_offset, size, &sysmem_it); 1313 1314 while (size) { 1315 u32 pte_flags = PTE_UPDATE_FLAG_IS_VRAM; 1316 u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */ 1317 struct xe_sched_job *job; 1318 struct xe_bb *bb; 1319 u32 update_idx; 1320 bool usm = xe->info.has_usm; 1321 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; 1322 1323 sysmem_L0 = xe_migrate_res_sizes(m, &sysmem_it); 1324 vram_L0 = min(xe_migrate_res_sizes(m, &vram_it), sysmem_L0); 1325 1326 xe_dbg(xe, "Pass %u, size: %llu\n", pass++, vram_L0); 1327 1328 pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0; 1329 batch_size += pte_update_size(m, pte_flags, vram, &vram_it, &vram_L0, 1330 &vram_L0_ofs, &vram_L0_pt, 0, 0, avail_pts); 1331 1332 batch_size += pte_update_size(m, 0, sysmem, &sysmem_it, &vram_L0, &sysmem_L0_ofs, 1333 &sysmem_L0_pt, 0, avail_pts, avail_pts); 1334 batch_size += EMIT_COPY_DW; 1335 1336 bb = xe_bb_new(gt, batch_size, usm); 1337 if (IS_ERR(bb)) { 1338 err = PTR_ERR(bb); 1339 return ERR_PTR(err); 1340 } 1341 1342 if (xe_migrate_allow_identity(vram_L0, &vram_it)) 1343 xe_res_next(&vram_it, vram_L0); 1344 else 1345 emit_pte(m, bb, vram_L0_pt, true, use_comp_pat, &vram_it, vram_L0, vram); 1346 1347 emit_pte(m, bb, sysmem_L0_pt, false, false, &sysmem_it, vram_L0, sysmem); 1348 1349 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; 1350 update_idx = bb->len; 1351 1352 if (to_sysmem) 1353 emit_copy(gt, bb, vram_L0_ofs, sysmem_L0_ofs, vram_L0, XE_PAGE_SIZE); 1354 else 1355 emit_copy(gt, bb, sysmem_L0_ofs, vram_L0_ofs, vram_L0, XE_PAGE_SIZE); 1356 1357 job = xe_bb_create_migration_job(m->q, bb, xe_migrate_batch_base(m, usm), 1358 update_idx); 1359 if (IS_ERR(job)) { 1360 xe_bb_free(bb, NULL); 1361 err = PTR_ERR(job); 1362 return ERR_PTR(err); 1363 } 1364 1365 xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB); 1366 1367 xe_assert(xe, dma_resv_test_signaled(vram_bo->ttm.base.resv, 1368 DMA_RESV_USAGE_BOOKKEEP)); 1369 xe_assert(xe, dma_resv_test_signaled(sysmem_bo->ttm.base.resv, 1370 DMA_RESV_USAGE_BOOKKEEP)); 1371 1372 scoped_guard(mutex, &m->job_mutex) { 1373 xe_sched_job_arm(job); 1374 dma_fence_put(fence); 1375 fence = dma_fence_get(&job->drm.s_fence->finished); 1376 xe_sched_job_push(job); 1377 1378 dma_fence_put(m->fence); 1379 m->fence = dma_fence_get(fence); 1380 } 1381 1382 xe_bb_free(bb, fence); 1383 size -= vram_L0; 1384 } 1385 1386 return fence; 1387 } 1388 1389 static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, 1390 u32 size, u32 pitch) 1391 { 1392 struct xe_device *xe = gt_to_xe(gt); 1393 u32 *cs = bb->cs + bb->len; 1394 u32 len = PVC_MEM_SET_CMD_LEN_DW; 1395 1396 *cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2); 1397 *cs++ = pitch - 1; 1398 *cs++ = (size / pitch) - 1; 1399 *cs++ = pitch - 1; 1400 *cs++ = lower_32_bits(src_ofs); 1401 *cs++ = upper_32_bits(src_ofs); 1402 if (GRAPHICS_VERx100(xe) >= 2000) 1403 *cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index); 1404 else 1405 *cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index); 1406 1407 xe_gt_assert(gt, cs - bb->cs == len + bb->len); 1408 1409 bb->len += len; 1410 } 1411 1412 static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb, 1413 u64 src_ofs, u32 size, u32 pitch, bool is_vram) 1414 { 1415 struct xe_device *xe = gt_to_xe(gt); 1416 u32 *cs = bb->cs + bb->len; 1417 u32 len = XY_FAST_COLOR_BLT_DW; 1418 1419 if (GRAPHICS_VERx100(xe) < 1250) 1420 len = 11; 1421 1422 *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 | 1423 (len - 2); 1424 if (GRAPHICS_VERx100(xe) >= 2000) 1425 *cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) | 1426 (pitch - 1); 1427 else 1428 *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) | 1429 (pitch - 1); 1430 *cs++ = 0; 1431 *cs++ = (size / pitch) << 16 | pitch / 4; 1432 *cs++ = lower_32_bits(src_ofs); 1433 *cs++ = upper_32_bits(src_ofs); 1434 *cs++ = (is_vram ? 0x0 : 0x1) << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT; 1435 *cs++ = 0; 1436 *cs++ = 0; 1437 *cs++ = 0; 1438 *cs++ = 0; 1439 1440 if (len > 11) { 1441 *cs++ = 0; 1442 *cs++ = 0; 1443 *cs++ = 0; 1444 *cs++ = 0; 1445 *cs++ = 0; 1446 } 1447 1448 xe_gt_assert(gt, cs - bb->cs == len + bb->len); 1449 1450 bb->len += len; 1451 } 1452 1453 static bool has_service_copy_support(struct xe_gt *gt) 1454 { 1455 /* 1456 * What we care about is whether the architecture was designed with 1457 * service copy functionality (specifically the new MEM_SET / MEM_COPY 1458 * instructions) so check the architectural engine list rather than the 1459 * actual list since these instructions are usable on BCS0 even if 1460 * all of the actual service copy engines (BCS1-BCS8) have been fused 1461 * off. 1462 */ 1463 return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8, 1464 XE_HW_ENGINE_BCS1); 1465 } 1466 1467 static u32 emit_clear_cmd_len(struct xe_gt *gt) 1468 { 1469 if (has_service_copy_support(gt)) 1470 return PVC_MEM_SET_CMD_LEN_DW; 1471 else 1472 return XY_FAST_COLOR_BLT_DW; 1473 } 1474 1475 static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs, 1476 u32 size, u32 pitch, bool is_vram) 1477 { 1478 if (has_service_copy_support(gt)) 1479 emit_clear_link_copy(gt, bb, src_ofs, size, pitch); 1480 else 1481 emit_clear_main_copy(gt, bb, src_ofs, size, pitch, 1482 is_vram); 1483 } 1484 1485 /** 1486 * xe_migrate_clear() - Copy content of TTM resources. 1487 * @m: The migration context. 1488 * @bo: The buffer object @dst is currently bound to. 1489 * @dst: The dst TTM resource to be cleared. 1490 * @clear_flags: flags to specify which data to clear: CCS, BO, or both. 1491 * 1492 * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set. 1493 * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA. 1494 * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata. 1495 * TODO: Eliminate the @bo argument. 1496 * 1497 * Return: Pointer to a dma_fence representing the last clear batch, or 1498 * an error pointer on failure. If there is a failure, any clear operation 1499 * started by the function call has been synced. 1500 */ 1501 struct dma_fence *xe_migrate_clear(struct xe_migrate *m, 1502 struct xe_bo *bo, 1503 struct ttm_resource *dst, 1504 u32 clear_flags) 1505 { 1506 bool clear_vram = mem_type_is_vram(dst->mem_type); 1507 bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags; 1508 bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags; 1509 struct xe_gt *gt = m->tile->primary_gt; 1510 struct xe_device *xe = gt_to_xe(gt); 1511 bool clear_only_system_ccs = false; 1512 struct dma_fence *fence = NULL; 1513 u64 size = xe_bo_size(bo); 1514 struct xe_res_cursor src_it; 1515 struct ttm_resource *src = dst; 1516 int err; 1517 1518 if (WARN_ON(!clear_bo_data && !clear_ccs)) 1519 return NULL; 1520 1521 if (!clear_bo_data && clear_ccs && !IS_DGFX(xe)) 1522 clear_only_system_ccs = true; 1523 1524 if (!clear_vram) 1525 xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it); 1526 else 1527 xe_res_first(src, 0, xe_bo_size(bo), &src_it); 1528 1529 while (size) { 1530 u64 clear_L0_ofs; 1531 u32 clear_L0_pt; 1532 u32 flush_flags = 0; 1533 u64 clear_L0; 1534 struct xe_sched_job *job; 1535 struct xe_bb *bb; 1536 u32 batch_size, update_idx; 1537 u32 pte_flags; 1538 1539 bool usm = xe->info.has_usm; 1540 u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE; 1541 1542 clear_L0 = xe_migrate_res_sizes(m, &src_it); 1543 1544 /* Calculate final sizes and batch size.. */ 1545 pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0; 1546 batch_size = 1 + 1547 pte_update_size(m, pte_flags, src, &src_it, 1548 &clear_L0, &clear_L0_ofs, &clear_L0_pt, 1549 clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0, 1550 avail_pts); 1551 1552 if (xe_migrate_needs_ccs_emit(xe)) 1553 batch_size += EMIT_COPY_CCS_DW; 1554 1555 /* Clear commands */ 1556 1557 if (WARN_ON_ONCE(!clear_L0)) 1558 break; 1559 1560 bb = xe_bb_new(gt, batch_size, usm); 1561 if (IS_ERR(bb)) { 1562 err = PTR_ERR(bb); 1563 goto err_sync; 1564 } 1565 1566 size -= clear_L0; 1567 /* Preemption is enabled again by the ring ops. */ 1568 if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it)) { 1569 xe_res_next(&src_it, clear_L0); 1570 } else { 1571 emit_pte(m, bb, clear_L0_pt, clear_vram, 1572 clear_only_system_ccs, &src_it, clear_L0, dst); 1573 flush_flags |= MI_INVALIDATE_TLB; 1574 } 1575 1576 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; 1577 update_idx = bb->len; 1578 1579 if (clear_bo_data) 1580 emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram); 1581 1582 if (xe_migrate_needs_ccs_emit(xe)) { 1583 emit_copy_ccs(gt, bb, clear_L0_ofs, true, 1584 m->cleared_mem_ofs, false, clear_L0); 1585 flush_flags |= MI_FLUSH_DW_CCS; 1586 } 1587 1588 job = xe_bb_create_migration_job(m->q, bb, 1589 xe_migrate_batch_base(m, usm), 1590 update_idx); 1591 if (IS_ERR(job)) { 1592 err = PTR_ERR(job); 1593 goto err; 1594 } 1595 1596 xe_sched_job_add_migrate_flush(job, flush_flags); 1597 if (!fence) { 1598 /* 1599 * There can't be anything userspace related at this 1600 * point, so we just need to respect any potential move 1601 * fences, which are always tracked as 1602 * DMA_RESV_USAGE_KERNEL. 1603 */ 1604 err = xe_sched_job_add_deps(job, bo->ttm.base.resv, 1605 DMA_RESV_USAGE_KERNEL); 1606 if (err) 1607 goto err_job; 1608 } 1609 1610 mutex_lock(&m->job_mutex); 1611 xe_sched_job_arm(job); 1612 dma_fence_put(fence); 1613 fence = dma_fence_get(&job->drm.s_fence->finished); 1614 xe_sched_job_push(job); 1615 1616 dma_fence_put(m->fence); 1617 m->fence = dma_fence_get(fence); 1618 1619 mutex_unlock(&m->job_mutex); 1620 1621 xe_bb_free(bb, fence); 1622 continue; 1623 1624 err_job: 1625 xe_sched_job_put(job); 1626 err: 1627 xe_bb_free(bb, NULL); 1628 err_sync: 1629 /* Sync partial copies if any. FIXME: job_mutex? */ 1630 if (fence) { 1631 dma_fence_wait(fence, false); 1632 dma_fence_put(fence); 1633 } 1634 1635 return ERR_PTR(err); 1636 } 1637 1638 if (clear_ccs) 1639 bo->ccs_cleared = true; 1640 1641 return fence; 1642 } 1643 1644 static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs, 1645 const struct xe_vm_pgtable_update_op *pt_op, 1646 const struct xe_vm_pgtable_update *update, 1647 struct xe_migrate_pt_update *pt_update) 1648 { 1649 const struct xe_migrate_pt_update_ops *ops = pt_update->ops; 1650 u32 chunk; 1651 u32 ofs = update->ofs, size = update->qwords; 1652 1653 /* 1654 * If we have 512 entries (max), we would populate it ourselves, 1655 * and update the PDE above it to the new pointer. 1656 * The only time this can only happen if we have to update the top 1657 * PDE. This requires a BO that is almost vm->size big. 1658 * 1659 * This shouldn't be possible in practice.. might change when 16K 1660 * pages are used. Hence the assert. 1661 */ 1662 xe_tile_assert(tile, update->qwords < MAX_NUM_PTE); 1663 if (!ppgtt_ofs) 1664 ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile), 1665 xe_bo_addr(update->pt_bo, 0, 1666 XE_PAGE_SIZE), false); 1667 1668 do { 1669 u64 addr = ppgtt_ofs + ofs * 8; 1670 1671 chunk = min(size, MAX_PTE_PER_SDI); 1672 1673 /* Ensure populatefn can do memset64 by aligning bb->cs */ 1674 if (!(bb->len & 1)) 1675 bb->cs[bb->len++] = MI_NOOP; 1676 1677 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); 1678 bb->cs[bb->len++] = lower_32_bits(addr); 1679 bb->cs[bb->len++] = upper_32_bits(addr); 1680 if (pt_op->bind) 1681 ops->populate(pt_update, tile, NULL, bb->cs + bb->len, 1682 ofs, chunk, update); 1683 else 1684 ops->clear(pt_update, tile, NULL, bb->cs + bb->len, 1685 ofs, chunk, update); 1686 1687 bb->len += chunk * 2; 1688 ofs += chunk; 1689 size -= chunk; 1690 } while (size); 1691 } 1692 1693 struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m) 1694 { 1695 return xe_vm_get(m->q->vm); 1696 } 1697 1698 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) 1699 struct migrate_test_params { 1700 struct xe_test_priv base; 1701 bool force_gpu; 1702 }; 1703 1704 #define to_migrate_test_params(_priv) \ 1705 container_of(_priv, struct migrate_test_params, base) 1706 #endif 1707 1708 static struct dma_fence * 1709 xe_migrate_update_pgtables_cpu(struct xe_migrate *m, 1710 struct xe_migrate_pt_update *pt_update) 1711 { 1712 XE_TEST_DECLARE(struct migrate_test_params *test = 1713 to_migrate_test_params 1714 (xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));) 1715 const struct xe_migrate_pt_update_ops *ops = pt_update->ops; 1716 struct xe_vm *vm = pt_update->vops->vm; 1717 struct xe_vm_pgtable_update_ops *pt_update_ops = 1718 &pt_update->vops->pt_update_ops[pt_update->tile_id]; 1719 int err; 1720 u32 i, j; 1721 1722 if (XE_TEST_ONLY(test && test->force_gpu)) 1723 return ERR_PTR(-ETIME); 1724 1725 if (ops->pre_commit) { 1726 pt_update->job = NULL; 1727 err = ops->pre_commit(pt_update); 1728 if (err) 1729 return ERR_PTR(err); 1730 } 1731 1732 for (i = 0; i < pt_update_ops->num_ops; ++i) { 1733 const struct xe_vm_pgtable_update_op *pt_op = 1734 &pt_update_ops->ops[i]; 1735 1736 for (j = 0; j < pt_op->num_entries; j++) { 1737 const struct xe_vm_pgtable_update *update = 1738 &pt_op->entries[j]; 1739 1740 if (pt_op->bind) 1741 ops->populate(pt_update, m->tile, 1742 &update->pt_bo->vmap, NULL, 1743 update->ofs, update->qwords, 1744 update); 1745 else 1746 ops->clear(pt_update, m->tile, 1747 &update->pt_bo->vmap, NULL, 1748 update->ofs, update->qwords, update); 1749 } 1750 } 1751 1752 trace_xe_vm_cpu_bind(vm); 1753 xe_device_wmb(vm->xe); 1754 1755 return dma_fence_get_stub(); 1756 } 1757 1758 static struct dma_fence * 1759 __xe_migrate_update_pgtables(struct xe_migrate *m, 1760 struct xe_migrate_pt_update *pt_update, 1761 struct xe_vm_pgtable_update_ops *pt_update_ops) 1762 { 1763 const struct xe_migrate_pt_update_ops *ops = pt_update->ops; 1764 struct xe_tile *tile = m->tile; 1765 struct xe_gt *gt = tile->primary_gt; 1766 struct xe_device *xe = tile_to_xe(tile); 1767 struct xe_sched_job *job; 1768 struct dma_fence *fence; 1769 struct drm_suballoc *sa_bo = NULL; 1770 struct xe_bb *bb; 1771 u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0; 1772 u32 num_updates = 0, current_update = 0; 1773 u64 addr; 1774 int err = 0; 1775 bool is_migrate = pt_update_ops->q == m->q; 1776 bool usm = is_migrate && xe->info.has_usm; 1777 1778 for (i = 0; i < pt_update_ops->num_ops; ++i) { 1779 struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i]; 1780 struct xe_vm_pgtable_update *updates = pt_op->entries; 1781 1782 num_updates += pt_op->num_entries; 1783 for (j = 0; j < pt_op->num_entries; ++j) { 1784 u32 num_cmds = DIV_ROUND_UP(updates[j].qwords, 1785 MAX_PTE_PER_SDI); 1786 1787 /* align noop + MI_STORE_DATA_IMM cmd prefix */ 1788 batch_size += 4 * num_cmds + updates[j].qwords * 2; 1789 } 1790 } 1791 1792 /* fixed + PTE entries */ 1793 if (IS_DGFX(xe)) 1794 batch_size += 2; 1795 else 1796 batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) + 1797 num_updates * 2; 1798 1799 bb = xe_bb_new(gt, batch_size, usm); 1800 if (IS_ERR(bb)) 1801 return ERR_CAST(bb); 1802 1803 /* For sysmem PTE's, need to map them in our hole.. */ 1804 if (!IS_DGFX(xe)) { 1805 u16 pat_index = xe->pat.idx[XE_CACHE_WB]; 1806 u32 ptes, ofs; 1807 1808 ppgtt_ofs = NUM_KERNEL_PDE - 1; 1809 if (!is_migrate) { 1810 u32 num_units = DIV_ROUND_UP(num_updates, 1811 NUM_VMUSA_WRITES_PER_UNIT); 1812 1813 if (num_units > m->vm_update_sa.size) { 1814 err = -ENOBUFS; 1815 goto err_bb; 1816 } 1817 sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units, 1818 GFP_KERNEL, true, 0); 1819 if (IS_ERR(sa_bo)) { 1820 err = PTR_ERR(sa_bo); 1821 goto err_bb; 1822 } 1823 1824 ppgtt_ofs = NUM_KERNEL_PDE + 1825 (drm_suballoc_soffset(sa_bo) / 1826 NUM_VMUSA_UNIT_PER_PAGE); 1827 page_ofs = (drm_suballoc_soffset(sa_bo) % 1828 NUM_VMUSA_UNIT_PER_PAGE) * 1829 VM_SA_UPDATE_UNIT_SIZE; 1830 } 1831 1832 /* Map our PT's to gtt */ 1833 i = 0; 1834 j = 0; 1835 ptes = num_updates; 1836 ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs; 1837 while (ptes) { 1838 u32 chunk = min(MAX_PTE_PER_SDI, ptes); 1839 u32 idx = 0; 1840 1841 bb->cs[bb->len++] = MI_STORE_DATA_IMM | 1842 MI_SDI_NUM_QW(chunk); 1843 bb->cs[bb->len++] = ofs; 1844 bb->cs[bb->len++] = 0; /* upper_32_bits */ 1845 1846 for (; i < pt_update_ops->num_ops; ++i) { 1847 struct xe_vm_pgtable_update_op *pt_op = 1848 &pt_update_ops->ops[i]; 1849 struct xe_vm_pgtable_update *updates = pt_op->entries; 1850 1851 for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) { 1852 struct xe_vm *vm = pt_update->vops->vm; 1853 struct xe_bo *pt_bo = updates[j].pt_bo; 1854 1855 if (idx == chunk) 1856 goto next_cmd; 1857 1858 xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K); 1859 1860 /* Map a PT at most once */ 1861 if (pt_bo->update_index < 0) 1862 pt_bo->update_index = current_update; 1863 1864 addr = vm->pt_ops->pte_encode_bo(pt_bo, 0, 1865 pat_index, 0); 1866 bb->cs[bb->len++] = lower_32_bits(addr); 1867 bb->cs[bb->len++] = upper_32_bits(addr); 1868 } 1869 1870 j = 0; 1871 } 1872 1873 next_cmd: 1874 ptes -= chunk; 1875 ofs += chunk * sizeof(u64); 1876 } 1877 1878 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; 1879 update_idx = bb->len; 1880 1881 addr = xe_migrate_vm_addr(ppgtt_ofs, 0) + 1882 (page_ofs / sizeof(u64)) * XE_PAGE_SIZE; 1883 for (i = 0; i < pt_update_ops->num_ops; ++i) { 1884 struct xe_vm_pgtable_update_op *pt_op = 1885 &pt_update_ops->ops[i]; 1886 struct xe_vm_pgtable_update *updates = pt_op->entries; 1887 1888 for (j = 0; j < pt_op->num_entries; ++j) { 1889 struct xe_bo *pt_bo = updates[j].pt_bo; 1890 1891 write_pgtable(tile, bb, addr + 1892 pt_bo->update_index * XE_PAGE_SIZE, 1893 pt_op, &updates[j], pt_update); 1894 } 1895 } 1896 } else { 1897 /* phys pages, no preamble required */ 1898 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; 1899 update_idx = bb->len; 1900 1901 for (i = 0; i < pt_update_ops->num_ops; ++i) { 1902 struct xe_vm_pgtable_update_op *pt_op = 1903 &pt_update_ops->ops[i]; 1904 struct xe_vm_pgtable_update *updates = pt_op->entries; 1905 1906 for (j = 0; j < pt_op->num_entries; ++j) 1907 write_pgtable(tile, bb, 0, pt_op, &updates[j], 1908 pt_update); 1909 } 1910 } 1911 1912 job = xe_bb_create_migration_job(pt_update_ops->q, bb, 1913 xe_migrate_batch_base(m, usm), 1914 update_idx); 1915 if (IS_ERR(job)) { 1916 err = PTR_ERR(job); 1917 goto err_sa; 1918 } 1919 1920 xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB); 1921 1922 if (ops->pre_commit) { 1923 pt_update->job = job; 1924 err = ops->pre_commit(pt_update); 1925 if (err) 1926 goto err_job; 1927 } 1928 if (is_migrate) 1929 mutex_lock(&m->job_mutex); 1930 1931 xe_sched_job_arm(job); 1932 fence = dma_fence_get(&job->drm.s_fence->finished); 1933 xe_sched_job_push(job); 1934 1935 if (is_migrate) 1936 mutex_unlock(&m->job_mutex); 1937 1938 xe_bb_free(bb, fence); 1939 drm_suballoc_free(sa_bo, fence); 1940 1941 return fence; 1942 1943 err_job: 1944 xe_sched_job_put(job); 1945 err_sa: 1946 drm_suballoc_free(sa_bo, NULL); 1947 err_bb: 1948 xe_bb_free(bb, NULL); 1949 return ERR_PTR(err); 1950 } 1951 1952 /** 1953 * xe_migrate_update_pgtables() - Pipelined page-table update 1954 * @m: The migrate context. 1955 * @pt_update: PT update arguments 1956 * 1957 * Perform a pipelined page-table update. The update descriptors are typically 1958 * built under the same lock critical section as a call to this function. If 1959 * using the default engine for the updates, they will be performed in the 1960 * order they grab the job_mutex. If different engines are used, external 1961 * synchronization is needed for overlapping updates to maintain page-table 1962 * consistency. Note that the meaning of "overlapping" is that the updates 1963 * touch the same page-table, which might be a higher-level page-directory. 1964 * If no pipelining is needed, then updates may be performed by the cpu. 1965 * 1966 * Return: A dma_fence that, when signaled, indicates the update completion. 1967 */ 1968 struct dma_fence * 1969 xe_migrate_update_pgtables(struct xe_migrate *m, 1970 struct xe_migrate_pt_update *pt_update) 1971 1972 { 1973 struct xe_vm_pgtable_update_ops *pt_update_ops = 1974 &pt_update->vops->pt_update_ops[pt_update->tile_id]; 1975 struct dma_fence *fence; 1976 1977 fence = xe_migrate_update_pgtables_cpu(m, pt_update); 1978 1979 /* -ETIME indicates a job is needed, anything else is legit error */ 1980 if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME) 1981 return fence; 1982 1983 return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops); 1984 } 1985 1986 /** 1987 * xe_migrate_wait() - Complete all operations using the xe_migrate context 1988 * @m: Migrate context to wait for. 1989 * 1990 * Waits until the GPU no longer uses the migrate context's default engine 1991 * or its page-table objects. FIXME: What about separate page-table update 1992 * engines? 1993 */ 1994 void xe_migrate_wait(struct xe_migrate *m) 1995 { 1996 if (m->fence) 1997 dma_fence_wait(m->fence, false); 1998 } 1999 2000 static u32 pte_update_cmd_size(u64 size) 2001 { 2002 u32 num_dword; 2003 u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE); 2004 2005 XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER); 2006 2007 /* 2008 * MI_STORE_DATA_IMM command is used to update page table. Each 2009 * instruction can update maximumly MAX_PTE_PER_SDI pte entries. To 2010 * update n (n <= MAX_PTE_PER_SDI) pte entries, we need: 2011 * 2012 * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc) 2013 * - 2 dword for the page table's physical location 2014 * - 2*n dword for value of pte to fill (each pte entry is 2 dwords) 2015 */ 2016 num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI); 2017 num_dword += entries * 2; 2018 2019 return num_dword; 2020 } 2021 2022 static void build_pt_update_batch_sram(struct xe_migrate *m, 2023 struct xe_bb *bb, u32 pt_offset, 2024 struct drm_pagemap_addr *sram_addr, 2025 u32 size, int level) 2026 { 2027 u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB]; 2028 u64 gpu_page_size = 0x1ull << xe_pt_shift(level); 2029 u32 ptes; 2030 int i = 0; 2031 2032 xe_tile_assert(m->tile, PAGE_ALIGNED(size)); 2033 2034 ptes = DIV_ROUND_UP(size, gpu_page_size); 2035 while (ptes) { 2036 u32 chunk = min(MAX_PTE_PER_SDI, ptes); 2037 2038 if (!level) 2039 chunk = ALIGN_DOWN(chunk, PAGE_SIZE / XE_PAGE_SIZE); 2040 2041 bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk); 2042 bb->cs[bb->len++] = pt_offset; 2043 bb->cs[bb->len++] = 0; 2044 2045 pt_offset += chunk * 8; 2046 ptes -= chunk; 2047 2048 while (chunk--) { 2049 u64 addr = sram_addr[i].addr; 2050 u64 pte; 2051 2052 xe_tile_assert(m->tile, sram_addr[i].proto == 2053 DRM_INTERCONNECT_SYSTEM || 2054 sram_addr[i].proto == XE_INTERCONNECT_P2P); 2055 xe_tile_assert(m->tile, addr); 2056 xe_tile_assert(m->tile, PAGE_ALIGNED(addr)); 2057 2058 again: 2059 pte = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe, 2060 addr, pat_index, 2061 level, false, 0); 2062 bb->cs[bb->len++] = lower_32_bits(pte); 2063 bb->cs[bb->len++] = upper_32_bits(pte); 2064 2065 if (gpu_page_size < PAGE_SIZE) { 2066 addr += XE_PAGE_SIZE; 2067 if (!PAGE_ALIGNED(addr)) { 2068 chunk--; 2069 goto again; 2070 } 2071 i++; 2072 } else { 2073 i += gpu_page_size / PAGE_SIZE; 2074 } 2075 } 2076 } 2077 } 2078 2079 static bool xe_migrate_vram_use_pde(struct drm_pagemap_addr *sram_addr, 2080 unsigned long size) 2081 { 2082 u32 large_size = (0x1 << xe_pt_shift(1)); 2083 unsigned long i, incr = large_size / PAGE_SIZE; 2084 2085 for (i = 0; i < DIV_ROUND_UP(size, PAGE_SIZE); i += incr) 2086 if (PAGE_SIZE << sram_addr[i].order != large_size) 2087 return false; 2088 2089 return true; 2090 } 2091 2092 #define XE_CACHELINE_BYTES 64ull 2093 #define XE_CACHELINE_MASK (XE_CACHELINE_BYTES - 1) 2094 2095 static u32 xe_migrate_copy_pitch(struct xe_device *xe, u32 len) 2096 { 2097 u32 pitch; 2098 2099 if (IS_ALIGNED(len, PAGE_SIZE)) 2100 pitch = PAGE_SIZE; 2101 else if (IS_ALIGNED(len, SZ_4K)) 2102 pitch = SZ_4K; 2103 else if (IS_ALIGNED(len, SZ_256)) 2104 pitch = SZ_256; 2105 else if (IS_ALIGNED(len, 4)) 2106 pitch = 4; 2107 else 2108 pitch = 1; 2109 2110 xe_assert(xe, pitch > 1 || xe->info.has_mem_copy_instr); 2111 return pitch; 2112 } 2113 2114 static struct dma_fence *xe_migrate_vram(struct xe_migrate *m, 2115 unsigned long len, 2116 unsigned long sram_offset, 2117 struct drm_pagemap_addr *sram_addr, 2118 u64 vram_addr, 2119 struct dma_fence *deps, 2120 const enum xe_migrate_copy_dir dir) 2121 { 2122 struct xe_gt *gt = m->tile->primary_gt; 2123 struct xe_device *xe = gt_to_xe(gt); 2124 bool use_usm_batch = xe->info.has_usm; 2125 struct dma_fence *fence = NULL; 2126 u32 batch_size = 1; 2127 u64 src_L0_ofs, dst_L0_ofs; 2128 struct xe_sched_job *job; 2129 struct xe_bb *bb; 2130 u32 update_idx, pt_slot = 0; 2131 unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE); 2132 unsigned int pitch = xe_migrate_copy_pitch(xe, len); 2133 int err; 2134 unsigned long i, j; 2135 bool use_pde = xe_migrate_vram_use_pde(sram_addr, len + sram_offset); 2136 2137 if (!xe->info.has_mem_copy_instr && 2138 drm_WARN_ON(&xe->drm, 2139 (!IS_ALIGNED(len, pitch)) || (sram_offset | vram_addr) & XE_CACHELINE_MASK)) 2140 return ERR_PTR(-EOPNOTSUPP); 2141 2142 xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER); 2143 2144 batch_size += pte_update_cmd_size(npages << PAGE_SHIFT); 2145 batch_size += EMIT_COPY_DW; 2146 2147 bb = xe_bb_new(gt, batch_size, use_usm_batch); 2148 if (IS_ERR(bb)) { 2149 err = PTR_ERR(bb); 2150 return ERR_PTR(err); 2151 } 2152 2153 /* 2154 * If the order of a struct drm_pagemap_addr entry is greater than 0, 2155 * the entry is populated by GPU pagemap but subsequent entries within 2156 * the range of that order are not populated. 2157 * build_pt_update_batch_sram() expects a fully populated array of 2158 * struct drm_pagemap_addr. Ensure this is the case even with higher 2159 * orders. 2160 */ 2161 for (i = 0; !use_pde && i < npages;) { 2162 unsigned int order = sram_addr[i].order; 2163 2164 for (j = 1; j < NR_PAGES(order) && i + j < npages; j++) 2165 if (!sram_addr[i + j].addr) 2166 sram_addr[i + j].addr = sram_addr[i].addr + j * PAGE_SIZE; 2167 2168 i += NR_PAGES(order); 2169 } 2170 2171 if (use_pde) 2172 build_pt_update_batch_sram(m, bb, m->large_page_copy_pdes, 2173 sram_addr, npages << PAGE_SHIFT, 1); 2174 else 2175 build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE, 2176 sram_addr, npages << PAGE_SHIFT, 0); 2177 2178 if (dir == XE_MIGRATE_COPY_TO_VRAM) { 2179 if (use_pde) 2180 src_L0_ofs = m->large_page_copy_ofs + sram_offset; 2181 else 2182 src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset; 2183 dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false); 2184 2185 } else { 2186 src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false); 2187 if (use_pde) 2188 dst_L0_ofs = m->large_page_copy_ofs + sram_offset; 2189 else 2190 dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset; 2191 } 2192 2193 bb->cs[bb->len++] = MI_BATCH_BUFFER_END; 2194 update_idx = bb->len; 2195 2196 emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch); 2197 2198 job = xe_bb_create_migration_job(m->q, bb, 2199 xe_migrate_batch_base(m, use_usm_batch), 2200 update_idx); 2201 if (IS_ERR(job)) { 2202 err = PTR_ERR(job); 2203 goto err; 2204 } 2205 2206 xe_sched_job_add_migrate_flush(job, MI_INVALIDATE_TLB); 2207 2208 if (deps && !dma_fence_is_signaled(deps)) { 2209 dma_fence_get(deps); 2210 err = drm_sched_job_add_dependency(&job->drm, deps); 2211 if (err) 2212 dma_fence_wait(deps, false); 2213 err = 0; 2214 } 2215 2216 mutex_lock(&m->job_mutex); 2217 xe_sched_job_arm(job); 2218 fence = dma_fence_get(&job->drm.s_fence->finished); 2219 xe_sched_job_push(job); 2220 2221 dma_fence_put(m->fence); 2222 m->fence = dma_fence_get(fence); 2223 mutex_unlock(&m->job_mutex); 2224 2225 xe_bb_free(bb, fence); 2226 2227 return fence; 2228 2229 err: 2230 xe_bb_free(bb, NULL); 2231 2232 return ERR_PTR(err); 2233 } 2234 2235 /** 2236 * xe_migrate_to_vram() - Migrate to VRAM 2237 * @m: The migration context. 2238 * @npages: Number of pages to migrate. 2239 * @src_addr: Array of DMA information (source of migrate) 2240 * @dst_addr: Device physical address of VRAM (destination of migrate) 2241 * @deps: struct dma_fence representing the dependencies that need 2242 * to be signaled before migration. 2243 * 2244 * Copy from an array dma addresses to a VRAM device physical address 2245 * 2246 * Return: dma fence for migrate to signal completion on success, ERR_PTR on 2247 * failure 2248 */ 2249 struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m, 2250 unsigned long npages, 2251 struct drm_pagemap_addr *src_addr, 2252 u64 dst_addr, 2253 struct dma_fence *deps) 2254 { 2255 return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr, 2256 deps, XE_MIGRATE_COPY_TO_VRAM); 2257 } 2258 2259 /** 2260 * xe_migrate_from_vram() - Migrate from VRAM 2261 * @m: The migration context. 2262 * @npages: Number of pages to migrate. 2263 * @src_addr: Device physical address of VRAM (source of migrate) 2264 * @dst_addr: Array of DMA information (destination of migrate) 2265 * @deps: struct dma_fence representing the dependencies that need 2266 * to be signaled before migration. 2267 * 2268 * Copy from a VRAM device physical address to an array dma addresses 2269 * 2270 * Return: dma fence for migrate to signal completion on success, ERR_PTR on 2271 * failure 2272 */ 2273 struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m, 2274 unsigned long npages, 2275 u64 src_addr, 2276 struct drm_pagemap_addr *dst_addr, 2277 struct dma_fence *deps) 2278 { 2279 return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr, 2280 deps, XE_MIGRATE_COPY_TO_SRAM); 2281 } 2282 2283 static void xe_migrate_dma_unmap(struct xe_device *xe, 2284 struct drm_pagemap_addr *pagemap_addr, 2285 int len, int write) 2286 { 2287 unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE); 2288 2289 for (i = 0; i < npages; ++i) { 2290 if (!pagemap_addr[i].addr) 2291 break; 2292 2293 dma_unmap_page(xe->drm.dev, pagemap_addr[i].addr, PAGE_SIZE, 2294 write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 2295 } 2296 kfree(pagemap_addr); 2297 } 2298 2299 static struct drm_pagemap_addr *xe_migrate_dma_map(struct xe_device *xe, 2300 void *buf, int len, 2301 int write) 2302 { 2303 struct drm_pagemap_addr *pagemap_addr; 2304 unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE); 2305 2306 pagemap_addr = kcalloc(npages, sizeof(*pagemap_addr), GFP_KERNEL); 2307 if (!pagemap_addr) 2308 return ERR_PTR(-ENOMEM); 2309 2310 for (i = 0; i < npages; ++i) { 2311 dma_addr_t addr; 2312 struct page *page; 2313 enum dma_data_direction dir = write ? DMA_TO_DEVICE : 2314 DMA_FROM_DEVICE; 2315 2316 if (is_vmalloc_addr(buf)) 2317 page = vmalloc_to_page(buf); 2318 else 2319 page = virt_to_page(buf); 2320 2321 addr = dma_map_page(xe->drm.dev, page, 0, PAGE_SIZE, dir); 2322 if (dma_mapping_error(xe->drm.dev, addr)) 2323 goto err_fault; 2324 2325 pagemap_addr[i] = 2326 drm_pagemap_addr_encode(addr, 2327 DRM_INTERCONNECT_SYSTEM, 2328 0, dir); 2329 buf += PAGE_SIZE; 2330 } 2331 2332 return pagemap_addr; 2333 2334 err_fault: 2335 xe_migrate_dma_unmap(xe, pagemap_addr, len, write); 2336 return ERR_PTR(-EFAULT); 2337 } 2338 2339 /** 2340 * xe_migrate_access_memory - Access memory of a BO via GPU 2341 * 2342 * @m: The migration context. 2343 * @bo: buffer object 2344 * @offset: access offset into buffer object 2345 * @buf: pointer to caller memory to read into or write from 2346 * @len: length of access 2347 * @write: write access 2348 * 2349 * Access memory of a BO via GPU either reading in or writing from a passed in 2350 * pointer. Pointer is dma mapped for GPU access and GPU commands are issued to 2351 * read to or write from pointer. 2352 * 2353 * Returns: 2354 * 0 if successful, negative error code on failure. 2355 */ 2356 int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo, 2357 unsigned long offset, void *buf, int len, 2358 int write) 2359 { 2360 struct xe_tile *tile = m->tile; 2361 struct xe_device *xe = tile_to_xe(tile); 2362 struct xe_res_cursor cursor; 2363 struct dma_fence *fence = NULL; 2364 struct drm_pagemap_addr *pagemap_addr; 2365 unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK; 2366 int bytes_left = len, current_page = 0; 2367 void *orig_buf = buf; 2368 2369 xe_bo_assert_held(bo); 2370 2371 /* Use bounce buffer for small access and unaligned access */ 2372 if (!xe->info.has_mem_copy_instr && 2373 (!IS_ALIGNED(len, 4) || 2374 !IS_ALIGNED(page_offset, XE_CACHELINE_BYTES) || 2375 !IS_ALIGNED(offset, XE_CACHELINE_BYTES))) { 2376 int buf_offset = 0; 2377 void *bounce; 2378 int err; 2379 2380 BUILD_BUG_ON(!is_power_of_2(XE_CACHELINE_BYTES)); 2381 bounce = kmalloc(XE_CACHELINE_BYTES, GFP_KERNEL); 2382 if (!bounce) 2383 return -ENOMEM; 2384 2385 /* 2386 * Less than ideal for large unaligned access but this should be 2387 * fairly rare, can fixup if this becomes common. 2388 */ 2389 do { 2390 int copy_bytes = min_t(int, bytes_left, 2391 XE_CACHELINE_BYTES - 2392 (offset & XE_CACHELINE_MASK)); 2393 int ptr_offset = offset & XE_CACHELINE_MASK; 2394 2395 err = xe_migrate_access_memory(m, bo, 2396 offset & 2397 ~XE_CACHELINE_MASK, 2398 bounce, 2399 XE_CACHELINE_BYTES, 0); 2400 if (err) 2401 break; 2402 2403 if (write) { 2404 memcpy(bounce + ptr_offset, buf + buf_offset, copy_bytes); 2405 2406 err = xe_migrate_access_memory(m, bo, 2407 offset & ~XE_CACHELINE_MASK, 2408 bounce, 2409 XE_CACHELINE_BYTES, write); 2410 if (err) 2411 break; 2412 } else { 2413 memcpy(buf + buf_offset, bounce + ptr_offset, 2414 copy_bytes); 2415 } 2416 2417 bytes_left -= copy_bytes; 2418 buf_offset += copy_bytes; 2419 offset += copy_bytes; 2420 } while (bytes_left); 2421 2422 kfree(bounce); 2423 return err; 2424 } 2425 2426 pagemap_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write); 2427 if (IS_ERR(pagemap_addr)) 2428 return PTR_ERR(pagemap_addr); 2429 2430 xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor); 2431 2432 do { 2433 struct dma_fence *__fence; 2434 u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) + 2435 cursor.start; 2436 int current_bytes; 2437 u32 pitch; 2438 2439 if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER) 2440 current_bytes = min_t(int, bytes_left, 2441 MAX_PREEMPTDISABLE_TRANSFER); 2442 else 2443 current_bytes = min_t(int, bytes_left, cursor.size); 2444 2445 pitch = xe_migrate_copy_pitch(xe, current_bytes); 2446 if (xe->info.has_mem_copy_instr) 2447 current_bytes = min_t(int, current_bytes, U16_MAX * pitch); 2448 else 2449 current_bytes = min_t(int, current_bytes, 2450 round_down(S16_MAX * pitch, 2451 XE_CACHELINE_BYTES)); 2452 2453 __fence = xe_migrate_vram(m, current_bytes, 2454 (unsigned long)buf & ~PAGE_MASK, 2455 &pagemap_addr[current_page], 2456 vram_addr, NULL, write ? 2457 XE_MIGRATE_COPY_TO_VRAM : 2458 XE_MIGRATE_COPY_TO_SRAM); 2459 if (IS_ERR(__fence)) { 2460 if (fence) { 2461 dma_fence_wait(fence, false); 2462 dma_fence_put(fence); 2463 } 2464 fence = __fence; 2465 goto out_err; 2466 } 2467 2468 dma_fence_put(fence); 2469 fence = __fence; 2470 2471 buf += current_bytes; 2472 offset += current_bytes; 2473 current_page = (int)(buf - orig_buf) / PAGE_SIZE; 2474 bytes_left -= current_bytes; 2475 if (bytes_left) 2476 xe_res_next(&cursor, current_bytes); 2477 } while (bytes_left); 2478 2479 dma_fence_wait(fence, false); 2480 dma_fence_put(fence); 2481 2482 out_err: 2483 xe_migrate_dma_unmap(xe, pagemap_addr, len + page_offset, write); 2484 return IS_ERR(fence) ? PTR_ERR(fence) : 0; 2485 } 2486 2487 /** 2488 * xe_migrate_job_lock() - Lock migrate job lock 2489 * @m: The migration context. 2490 * @q: Queue associated with the operation which requires a lock 2491 * 2492 * Lock the migrate job lock if the queue is a migration queue, otherwise 2493 * assert the VM's dma-resv is held (user queue's have own locking). 2494 */ 2495 void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q) 2496 { 2497 bool is_migrate = q == m->q; 2498 2499 if (is_migrate) 2500 mutex_lock(&m->job_mutex); 2501 else 2502 xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */ 2503 } 2504 2505 /** 2506 * xe_migrate_job_unlock() - Unlock migrate job lock 2507 * @m: The migration context. 2508 * @q: Queue associated with the operation which requires a lock 2509 * 2510 * Unlock the migrate job lock if the queue is a migration queue, otherwise 2511 * assert the VM's dma-resv is held (user queue's have own locking). 2512 */ 2513 void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q) 2514 { 2515 bool is_migrate = q == m->q; 2516 2517 if (is_migrate) 2518 mutex_unlock(&m->job_mutex); 2519 else 2520 xe_vm_assert_held(q->user_vm); /* User queues VM's should be locked */ 2521 } 2522 2523 #if IS_ENABLED(CONFIG_PROVE_LOCKING) 2524 /** 2525 * xe_migrate_job_lock_assert() - Assert migrate job lock held of queue 2526 * @q: Migrate queue 2527 */ 2528 void xe_migrate_job_lock_assert(struct xe_exec_queue *q) 2529 { 2530 struct xe_migrate *m = gt_to_tile(q->gt)->migrate; 2531 2532 xe_gt_assert(q->gt, q == m->q); 2533 lockdep_assert_held(&m->job_mutex); 2534 } 2535 #endif 2536 2537 #if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) 2538 #include "tests/xe_migrate.c" 2539 #endif 2540