// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include "xe_migrate.h"

#include <linux/bitfield.h>
#include <linux/sizes.h>

#include <drm/drm_managed.h>
#include <drm/ttm/ttm_tt.h>
#include <uapi/drm/xe_drm.h>

#include <generated/xe_wa_oob.h>

#include "instructions/xe_gpu_commands.h"
#include "instructions/xe_mi_commands.h"
#include "regs/xe_gtt_defs.h"
#include "tests/xe_test.h"
#include "xe_assert.h"
#include "xe_bb.h"
#include "xe_bo.h"
#include "xe_exec_queue.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_hw_engine.h"
#include "xe_lrc.h"
#include "xe_map.h"
#include "xe_mocs.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
#include "xe_sa.h"
#include "xe_sched_job.h"
#include "xe_sync.h"
#include "xe_trace_bo.h"
#include "xe_vm.h"
#include "xe_vram.h"

/**
 * struct xe_migrate - migrate context.
 */
struct xe_migrate {
	/** @q: Default exec queue used for migration */
	struct xe_exec_queue *q;
	/** @tile: Backpointer to the tile this struct xe_migrate belongs to. */
	struct xe_tile *tile;
	/** @job_mutex: Timeline mutex for @q. */
	struct mutex job_mutex;
	/** @pt_bo: Page-table buffer object. */
	struct xe_bo *pt_bo;
	/** @batch_base_ofs: VM offset of the migration batch buffer */
	u64 batch_base_ofs;
	/** @usm_batch_base_ofs: VM offset of the usm batch buffer */
	u64 usm_batch_base_ofs;
	/** @cleared_mem_ofs: VM offset of @cleared_bo. */
	u64 cleared_mem_ofs;
	/**
	 * @fence: dma-fence representing the last migration job batch.
	 * Protected by @job_mutex.
	 */
	struct dma_fence *fence;
	/**
	 * @vm_update_sa: For integrated, used to suballocate page-tables
	 * out of the pt_bo.
	 */
	struct drm_suballoc_manager vm_update_sa;
	/** @min_chunk_size: For dgfx, minimum chunk size */
	u64 min_chunk_size;
};

#define MAX_PREEMPTDISABLE_TRANSFER SZ_8M /* Around 1ms. */
#define MAX_CCS_LIMITED_TRANSFER SZ_4M /* XE_PAGE_SIZE * (FIELD_MAX(XE2_CCS_SIZE_MASK) + 1) */
#define NUM_KERNEL_PDE 15
#define NUM_PT_SLOTS 32
#define LEVEL0_PAGE_TABLE_ENCODE_SIZE SZ_2M
#define MAX_NUM_PTE 512
#define IDENTITY_OFFSET 256ULL

/*
 * Although MI_STORE_DATA_IMM's "length" field is 10-bits, 0x3FE is the largest
 * legal value accepted.  Since that instruction field is always stored in
 * (val-2) format, this translates to 0x400 dwords for the true maximum length
 * of the instruction.  Subtracting the instruction header (1 dword) and
 * address (2 dwords), that leaves 0x3FD dwords (0x1FE qwords) for PTE values.
 */
#define MAX_PTE_PER_SDI 0x1FEU
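
/*
 * Worked example of the limit above (illustrative only, not used by the
 * code), assuming one qword (2 dwords) per PTE:
 *
 *   0x3FE (max length field) + 2 (val-2 bias)       = 0x400 dwords total
 *   0x400 - 1 (header) - 2 (address)                = 0x3FD payload dwords
 *   0x3FD dwords / 2 dwords per qword, rounded down = 0x1FE qword PTEs
 */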

static void xe_migrate_fini(void *arg)
{
	struct xe_migrate *m = arg;

	xe_vm_lock(m->q->vm, false);
	xe_bo_unpin(m->pt_bo);
	xe_vm_unlock(m->q->vm);

	dma_fence_put(m->fence);
	xe_bo_put(m->pt_bo);
	drm_suballoc_manager_fini(&m->vm_update_sa);
	mutex_destroy(&m->job_mutex);
	xe_vm_close_and_put(m->q->vm);
	xe_exec_queue_put(m->q);
}

static u64 xe_migrate_vm_addr(u64 slot, u32 level)
{
	XE_WARN_ON(slot >= NUM_PT_SLOTS);

	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
	return (slot + 1ULL) << xe_pt_shift(level + 1);
}

static u64 xe_migrate_vram_ofs(struct xe_device *xe, u64 addr, bool is_comp_pte)
{
	/*
	 * Remove the DPA to get a correct offset into identity table for the
	 * migrate offset
	 */
	u64 identity_offset = IDENTITY_OFFSET;

	if (GRAPHICS_VER(xe) >= 20 && is_comp_pte)
		identity_offset += DIV_ROUND_UP_ULL(xe_vram_region_actual_physical_size
						    (xe->mem.vram), SZ_1G);

	addr -= xe_vram_region_dpa_base(xe->mem.vram);
	return addr + (identity_offset << xe_pt_shift(2));
}
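
/*
 * Illustrative sketch of the math above (not used by the code), assuming the
 * usual 4 KiB page / 9 bits per level layout where xe_pt_shift(2) selects the
 * 1 GiB level: IDENTITY_OFFSET = 256 places the identity map at a 256 GiB
 * virtual offset, so for a device physical address @addr
 *
 *   vram_ofs = (addr - dpa_base) + (256ULL << xe_pt_shift(2))
 *            = (addr - dpa_base) + 256 GiB
 *
 * The compressed-PAT variant on Xe2+ starts one additional slot per GiB of
 * VRAM beyond that, matching the second identity map programmed in
 * xe_migrate_prepare_vm().
 */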

static void xe_migrate_program_identity(struct xe_device *xe, struct xe_vm *vm, struct xe_bo *bo,
					u64 map_ofs, u64 vram_offset, u16 pat_index, u64 pt_2m_ofs)
{
	struct xe_vram_region *vram = xe->mem.vram;
	resource_size_t dpa_base = xe_vram_region_dpa_base(vram);
	u64 pos, ofs, flags;
	u64 entry;
	/* XXX: Unclear if this should be usable_size? */
	u64 vram_limit = xe_vram_region_actual_physical_size(vram) + dpa_base;
	u32 level = 2;

	ofs = map_ofs + XE_PAGE_SIZE * level + vram_offset * 8;
	flags = vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level,
					    true, 0);

	xe_assert(xe, IS_ALIGNED(xe_vram_region_usable_size(vram), SZ_2M));

	/*
	 * Use 1GB pages when possible, last chunk always use 2M
	 * pages as mixing reserved memory (stolen, WOCPM) with a single
	 * mapping is not allowed on certain platforms.
	 */
	for (pos = dpa_base; pos < vram_limit;
	     pos += SZ_1G, ofs += 8) {
		if (pos + SZ_1G >= vram_limit) {
			entry = vm->pt_ops->pde_encode_bo(bo, pt_2m_ofs,
							  pat_index);
			xe_map_wr(xe, &bo->vmap, ofs, u64, entry);

			flags = vm->pt_ops->pte_encode_addr(xe, 0,
							    pat_index,
							    level - 1,
							    true, 0);

			for (ofs = pt_2m_ofs; pos < vram_limit;
			     pos += SZ_2M, ofs += 8)
				xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
			break;	/* Ensure pos == vram_limit assert correct */
		}

		xe_map_wr(xe, &bo->vmap, ofs, u64, pos | flags);
	}

	xe_assert(xe, pos == vram_limit);
}

static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
				 struct xe_vm *vm)
{
	struct xe_device *xe = tile_to_xe(tile);
	u16 pat_index = xe->pat.idx[XE_CACHE_WB];
	u8 id = tile->id;
	u32 num_entries = NUM_PT_SLOTS, num_level = vm->pt_root[id]->level;
#define VRAM_IDENTITY_MAP_COUNT	2
	u32 num_setup = num_level + VRAM_IDENTITY_MAP_COUNT;
#undef VRAM_IDENTITY_MAP_COUNT
	u32 map_ofs, level, i;
	struct xe_bo *bo, *batch = tile->mem.kernel_bb_pool->bo;
	u64 entry, pt29_ofs;

	/* Can't bump NUM_PT_SLOTS too high */
	BUILD_BUG_ON(NUM_PT_SLOTS > SZ_2M / XE_PAGE_SIZE);
	/* Must be a multiple of 64K to support all platforms */
	BUILD_BUG_ON(NUM_PT_SLOTS * XE_PAGE_SIZE % SZ_64K);
	/* And one slot reserved for the 4KiB page table updates */
	BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));

	/* Need to be sure everything fits in the first PT, or create more */
	xe_tile_assert(tile, m->batch_base_ofs + xe_bo_size(batch) < SZ_2M);

	bo = xe_bo_create_pin_map(vm->xe, tile, vm,
				  num_entries * XE_PAGE_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				  XE_BO_FLAG_PAGETABLE);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	/* PT30 & PT31 reserved for 2M identity map */
	pt29_ofs = xe_bo_size(bo) - 3 * XE_PAGE_SIZE;
	entry = vm->pt_ops->pde_encode_bo(bo, pt29_ofs, pat_index);
	xe_pt_write(xe, &vm->pt_root[id]->bo->vmap, 0, entry);

	map_ofs = (num_entries - num_setup) * XE_PAGE_SIZE;

	/* Map the entire BO in our level 0 pt */
	for (i = 0, level = 0; i < num_entries; level++) {
		entry = vm->pt_ops->pte_encode_bo(bo, i * XE_PAGE_SIZE,
						  pat_index, 0);

		xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64, entry);

		if (vm->flags & XE_VM_FLAG_64K)
			i += 16;
		else
			i += 1;
	}

	if (!IS_DGFX(xe)) {
		/* Write out batch too */
		m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
		for (i = 0; i < xe_bo_size(batch);
		     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
		     XE_PAGE_SIZE) {
			entry = vm->pt_ops->pte_encode_bo(batch, i,
							  pat_index, 0);

			xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
				  entry);
			level++;
		}
		if (xe->info.has_usm) {
			xe_tile_assert(tile, xe_bo_size(batch) == SZ_1M);

			batch = tile->primary_gt->usm.bb_pool->bo;
			m->usm_batch_base_ofs = m->batch_base_ofs + SZ_1M;
			xe_tile_assert(tile, xe_bo_size(batch) == SZ_512K);

			for (i = 0; i < xe_bo_size(batch);
			     i += vm->flags & XE_VM_FLAG_64K ? XE_64K_PAGE_SIZE :
			     XE_PAGE_SIZE) {
				entry = vm->pt_ops->pte_encode_bo(batch, i,
								  pat_index, 0);

				xe_map_wr(xe, &bo->vmap, map_ofs + level * 8, u64,
					  entry);
				level++;
			}
		}
	} else {
		u64 batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);

		m->batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);

		if (xe->info.has_usm) {
			batch = tile->primary_gt->usm.bb_pool->bo;
			batch_addr = xe_bo_addr(batch, 0, XE_PAGE_SIZE);
			m->usm_batch_base_ofs = xe_migrate_vram_ofs(xe, batch_addr, false);
		}
	}

	for (level = 1; level < num_level; level++) {
		u32 flags = 0;

		if (vm->flags & XE_VM_FLAG_64K && level == 1)
			flags = XE_PDE_64K;

		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
						  XE_PAGE_SIZE, pat_index);
		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
			  entry | flags);
	}

	/* Write PDE's that point to our BO. */
	for (i = 0; i < map_ofs / PAGE_SIZE; i++) {
		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
						  pat_index);

		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +
			  (i + 1) * 8, u64, entry);
	}

	/* Set up a 1GiB NULL mapping at 255GiB offset. */
	level = 2;
	xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level + 255 * 8, u64,
		  vm->pt_ops->pte_encode_addr(xe, 0, pat_index, level, IS_DGFX(xe), 0)
		  | XE_PTE_NULL);
	m->cleared_mem_ofs = (255ULL << xe_pt_shift(level));

	/* Identity map the entire vram at 256GiB offset */
	if (IS_DGFX(xe)) {
		u64 pt30_ofs = xe_bo_size(bo) - 2 * XE_PAGE_SIZE;
		resource_size_t actual_phy_size = xe_vram_region_actual_physical_size(xe->mem.vram);

		xe_migrate_program_identity(xe, vm, bo, map_ofs, IDENTITY_OFFSET,
					    pat_index, pt30_ofs);
		xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET) * SZ_1G);

		/*
		 * Identity map the entire vram for compressed pat_index for xe2+
		 * if flat ccs is enabled.
		 */
		if (GRAPHICS_VER(xe) >= 20 && xe_device_has_flat_ccs(xe)) {
			u16 comp_pat_index = xe->pat.idx[XE_CACHE_NONE_COMPRESSION];
			u64 vram_offset = IDENTITY_OFFSET +
				DIV_ROUND_UP_ULL(actual_phy_size, SZ_1G);
			u64 pt31_ofs = xe_bo_size(bo) - XE_PAGE_SIZE;

			xe_assert(xe, actual_phy_size <= (MAX_NUM_PTE - IDENTITY_OFFSET -
							  IDENTITY_OFFSET / 2) * SZ_1G);
			xe_migrate_program_identity(xe, vm, bo, map_ofs, vram_offset,
						    comp_pat_index, pt31_ofs);
		}
	}

	/*
	 * Example layout created above, with root level = 3:
	 * [PT0...PT7]: kernel PT's for copy/clear; 64 or 4KiB PTE's
	 * [PT8]: Kernel PT for VM_BIND, 4 KiB PTE's
	 * [PT9...PT26]: Userspace PT's for VM_BIND, 4 KiB PTE's
	 * [PT27 = PDE 0] [PT28 = PDE 1] [PT29 = PDE 2] [PT30 & PT31 = 2M vram identity map]
	 *
	 * This makes the lowest part of the VM point to the pagetables.
	 * Hence the lowest 2M in the vm should point to itself, with a few writes
	 * and flushes, other parts of the VM can be used either for copying and
	 * clearing.
	 *
	 * For performance, the kernel reserves PDE's, so about 20 are left
	 * for async VM updates.
	 *
	 * To make it easier to work, each scratch PT is put in slot (1 + PT #)
	 * everywhere, this allows lockless updates to scratch pages by using
	 * the different addresses in VM.
	 */
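
	/*
	 * Rough address sketch for the layout above (illustrative only,
	 * assuming 4 KiB GPU pages where each level-0 PT spans 2 MiB):
	 *
	 *   xe_migrate_vm_addr(slot, 0) = (slot + 1) << xe_pt_shift(1)
	 *                               = (slot + 1) * 2 MiB
	 *
	 * so slot 0 starts at 2 MiB, leaving the lowest 2 MiB for the
	 * self-referencing page-table mapping, while the NUM_PT_SLOTS (32)
	 * slots together take 32 * 4 KiB = 128 KiB of PTE storage in @bo.
	 */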
#define NUM_VMUSA_UNIT_PER_PAGE	32
#define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
#define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
	drm_suballoc_manager_init(&m->vm_update_sa,
				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
				  NUM_VMUSA_UNIT_PER_PAGE, 0);

	m->pt_bo = bo;
	return 0;
}

/*
 * Including the reserved copy engine is required to avoid deadlocks due to
 * migrate jobs servicing the faults getting stuck behind the job that faulted.
 */
static u32 xe_migrate_usm_logical_mask(struct xe_gt *gt)
{
	u32 logical_mask = 0;
	struct xe_hw_engine *hwe;
	enum xe_hw_engine_id id;

	for_each_hw_engine(hwe, gt, id) {
		if (hwe->class != XE_ENGINE_CLASS_COPY)
			continue;

		if (xe_gt_is_usm_hwe(gt, hwe))
			logical_mask |= BIT(hwe->logical_instance);
	}

	return logical_mask;
}

static bool xe_migrate_needs_ccs_emit(struct xe_device *xe)
{
	return xe_device_has_flat_ccs(xe) && !(GRAPHICS_VER(xe) >= 20 && IS_DGFX(xe));
}

/**
 * xe_migrate_alloc - Allocate a migrate struct for a given &xe_tile
 * @tile: &xe_tile
 *
 * Allocates a &xe_migrate for a given tile.
 *
 * Return: &xe_migrate on success, or NULL when out of memory.
 */
struct xe_migrate *xe_migrate_alloc(struct xe_tile *tile)
{
	struct xe_migrate *m = drmm_kzalloc(&tile_to_xe(tile)->drm, sizeof(*m), GFP_KERNEL);

	if (m)
		m->tile = tile;
	return m;
}

/**
 * xe_migrate_init() - Initialize a migrate context
 * @m: The migration context
 *
 * Return: 0 if successful, negative error code on failure
 */
int xe_migrate_init(struct xe_migrate *m)
{
	struct xe_tile *tile = m->tile;
	struct xe_gt *primary_gt = tile->primary_gt;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_vm *vm;
	int err;

	/* Special layout, prepared below.. */
	vm = xe_vm_create(xe, XE_VM_FLAG_MIGRATION |
			  XE_VM_FLAG_SET_TILE_ID(tile));
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	xe_vm_lock(vm, false);
	err = xe_migrate_prepare_vm(tile, m, vm);
	xe_vm_unlock(vm);
	if (err)
		goto err_out;

	if (xe->info.has_usm) {
		struct xe_hw_engine *hwe = xe_gt_hw_engine(primary_gt,
							   XE_ENGINE_CLASS_COPY,
							   primary_gt->usm.reserved_bcs_instance,
							   false);
		u32 logical_mask = xe_migrate_usm_logical_mask(primary_gt);

		if (!hwe || !logical_mask) {
			err = -EINVAL;
			goto err_out;
		}

		/*
		 * XXX: Currently only reserving 1 (likely slow) BCS instance on
		 * PVC, may want to revisit if performance is needed.
		 */
		m->q = xe_exec_queue_create(xe, vm, logical_mask, 1, hwe,
					    EXEC_QUEUE_FLAG_KERNEL |
					    EXEC_QUEUE_FLAG_PERMANENT |
					    EXEC_QUEUE_FLAG_HIGH_PRIORITY |
					    EXEC_QUEUE_FLAG_MIGRATE, 0);
	} else {
		m->q = xe_exec_queue_create_class(xe, primary_gt, vm,
						  XE_ENGINE_CLASS_COPY,
						  EXEC_QUEUE_FLAG_KERNEL |
						  EXEC_QUEUE_FLAG_PERMANENT |
						  EXEC_QUEUE_FLAG_MIGRATE, 0);
	}
	if (IS_ERR(m->q)) {
		err = PTR_ERR(m->q);
		goto err_out;
	}

	mutex_init(&m->job_mutex);
	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&m->job_mutex);
	fs_reclaim_release(GFP_KERNEL);

	err = devm_add_action_or_reset(xe->drm.dev, xe_migrate_fini, m);
	if (err)
		return err;

	if (IS_DGFX(xe)) {
		if (xe_migrate_needs_ccs_emit(xe))
			/* min chunk size corresponds to 4K of CCS Metadata */
			m->min_chunk_size = SZ_4K * SZ_64K /
				xe_device_ccs_bytes(xe, SZ_64K);
		else
			/* Somewhat arbitrary to avoid a huge amount of blits */
			m->min_chunk_size = SZ_64K;
		m->min_chunk_size = roundup_pow_of_two(m->min_chunk_size);
		drm_dbg(&xe->drm, "Migrate min chunk size is 0x%08llx\n",
			(unsigned long long)m->min_chunk_size);
	}

	return err;

err_out:
	xe_vm_close_and_put(vm);
	return err;
}

static u64 max_mem_transfer_per_pass(struct xe_device *xe)
{
	if (!IS_DGFX(xe) && xe_device_has_flat_ccs(xe))
		return MAX_CCS_LIMITED_TRANSFER;

	return MAX_PREEMPTDISABLE_TRANSFER;
}

static u64 xe_migrate_res_sizes(struct xe_migrate *m, struct xe_res_cursor *cur)
{
	struct xe_device *xe = tile_to_xe(m->tile);
	u64 size = min_t(u64, max_mem_transfer_per_pass(xe), cur->remaining);

	if (mem_type_is_vram(cur->mem_type)) {
		/*
		 * VRAM we want to blit in chunks with sizes aligned to
		 * min_chunk_size in order for the offset to CCS metadata to be
		 * page-aligned. If it's the last chunk it may be smaller.
		 *
		 * Another constraint is that we need to limit the blit to
		 * the VRAM block size, unless size is smaller than
		 * min_chunk_size.
		 */
		u64 chunk = max_t(u64, cur->size, m->min_chunk_size);

		size = min_t(u64, size, chunk);
		if (size > m->min_chunk_size)
			size = round_down(size, m->min_chunk_size);
	}

	return size;
}

static bool xe_migrate_allow_identity(u64 size, const struct xe_res_cursor *cur)
{
	/* If the chunk is not fragmented, allow identity map. */
	return cur->size >= size;
}
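
/*
 * Illustrative numbers for the chunking above (not tied to any specific
 * platform): a pass is capped at MAX_PREEMPTDISABLE_TRANSFER (8 MiB), or
 * MAX_CCS_LIMITED_TRANSFER (4 MiB) on igfx with flat CCS.  For VRAM the
 * size is then rounded down to a multiple of m->min_chunk_size so that the
 * matching CCS offset stays page aligned; e.g. an 8 MiB budget with a
 * 1 MiB min_chunk_size yields 1 MiB-aligned blits, while a final chunk
 * smaller than min_chunk_size is passed through unchanged.
 */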

#define PTE_UPDATE_FLAG_IS_VRAM		BIT(0)
#define PTE_UPDATE_FLAG_IS_COMP_PTE	BIT(1)

static u32 pte_update_size(struct xe_migrate *m,
			   u32 flags,
			   struct ttm_resource *res,
			   struct xe_res_cursor *cur,
			   u64 *L0, u64 *L0_ofs, u32 *L0_pt,
			   u32 cmd_size, u32 pt_ofs, u32 avail_pts)
{
	u32 cmds = 0;
	bool is_vram = PTE_UPDATE_FLAG_IS_VRAM & flags;
	bool is_comp_pte = PTE_UPDATE_FLAG_IS_COMP_PTE & flags;

	*L0_pt = pt_ofs;
	if (is_vram && xe_migrate_allow_identity(*L0, cur)) {
		/* Offset into identity map. */
		*L0_ofs = xe_migrate_vram_ofs(tile_to_xe(m->tile),
					      cur->start + vram_region_gpu_offset(res),
					      is_comp_pte);
		cmds += cmd_size;
	} else {
		/* Clip L0 to available size */
		u64 size = min(*L0, (u64)avail_pts * SZ_2M);
		u32 num_4k_pages = (size + XE_PAGE_SIZE - 1) >> XE_PTE_SHIFT;

		*L0 = size;
		*L0_ofs = xe_migrate_vm_addr(pt_ofs, 0);

		/* MI_STORE_DATA_IMM */
		cmds += 3 * DIV_ROUND_UP(num_4k_pages, MAX_PTE_PER_SDI);

		/* PDE qwords */
		cmds += num_4k_pages * 2;

		/* Each chunk has a single blit command */
		cmds += cmd_size;
	}

	return cmds;
}

static void emit_pte(struct xe_migrate *m,
		     struct xe_bb *bb, u32 at_pt,
		     bool is_vram, bool is_comp_pte,
		     struct xe_res_cursor *cur,
		     u32 size, struct ttm_resource *res)
{
	struct xe_device *xe = tile_to_xe(m->tile);
	struct xe_vm *vm = m->q->vm;
	u16 pat_index;
	u32 ptes;
	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
	u64 cur_ofs;

	/* Indirect access needs compression enabled uncached PAT index */
	if (GRAPHICS_VERx100(xe) >= 2000)
		pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
			xe->pat.idx[XE_CACHE_WB];
	else
		pat_index = xe->pat.idx[XE_CACHE_WB];

	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);

	while (ptes) {
		u32 chunk = min(MAX_PTE_PER_SDI, ptes);

		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
		bb->cs[bb->len++] = ofs;
		bb->cs[bb->len++] = 0;

		cur_ofs = ofs;
		ofs += chunk * 8;
		ptes -= chunk;

		while (chunk--) {
			u64 addr, flags = 0;
			bool devmem = false;

			addr = xe_res_dma(cur) & PAGE_MASK;
			if (is_vram) {
				if (vm->flags & XE_VM_FLAG_64K) {
					u64 va = cur_ofs * XE_PAGE_SIZE / 8;

					xe_assert(xe, (va & (SZ_64K - 1)) ==
						  (addr & (SZ_64K - 1)));

					flags |= XE_PTE_PS64;
				}

				addr += vram_region_gpu_offset(res);
				devmem = true;
			}

			addr = vm->pt_ops->pte_encode_addr(m->tile->xe,
							   addr, pat_index,
							   0, devmem, flags);
			bb->cs[bb->len++] = lower_32_bits(addr);
			bb->cs[bb->len++] = upper_32_bits(addr);

			xe_res_next(cur, min_t(u32, size, PAGE_SIZE));
			cur_ofs += 8;
		}
	}
}
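
/*
 * Illustrative dword cost of emit_pte() above (matches the accounting in
 * pte_update_size()): each MI_STORE_DATA_IMM covers at most MAX_PTE_PER_SDI
 * (0x1FE = 510) PTEs and costs 3 header dwords plus 2 dwords per PTE, so
 * filling one 2 MiB level-0 slot (512 PTEs) takes
 * DIV_ROUND_UP(512, 510) = 2 commands -> 2 * 3 + 512 * 2 = 1030 dwords.
 */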

#define EMIT_COPY_CCS_DW 5
static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
			  u64 dst_ofs, bool dst_is_indirect,
			  u64 src_ofs, bool src_is_indirect,
			  u32 size)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 *cs = bb->cs + bb->len;
	u32 num_ccs_blks;
	u32 num_pages;
	u32 ccs_copy_size;
	u32 mocs;

	if (GRAPHICS_VERx100(xe) >= 2000) {
		num_pages = DIV_ROUND_UP(size, XE_PAGE_SIZE);
		xe_gt_assert(gt, FIELD_FIT(XE2_CCS_SIZE_MASK, num_pages - 1));

		ccs_copy_size = REG_FIELD_PREP(XE2_CCS_SIZE_MASK, num_pages - 1);
		mocs = FIELD_PREP(XE2_XY_CTRL_SURF_MOCS_INDEX_MASK, gt->mocs.uc_index);

	} else {
		num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
					    NUM_CCS_BYTES_PER_BLOCK);
		xe_gt_assert(gt, FIELD_FIT(CCS_SIZE_MASK, num_ccs_blks - 1));

		ccs_copy_size = REG_FIELD_PREP(CCS_SIZE_MASK, num_ccs_blks - 1);
		mocs = FIELD_PREP(XY_CTRL_SURF_MOCS_MASK, gt->mocs.uc_index);
	}

	*cs++ = XY_CTRL_SURF_COPY_BLT |
		(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
		(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
		ccs_copy_size;
	*cs++ = lower_32_bits(src_ofs);
	*cs++ = upper_32_bits(src_ofs) | mocs;
	*cs++ = lower_32_bits(dst_ofs);
	*cs++ = upper_32_bits(dst_ofs) | mocs;

	bb->len = cs - bb->cs;
}

#define EMIT_COPY_DW 10
static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
		      u64 src_ofs, u64 dst_ofs, unsigned int size,
		      unsigned int pitch)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 mocs = 0;
	u32 tile_y = 0;

	xe_gt_assert(gt, !(pitch & 3));
	xe_gt_assert(gt, size / pitch <= S16_MAX);
	xe_gt_assert(gt, pitch / 4 <= S16_MAX);
	xe_gt_assert(gt, pitch <= U16_MAX);

	if (GRAPHICS_VER(xe) >= 20)
		mocs = FIELD_PREP(XE2_XY_FAST_COPY_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index);

	if (GRAPHICS_VERx100(xe) >= 1250)
		tile_y = XY_FAST_COPY_BLT_D1_SRC_TILE4 | XY_FAST_COPY_BLT_D1_DST_TILE4;

	bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
	bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch | tile_y | mocs;
	bb->cs[bb->len++] = 0;
	bb->cs[bb->len++] = (size / pitch) << 16 | pitch / 4;
	bb->cs[bb->len++] = lower_32_bits(dst_ofs);
	bb->cs[bb->len++] = upper_32_bits(dst_ofs);
	bb->cs[bb->len++] = 0;
	bb->cs[bb->len++] = pitch | mocs;
	bb->cs[bb->len++] = lower_32_bits(src_ofs);
	bb->cs[bb->len++] = upper_32_bits(src_ofs);
}

static u64 xe_migrate_batch_base(struct xe_migrate *m, bool usm)
{
	return usm ? m->usm_batch_base_ofs : m->batch_base_ofs;
}

static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
			       struct xe_bb *bb,
			       u64 src_ofs, bool src_is_indirect,
			       u64 dst_ofs, bool dst_is_indirect, u32 dst_size,
			       u64 ccs_ofs, bool copy_ccs)
{
	struct xe_gt *gt = m->tile->primary_gt;
	u32 flush_flags = 0;

	if (!copy_ccs && dst_is_indirect) {
		/*
		 * If the src is already in vram, then it should already
		 * have been cleared by us, or has been populated by the
		 * user. Make sure we copy the CCS aux state as-is.
		 *
		 * Otherwise if the bo doesn't have any CCS metadata attached,
		 * we still need to clear it for security reasons.
		 */
		u64 ccs_src_ofs = src_is_indirect ? src_ofs : m->cleared_mem_ofs;

		emit_copy_ccs(gt, bb,
			      dst_ofs, true,
			      ccs_src_ofs, src_is_indirect, dst_size);

		flush_flags = MI_FLUSH_DW_CCS;
	} else if (copy_ccs) {
		if (!src_is_indirect)
			src_ofs = ccs_ofs;
		else if (!dst_is_indirect)
			dst_ofs = ccs_ofs;

		xe_gt_assert(gt, src_is_indirect || dst_is_indirect);

		emit_copy_ccs(gt, bb, dst_ofs, dst_is_indirect, src_ofs,
			      src_is_indirect, dst_size);
		if (dst_is_indirect)
			flush_flags = MI_FLUSH_DW_CCS;
	}

	return flush_flags;
}

/**
 * xe_migrate_copy() - Copy content of TTM resources.
 * @m: The migration context.
 * @src_bo: The buffer object @src is currently bound to.
 * @dst_bo: If copying between resources created for the same bo, set this to
 * the same value as @src_bo. If copying between buffer objects, set it to
 * the buffer object @dst is currently bound to.
 * @src: The source TTM resource.
 * @dst: The dst TTM resource.
 * @copy_only_ccs: If true copy only CCS metadata
 *
 * Copies the contents of @src to @dst: On flat CCS devices,
 * the CCS metadata is copied as well if needed, or if not present,
 * the CCS metadata of @dst is cleared for security reasons.
 *
 * Return: Pointer to a dma_fence representing the last copy batch, or
 * an error pointer on failure. If there is a failure, any copy operation
 * started by the function call has been synced.
 */
struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
				  struct xe_bo *src_bo,
				  struct xe_bo *dst_bo,
				  struct ttm_resource *src,
				  struct ttm_resource *dst,
				  bool copy_only_ccs)
{
	struct xe_gt *gt = m->tile->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	struct dma_fence *fence = NULL;
	u64 size = xe_bo_size(src_bo);
	struct xe_res_cursor src_it, dst_it, ccs_it;
	u64 src_L0_ofs, dst_L0_ofs;
	u32 src_L0_pt, dst_L0_pt;
	u64 src_L0, dst_L0;
	int pass = 0;
	int err;
	bool src_is_pltt = src->mem_type == XE_PL_TT;
	bool dst_is_pltt = dst->mem_type == XE_PL_TT;
	bool src_is_vram = mem_type_is_vram(src->mem_type);
	bool dst_is_vram = mem_type_is_vram(dst->mem_type);
	bool type_device = src_bo->ttm.type == ttm_bo_type_device;
	bool needs_ccs_emit = type_device && xe_migrate_needs_ccs_emit(xe);
	bool copy_ccs = xe_device_has_flat_ccs(xe) &&
		xe_bo_needs_ccs_pages(src_bo) && xe_bo_needs_ccs_pages(dst_bo);
	bool copy_system_ccs = copy_ccs && (!src_is_vram || !dst_is_vram);
	bool use_comp_pat = type_device && xe_device_has_flat_ccs(xe) &&
		GRAPHICS_VER(xe) >= 20 && src_is_vram && !dst_is_vram;

	/* Copying CCS between two different BOs is not supported yet. */
	if (XE_WARN_ON(copy_ccs && src_bo != dst_bo))
		return ERR_PTR(-EINVAL);

	if (src_bo != dst_bo && XE_WARN_ON(xe_bo_size(src_bo) != xe_bo_size(dst_bo)))
		return ERR_PTR(-EINVAL);

	if (!src_is_vram)
		xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);
	else
		xe_res_first(src, 0, size, &src_it);
	if (!dst_is_vram)
		xe_res_first_sg(xe_bo_sg(dst_bo), 0, size, &dst_it);
	else
		xe_res_first(dst, 0, size, &dst_it);

	if (copy_system_ccs)
		xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
				PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
				&ccs_it);

	while (size) {
		u32 batch_size = 2; /* arb_clear() + MI_BATCH_BUFFER_END */
		struct xe_sched_job *job;
		struct xe_bb *bb;
		u32 flush_flags = 0;
		u32 update_idx;
		u64 ccs_ofs, ccs_size;
		u32 ccs_pt;
		u32 pte_flags;

		bool usm = xe->info.has_usm;
		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;

		src_L0 = xe_migrate_res_sizes(m, &src_it);
		dst_L0 = xe_migrate_res_sizes(m, &dst_it);

		drm_dbg(&xe->drm, "Pass %u, sizes: %llu & %llu\n",
			pass++, src_L0, dst_L0);

		src_L0 = min(src_L0, dst_L0);

		pte_flags = src_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
		pte_flags |= use_comp_pat ? PTE_UPDATE_FLAG_IS_COMP_PTE : 0;
		batch_size += pte_update_size(m, pte_flags, src, &src_it, &src_L0,
					      &src_L0_ofs, &src_L0_pt, 0, 0,
					      avail_pts);

		pte_flags = dst_is_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
		batch_size += pte_update_size(m, pte_flags, dst, &dst_it, &src_L0,
					      &dst_L0_ofs, &dst_L0_pt, 0,
					      avail_pts, avail_pts);

		if (copy_system_ccs) {
			xe_assert(xe, type_device);
			ccs_size = xe_device_ccs_bytes(xe, src_L0);
			batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size,
						      &ccs_ofs, &ccs_pt, 0,
						      2 * avail_pts,
						      avail_pts);
			xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
		}

		/* Add copy commands size here */
		batch_size += ((copy_only_ccs) ? 0 : EMIT_COPY_DW) +
			((needs_ccs_emit ? EMIT_COPY_CCS_DW : 0));

		bb = xe_bb_new(gt, batch_size, usm);
		if (IS_ERR(bb)) {
			err = PTR_ERR(bb);
			goto err_sync;
		}

		if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
			xe_res_next(&src_it, src_L0);
		else
			emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat,
				 &src_it, src_L0, src);

		if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
			xe_res_next(&dst_it, src_L0);
		else
			emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
				 &dst_it, src_L0, dst);

		if (copy_system_ccs)
			emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);

		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
		update_idx = bb->len;

		if (!copy_only_ccs)
			emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, src_L0, XE_PAGE_SIZE);

		if (needs_ccs_emit)
			flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs,
							  IS_DGFX(xe) ? src_is_vram : src_is_pltt,
							  dst_L0_ofs,
							  IS_DGFX(xe) ? dst_is_vram : dst_is_pltt,
							  src_L0, ccs_ofs, copy_ccs);

		job = xe_bb_create_migration_job(m->q, bb,
						 xe_migrate_batch_base(m, usm),
						 update_idx);
		if (IS_ERR(job)) {
			err = PTR_ERR(job);
			goto err;
		}

		xe_sched_job_add_migrate_flush(job, flush_flags);
		if (!fence) {
			err = xe_sched_job_add_deps(job, src_bo->ttm.base.resv,
						    DMA_RESV_USAGE_BOOKKEEP);
			if (!err && src_bo != dst_bo)
				err = xe_sched_job_add_deps(job, dst_bo->ttm.base.resv,
							    DMA_RESV_USAGE_BOOKKEEP);
			if (err)
				goto err_job;
		}

		mutex_lock(&m->job_mutex);
		xe_sched_job_arm(job);
		dma_fence_put(fence);
		fence = dma_fence_get(&job->drm.s_fence->finished);
		xe_sched_job_push(job);

		dma_fence_put(m->fence);
		m->fence = dma_fence_get(fence);

		mutex_unlock(&m->job_mutex);

		xe_bb_free(bb, fence);
		size -= src_L0;
		continue;

err_job:
		xe_sched_job_put(job);
err:
		xe_bb_free(bb, NULL);

err_sync:
		/* Sync partial copy if any. FIXME: under job_mutex? */
		if (fence) {
			dma_fence_wait(fence, false);
			dma_fence_put(fence);
		}

		return ERR_PTR(err);
	}

	return fence;
}

/**
 * xe_migrate_lrc() - Get the LRC from migrate context.
 * @migrate: Migrate context.
 *
 * Return: Pointer to LRC on success, error on failure
 */
struct xe_lrc *xe_migrate_lrc(struct xe_migrate *migrate)
{
	return migrate->q->lrc[0];
}

static int emit_flush_invalidate(struct xe_migrate *m, u32 *dw, int i,
				 u32 flags)
{
	dw[i++] = MI_FLUSH_DW | MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
		  MI_FLUSH_IMM_DW | flags;
	dw[i++] = lower_32_bits(xe_lrc_start_seqno_ggtt_addr(xe_migrate_lrc(m))) |
		  MI_FLUSH_DW_USE_GTT;
	dw[i++] = upper_32_bits(xe_lrc_start_seqno_ggtt_addr(xe_migrate_lrc(m)));
	dw[i++] = MI_NOOP;
	dw[i++] = MI_NOOP;

	return i;
}

/**
 * xe_migrate_ccs_rw_copy() - Copy content of TTM resources.
 * @m: The migration context.
 * @src_bo: The buffer object @src is currently bound to.
 * @read_write: Creates BB commands for CCS read/write.
 *
 * Creates batch buffer instructions to copy CCS metadata from CCS pool to
 * memory and vice versa.
 *
 * This function should only be called for IGPU.
 *
 * Return: 0 if successful, negative error code on failure.
 */
int xe_migrate_ccs_rw_copy(struct xe_migrate *m,
			   struct xe_bo *src_bo,
			   enum xe_sriov_vf_ccs_rw_ctxs read_write)
{
	bool src_is_pltt = read_write == XE_SRIOV_VF_CCS_READ_CTX;
	bool dst_is_pltt = read_write == XE_SRIOV_VF_CCS_WRITE_CTX;
	struct ttm_resource *src = src_bo->ttm.resource;
	struct xe_gt *gt = m->tile->primary_gt;
	u32 batch_size, batch_size_allocated;
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_res_cursor src_it, ccs_it;
	u64 size = xe_bo_size(src_bo);
	struct xe_bb *bb = NULL;
	u64 src_L0, src_L0_ofs;
	u32 src_L0_pt;
	int err;

	xe_res_first_sg(xe_bo_sg(src_bo), 0, size, &src_it);

	xe_res_first_sg(xe_bo_sg(src_bo), xe_bo_ccs_pages_start(src_bo),
			PAGE_ALIGN(xe_device_ccs_bytes(xe, size)),
			&ccs_it);

	/* Calculate Batch buffer size */
	batch_size = 0;
	while (size) {
		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
		u64 ccs_ofs, ccs_size;
		u32 ccs_pt;

		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;

		src_L0 = min_t(u64, max_mem_transfer_per_pass(xe), size);

		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
					      &src_L0_ofs, &src_L0_pt, 0, 0,
					      avail_pts);

		ccs_size = xe_device_ccs_bytes(xe, src_L0);
		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
					      &ccs_pt, 0, avail_pts, avail_pts);
		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));

		/* Add copy commands size here */
		batch_size += EMIT_COPY_CCS_DW;

		size -= src_L0;
	}

	bb = xe_bb_ccs_new(gt, batch_size, read_write);
	if (IS_ERR(bb)) {
		drm_err(&xe->drm, "BB allocation failed.\n");
		err = PTR_ERR(bb);
		goto err_ret;
	}

	batch_size_allocated = batch_size;
	size = xe_bo_size(src_bo);
	batch_size = 0;

	/*
	 * Emit PTE and copy commands here.
	 * The CCS copy command can only support limited size. If the size to be
	 * copied is more than the limit, divide copy into chunks. So, calculate
	 * sizes here again before copy command is emitted.
	 */
	while (size) {
		batch_size += 10; /* Flush + ggtt addr + 2 NOP */
		u32 flush_flags = 0;
		u64 ccs_ofs, ccs_size;
		u32 ccs_pt;

		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;

		src_L0 = xe_migrate_res_sizes(m, &src_it);

		batch_size += pte_update_size(m, false, src, &src_it, &src_L0,
					      &src_L0_ofs, &src_L0_pt, 0, 0,
					      avail_pts);

		ccs_size = xe_device_ccs_bytes(xe, src_L0);
		batch_size += pte_update_size(m, 0, NULL, &ccs_it, &ccs_size, &ccs_ofs,
					      &ccs_pt, 0, avail_pts, avail_pts);
		xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));
		batch_size += EMIT_COPY_CCS_DW;

		emit_pte(m, bb, src_L0_pt, false, true, &src_it, src_L0, src);

		emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);

		bb->len = emit_flush_invalidate(m, bb->cs, bb->len, flush_flags);
		flush_flags = xe_migrate_ccs_copy(m, bb, src_L0_ofs, src_is_pltt,
						  src_L0_ofs, dst_is_pltt,
						  src_L0, ccs_ofs, true);
		bb->len = emit_flush_invalidate(m, bb->cs, bb->len, flush_flags);

		size -= src_L0;
	}

	xe_assert(xe, (batch_size_allocated == bb->len));
	src_bo->bb_ccs[read_write] = bb;

	return 0;

err_ret:
	return err;
}

/**
 * xe_migrate_exec_queue() - Get the execution queue from migrate context.
 * @migrate: Migrate context.
 *
 * Return: Pointer to execution queue on success, error on failure
 */
struct xe_exec_queue *xe_migrate_exec_queue(struct xe_migrate *migrate)
{
	return migrate->q;
}

static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
				 u32 size, u32 pitch)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 *cs = bb->cs + bb->len;
	u32 len = PVC_MEM_SET_CMD_LEN_DW;

	*cs++ = PVC_MEM_SET_CMD | PVC_MEM_SET_MATRIX | (len - 2);
	*cs++ = pitch - 1;
	*cs++ = (size / pitch) - 1;
	*cs++ = pitch - 1;
	*cs++ = lower_32_bits(src_ofs);
	*cs++ = upper_32_bits(src_ofs);
	if (GRAPHICS_VERx100(xe) >= 2000)
		*cs++ = FIELD_PREP(XE2_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);
	else
		*cs++ = FIELD_PREP(PVC_MEM_SET_MOCS_INDEX_MASK, gt->mocs.uc_index);

	xe_gt_assert(gt, cs - bb->cs == len + bb->len);

	bb->len += len;
}

static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
				 u64 src_ofs, u32 size, u32 pitch, bool is_vram)
{
	struct xe_device *xe = gt_to_xe(gt);
	u32 *cs = bb->cs + bb->len;
	u32 len = XY_FAST_COLOR_BLT_DW;

	if (GRAPHICS_VERx100(xe) < 1250)
		len = 11;

	*cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
		(len - 2);
	if (GRAPHICS_VERx100(xe) >= 2000)
		*cs++ = FIELD_PREP(XE2_XY_FAST_COLOR_BLT_MOCS_INDEX_MASK, gt->mocs.uc_index) |
			(pitch - 1);
	else
		*cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, gt->mocs.uc_index) |
			(pitch - 1);
	*cs++ = 0;
	*cs++ = (size / pitch) << 16 | pitch / 4;
	*cs++ = lower_32_bits(src_ofs);
	*cs++ = upper_32_bits(src_ofs);
	*cs++ = (is_vram ? 0x0 : 0x1) << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;
	*cs++ = 0;

	if (len > 11) {
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
		*cs++ = 0;
	}

	xe_gt_assert(gt, cs - bb->cs == len + bb->len);

	bb->len += len;
}

static bool has_service_copy_support(struct xe_gt *gt)
{
	/*
	 * What we care about is whether the architecture was designed with
	 * service copy functionality (specifically the new MEM_SET / MEM_COPY
	 * instructions) so check the architectural engine list rather than the
	 * actual list since these instructions are usable on BCS0 even if
	 * all of the actual service copy engines (BCS1-BCS8) have been fused
	 * off.
	 */
	return gt->info.engine_mask & GENMASK(XE_HW_ENGINE_BCS8,
					      XE_HW_ENGINE_BCS1);
}

static u32 emit_clear_cmd_len(struct xe_gt *gt)
{
	if (has_service_copy_support(gt))
		return PVC_MEM_SET_CMD_LEN_DW;
	else
		return XY_FAST_COLOR_BLT_DW;
}

static void emit_clear(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs,
		       u32 size, u32 pitch, bool is_vram)
{
	if (has_service_copy_support(gt))
		emit_clear_link_copy(gt, bb, src_ofs, size, pitch);
	else
		emit_clear_main_copy(gt, bb, src_ofs, size, pitch,
				     is_vram);
}
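
/*
 * Clear-path selection sketch (illustrative): on parts whose architected
 * engine list includes the service-copy BCS engines, emit_clear() uses the
 * MEM_SET based emit_clear_link_copy() (PVC_MEM_SET_CMD_LEN_DW dwords per
 * command); otherwise it falls back to XY_FAST_COLOR_BLT via
 * emit_clear_main_copy() (up to XY_FAST_COLOR_BLT_DW dwords).
 * emit_clear_cmd_len() returns the matching worst-case length so callers
 * can reserve batch-buffer space before emitting.
 */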

/**
 * xe_migrate_clear() - Clear content of TTM resources.
 * @m: The migration context.
 * @bo: The buffer object @dst is currently bound to.
 * @dst: The dst TTM resource to be cleared.
 * @clear_flags: flags to specify which data to clear: CCS, BO, or both.
 *
 * Clear the contents of @dst to zero when XE_MIGRATE_CLEAR_FLAG_BO_DATA is set.
 * On flat CCS devices, the CCS metadata is cleared to zero with XE_MIGRATE_CLEAR_FLAG_CCS_DATA.
 * Set XE_MIGRATE_CLEAR_FLAG_FULL to clear bo as well as CCS metadata.
 * TODO: Eliminate the @bo argument.
 *
 * Return: Pointer to a dma_fence representing the last clear batch, or
 * an error pointer on failure. If there is a failure, any clear operation
 * started by the function call has been synced.
 */
struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
				   struct xe_bo *bo,
				   struct ttm_resource *dst,
				   u32 clear_flags)
{
	bool clear_vram = mem_type_is_vram(dst->mem_type);
	bool clear_bo_data = XE_MIGRATE_CLEAR_FLAG_BO_DATA & clear_flags;
	bool clear_ccs = XE_MIGRATE_CLEAR_FLAG_CCS_DATA & clear_flags;
	struct xe_gt *gt = m->tile->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool clear_only_system_ccs = false;
	struct dma_fence *fence = NULL;
	u64 size = xe_bo_size(bo);
	struct xe_res_cursor src_it;
	struct ttm_resource *src = dst;
	int err;

	if (WARN_ON(!clear_bo_data && !clear_ccs))
		return NULL;

	if (!clear_bo_data && clear_ccs && !IS_DGFX(xe))
		clear_only_system_ccs = true;

	if (!clear_vram)
		xe_res_first_sg(xe_bo_sg(bo), 0, xe_bo_size(bo), &src_it);
	else
		xe_res_first(src, 0, xe_bo_size(bo), &src_it);

	while (size) {
		u64 clear_L0_ofs;
		u32 clear_L0_pt;
		u32 flush_flags = 0;
		u64 clear_L0;
		struct xe_sched_job *job;
		struct xe_bb *bb;
		u32 batch_size, update_idx;
		u32 pte_flags;

		bool usm = xe->info.has_usm;
		u32 avail_pts = max_mem_transfer_per_pass(xe) / LEVEL0_PAGE_TABLE_ENCODE_SIZE;

		clear_L0 = xe_migrate_res_sizes(m, &src_it);

		/* Calculate final sizes and batch size.. */
		pte_flags = clear_vram ? PTE_UPDATE_FLAG_IS_VRAM : 0;
		batch_size = 2 +
			pte_update_size(m, pte_flags, src, &src_it,
					&clear_L0, &clear_L0_ofs, &clear_L0_pt,
					clear_bo_data ? emit_clear_cmd_len(gt) : 0, 0,
					avail_pts);

		if (xe_migrate_needs_ccs_emit(xe))
			batch_size += EMIT_COPY_CCS_DW;

		/* Clear commands */

		if (WARN_ON_ONCE(!clear_L0))
			break;

		bb = xe_bb_new(gt, batch_size, usm);
		if (IS_ERR(bb)) {
			err = PTR_ERR(bb);
			goto err_sync;
		}

		size -= clear_L0;
		/* Preemption is enabled again by the ring ops. */
		if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
			xe_res_next(&src_it, clear_L0);
		else
			emit_pte(m, bb, clear_L0_pt, clear_vram, clear_only_system_ccs,
				 &src_it, clear_L0, dst);

		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
		update_idx = bb->len;

		if (clear_bo_data)
			emit_clear(gt, bb, clear_L0_ofs, clear_L0, XE_PAGE_SIZE, clear_vram);

		if (xe_migrate_needs_ccs_emit(xe)) {
			emit_copy_ccs(gt, bb, clear_L0_ofs, true,
				      m->cleared_mem_ofs, false, clear_L0);
			flush_flags = MI_FLUSH_DW_CCS;
		}

		job = xe_bb_create_migration_job(m->q, bb,
						 xe_migrate_batch_base(m, usm),
						 update_idx);
		if (IS_ERR(job)) {
			err = PTR_ERR(job);
			goto err;
		}

		xe_sched_job_add_migrate_flush(job, flush_flags);
		if (!fence) {
			/*
			 * There can't be anything userspace related at this
			 * point, so we just need to respect any potential move
			 * fences, which are always tracked as
			 * DMA_RESV_USAGE_KERNEL.
			 */
			err = xe_sched_job_add_deps(job, bo->ttm.base.resv,
						    DMA_RESV_USAGE_KERNEL);
			if (err)
				goto err_job;
		}

		mutex_lock(&m->job_mutex);
		xe_sched_job_arm(job);
		dma_fence_put(fence);
		fence = dma_fence_get(&job->drm.s_fence->finished);
		xe_sched_job_push(job);

		dma_fence_put(m->fence);
		m->fence = dma_fence_get(fence);

		mutex_unlock(&m->job_mutex);

		xe_bb_free(bb, fence);
		continue;

err_job:
		xe_sched_job_put(job);
err:
		xe_bb_free(bb, NULL);
err_sync:
		/* Sync partial copies if any. FIXME: job_mutex? */
		if (fence) {
			dma_fence_wait(fence, false);
			dma_fence_put(fence);
		}

		return ERR_PTR(err);
	}

	if (clear_ccs)
		bo->ccs_cleared = true;

	return fence;
}

static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
			  const struct xe_vm_pgtable_update_op *pt_op,
			  const struct xe_vm_pgtable_update *update,
			  struct xe_migrate_pt_update *pt_update)
{
	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
	u32 chunk;
	u32 ofs = update->ofs, size = update->qwords;

	/*
	 * If we have 512 entries (max), we would populate it ourselves,
	 * and update the PDE above it to the new pointer.
	 * The only time this can happen is if we have to update the top
	 * PDE. This requires a BO that is almost vm->size big.
	 *
	 * This shouldn't be possible in practice.. might change when 16K
	 * pages are used. Hence the assert.
	 */
	xe_tile_assert(tile, update->qwords < MAX_NUM_PTE);
	if (!ppgtt_ofs)
		ppgtt_ofs = xe_migrate_vram_ofs(tile_to_xe(tile),
						xe_bo_addr(update->pt_bo, 0,
							   XE_PAGE_SIZE), false);

	do {
		u64 addr = ppgtt_ofs + ofs * 8;

		chunk = min(size, MAX_PTE_PER_SDI);

		/* Ensure populatefn can do memset64 by aligning bb->cs */
		if (!(bb->len & 1))
			bb->cs[bb->len++] = MI_NOOP;

		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
		bb->cs[bb->len++] = lower_32_bits(addr);
		bb->cs[bb->len++] = upper_32_bits(addr);
		if (pt_op->bind)
			ops->populate(pt_update, tile, NULL, bb->cs + bb->len,
				      ofs, chunk, update);
		else
			ops->clear(pt_update, tile, NULL, bb->cs + bb->len,
				   ofs, chunk, update);

		bb->len += chunk * 2;
		ofs += chunk;
		size -= chunk;
	} while (size);
}

struct xe_vm *xe_migrate_get_vm(struct xe_migrate *m)
{
	return xe_vm_get(m->q->vm);
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
struct migrate_test_params {
	struct xe_test_priv base;
	bool force_gpu;
};

#define to_migrate_test_params(_priv) \
	container_of(_priv, struct migrate_test_params, base)
#endif

static struct dma_fence *
xe_migrate_update_pgtables_cpu(struct xe_migrate *m,
			       struct xe_migrate_pt_update *pt_update)
{
	XE_TEST_DECLARE(struct migrate_test_params *test =
				to_migrate_test_params
				(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE));)
	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
	struct xe_vm *vm = pt_update->vops->vm;
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&pt_update->vops->pt_update_ops[pt_update->tile_id];
	int err;
	u32 i, j;

	if (XE_TEST_ONLY(test && test->force_gpu))
		return ERR_PTR(-ETIME);

	if (ops->pre_commit) {
		pt_update->job = NULL;
		err = ops->pre_commit(pt_update);
		if (err)
			return ERR_PTR(err);
	}

	for (i = 0; i < pt_update_ops->num_ops; ++i) {
		const struct xe_vm_pgtable_update_op *pt_op =
			&pt_update_ops->ops[i];

		for (j = 0; j < pt_op->num_entries; j++) {
			const struct xe_vm_pgtable_update *update =
				&pt_op->entries[j];

			if (pt_op->bind)
				ops->populate(pt_update, m->tile,
					      &update->pt_bo->vmap, NULL,
					      update->ofs, update->qwords,
					      update);
			else
				ops->clear(pt_update, m->tile,
					   &update->pt_bo->vmap, NULL,
					   update->ofs, update->qwords, update);
		}
	}

	trace_xe_vm_cpu_bind(vm);
	xe_device_wmb(vm->xe);

	return dma_fence_get_stub();
}

static struct dma_fence *
__xe_migrate_update_pgtables(struct xe_migrate *m,
			     struct xe_migrate_pt_update *pt_update,
			     struct xe_vm_pgtable_update_ops *pt_update_ops)
{
	const struct xe_migrate_pt_update_ops *ops = pt_update->ops;
	struct xe_tile *tile = m->tile;
	struct xe_gt *gt = tile->primary_gt;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_sched_job *job;
	struct dma_fence *fence;
	struct drm_suballoc *sa_bo = NULL;
	struct xe_bb *bb;
	u32 i, j, batch_size = 0, ppgtt_ofs, update_idx, page_ofs = 0;
	u32 num_updates = 0, current_update = 0;
	u64 addr;
	int err = 0;
	bool is_migrate = pt_update_ops->q == m->q;
	bool usm = is_migrate && xe->info.has_usm;

	for (i = 0; i < pt_update_ops->num_ops; ++i) {
		struct xe_vm_pgtable_update_op *pt_op = &pt_update_ops->ops[i];
		struct xe_vm_pgtable_update *updates = pt_op->entries;

		num_updates += pt_op->num_entries;
		for (j = 0; j < pt_op->num_entries; ++j) {
			u32 num_cmds = DIV_ROUND_UP(updates[j].qwords,
						    MAX_PTE_PER_SDI);

			/* align noop + MI_STORE_DATA_IMM cmd prefix */
			batch_size += 4 * num_cmds + updates[j].qwords * 2;
		}
	}

	/* fixed + PTE entries */
	if (IS_DGFX(xe))
		batch_size += 2;
	else
		batch_size += 6 * (num_updates / MAX_PTE_PER_SDI + 1) +
			num_updates * 2;

	bb = xe_bb_new(gt, batch_size, usm);
	if (IS_ERR(bb))
		return ERR_CAST(bb);

	/* For sysmem PTE's, need to map them in our hole.. */
	if (!IS_DGFX(xe)) {
		u16 pat_index = xe->pat.idx[XE_CACHE_WB];
		u32 ptes, ofs;

		ppgtt_ofs = NUM_KERNEL_PDE - 1;
		if (!is_migrate) {
			u32 num_units = DIV_ROUND_UP(num_updates,
						     NUM_VMUSA_WRITES_PER_UNIT);

			if (num_units > m->vm_update_sa.size) {
				err = -ENOBUFS;
				goto err_bb;
			}
			sa_bo = drm_suballoc_new(&m->vm_update_sa, num_units,
						 GFP_KERNEL, true, 0);
			if (IS_ERR(sa_bo)) {
				err = PTR_ERR(sa_bo);
				goto err_bb;
			}

			ppgtt_ofs = NUM_KERNEL_PDE +
				(drm_suballoc_soffset(sa_bo) /
				 NUM_VMUSA_UNIT_PER_PAGE);
			page_ofs = (drm_suballoc_soffset(sa_bo) %
				    NUM_VMUSA_UNIT_PER_PAGE) *
				VM_SA_UPDATE_UNIT_SIZE;
		}

		/* Map our PT's to gtt */
		i = 0;
		j = 0;
		ptes = num_updates;
		ofs = ppgtt_ofs * XE_PAGE_SIZE + page_ofs;
		while (ptes) {
			u32 chunk = min(MAX_PTE_PER_SDI, ptes);
			u32 idx = 0;

			bb->cs[bb->len++] = MI_STORE_DATA_IMM |
				MI_SDI_NUM_QW(chunk);
			bb->cs[bb->len++] = ofs;
			bb->cs[bb->len++] = 0; /* upper_32_bits */

			for (; i < pt_update_ops->num_ops; ++i) {
				struct xe_vm_pgtable_update_op *pt_op =
					&pt_update_ops->ops[i];
				struct xe_vm_pgtable_update *updates = pt_op->entries;

				for (; j < pt_op->num_entries; ++j, ++current_update, ++idx) {
					struct xe_vm *vm = pt_update->vops->vm;
					struct xe_bo *pt_bo = updates[j].pt_bo;

					if (idx == chunk)
						goto next_cmd;

					xe_tile_assert(tile, xe_bo_size(pt_bo) == SZ_4K);

					/* Map a PT at most once */
					if (pt_bo->update_index < 0)
						pt_bo->update_index = current_update;

					addr = vm->pt_ops->pte_encode_bo(pt_bo, 0,
									 pat_index, 0);
					bb->cs[bb->len++] = lower_32_bits(addr);
					bb->cs[bb->len++] = upper_32_bits(addr);
				}

				j = 0;
			}

next_cmd:
			ptes -= chunk;
			ofs += chunk * sizeof(u64);
		}

		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
		update_idx = bb->len;

		addr = xe_migrate_vm_addr(ppgtt_ofs, 0) +
			(page_ofs / sizeof(u64)) * XE_PAGE_SIZE;
		for (i = 0; i < pt_update_ops->num_ops; ++i) {
			struct xe_vm_pgtable_update_op *pt_op =
				&pt_update_ops->ops[i];
			struct xe_vm_pgtable_update *updates = pt_op->entries;

			for (j = 0; j < pt_op->num_entries; ++j) {
				struct xe_bo *pt_bo = updates[j].pt_bo;

				write_pgtable(tile, bb, addr +
					      pt_bo->update_index * XE_PAGE_SIZE,
					      pt_op, &updates[j], pt_update);
			}
		}
	} else {
		/* phys pages, no preamble required */
		bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
		update_idx = bb->len;

		for (i = 0; i < pt_update_ops->num_ops; ++i) {
			struct xe_vm_pgtable_update_op *pt_op =
				&pt_update_ops->ops[i];
			struct xe_vm_pgtable_update *updates = pt_op->entries;

			for (j = 0; j < pt_op->num_entries; ++j)
				write_pgtable(tile, bb, 0, pt_op, &updates[j],
					      pt_update);
		}
	}

	job = xe_bb_create_migration_job(pt_update_ops->q, bb,
					 xe_migrate_batch_base(m, usm),
					 update_idx);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto err_sa;
	}

	if (ops->pre_commit) {
		pt_update->job = job;
		err = ops->pre_commit(pt_update);
		if (err)
			goto err_job;
	}
	if (is_migrate)
		mutex_lock(&m->job_mutex);

	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	if (is_migrate)
		mutex_unlock(&m->job_mutex);

	xe_bb_free(bb, fence);
	drm_suballoc_free(sa_bo, fence);

	return fence;

err_job:
	xe_sched_job_put(job);
err_sa:
	drm_suballoc_free(sa_bo, NULL);
err_bb:
	xe_bb_free(bb, NULL);
	return ERR_PTR(err);
}

/**
 * xe_migrate_update_pgtables() - Pipelined page-table update
 * @m: The migrate context.
 * @pt_update: PT update arguments
 *
 * Perform a pipelined page-table update. The update descriptors are typically
 * built under the same lock critical section as a call to this function. If
 * using the default engine for the updates, they will be performed in the
 * order they grab the job_mutex. If different engines are used, external
 * synchronization is needed for overlapping updates to maintain page-table
 * consistency. Note that the meaning of "overlapping" is that the updates
 * touch the same page-table, which might be a higher-level page-directory.
 * If no pipelining is needed, then updates may be performed by the cpu.
 *
 * Return: A dma_fence that, when signaled, indicates the update completion.
 */
struct dma_fence *
xe_migrate_update_pgtables(struct xe_migrate *m,
			   struct xe_migrate_pt_update *pt_update)
{
	struct xe_vm_pgtable_update_ops *pt_update_ops =
		&pt_update->vops->pt_update_ops[pt_update->tile_id];
	struct dma_fence *fence;

	fence = xe_migrate_update_pgtables_cpu(m, pt_update);

	/* -ETIME indicates a job is needed, anything else is legit error */
	if (!IS_ERR(fence) || PTR_ERR(fence) != -ETIME)
		return fence;

	return __xe_migrate_update_pgtables(m, pt_update, pt_update_ops);
}

/**
 * xe_migrate_wait() - Complete all operations using the xe_migrate context
 * @m: Migrate context to wait for.
 *
 * Waits until the GPU no longer uses the migrate context's default engine
 * or its page-table objects. FIXME: What about separate page-table update
 * engines?
 */
void xe_migrate_wait(struct xe_migrate *m)
{
	if (m->fence)
		dma_fence_wait(m->fence, false);
}
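
/*
 * Worked example for the helper below (illustrative only): updating an
 * 8 MiB window with 4 KiB pages needs 2048 PTEs, so
 * pte_update_cmd_size(SZ_8M) = 3 * DIV_U64_ROUND_UP(2048, 510) + 2 * 2048
 *                            = 3 * 5 + 4096 = 4111 dwords,
 * which callers add on top of EMIT_COPY_DW and the batch-buffer end.
 */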

static u32 pte_update_cmd_size(u64 size)
{
	u32 num_dword;
	u64 entries = DIV_U64_ROUND_UP(size, XE_PAGE_SIZE);

	XE_WARN_ON(size > MAX_PREEMPTDISABLE_TRANSFER);

	/*
	 * MI_STORE_DATA_IMM command is used to update page table. Each
	 * instruction can update at most MAX_PTE_PER_SDI pte entries. To
	 * update n (n <= MAX_PTE_PER_SDI) pte entries, we need:
	 *
	 * - 1 dword for the MI_STORE_DATA_IMM command header (opcode etc)
	 * - 2 dword for the page table's physical location
	 * - 2*n dword for value of pte to fill (each pte entry is 2 dwords)
	 */
	num_dword = (1 + 2) * DIV_U64_ROUND_UP(entries, MAX_PTE_PER_SDI);
	num_dword += entries * 2;

	return num_dword;
}

static void build_pt_update_batch_sram(struct xe_migrate *m,
				       struct xe_bb *bb, u32 pt_offset,
				       dma_addr_t *sram_addr, u32 size)
{
	u16 pat_index = tile_to_xe(m->tile)->pat.idx[XE_CACHE_WB];
	u32 ptes;
	int i = 0;

	ptes = DIV_ROUND_UP(size, XE_PAGE_SIZE);
	while (ptes) {
		u32 chunk = min(MAX_PTE_PER_SDI, ptes);

		bb->cs[bb->len++] = MI_STORE_DATA_IMM | MI_SDI_NUM_QW(chunk);
		bb->cs[bb->len++] = pt_offset;
		bb->cs[bb->len++] = 0;

		pt_offset += chunk * 8;
		ptes -= chunk;

		while (chunk--) {
			u64 addr = sram_addr[i++] & PAGE_MASK;

			xe_tile_assert(m->tile, addr);
			addr = m->q->vm->pt_ops->pte_encode_addr(m->tile->xe,
								 addr, pat_index,
								 0, false, 0);
			bb->cs[bb->len++] = lower_32_bits(addr);
			bb->cs[bb->len++] = upper_32_bits(addr);
		}
	}
}

enum xe_migrate_copy_dir {
	XE_MIGRATE_COPY_TO_VRAM,
	XE_MIGRATE_COPY_TO_SRAM,
};

#define XE_CACHELINE_BYTES	64ull
#define XE_CACHELINE_MASK	(XE_CACHELINE_BYTES - 1)

static struct dma_fence *xe_migrate_vram(struct xe_migrate *m,
					 unsigned long len,
					 unsigned long sram_offset,
					 dma_addr_t *sram_addr, u64 vram_addr,
					 const enum xe_migrate_copy_dir dir)
{
	struct xe_gt *gt = m->tile->primary_gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool use_usm_batch = xe->info.has_usm;
	struct dma_fence *fence = NULL;
	u32 batch_size = 2;
	u64 src_L0_ofs, dst_L0_ofs;
	struct xe_sched_job *job;
	struct xe_bb *bb;
	u32 update_idx, pt_slot = 0;
	unsigned long npages = DIV_ROUND_UP(len + sram_offset, PAGE_SIZE);
	unsigned int pitch = len >= PAGE_SIZE && !(len & ~PAGE_MASK) ?
			     PAGE_SIZE : 4;
	int err;

	if (drm_WARN_ON(&xe->drm, (len & XE_CACHELINE_MASK) ||
			(sram_offset | vram_addr) & XE_CACHELINE_MASK))
		return ERR_PTR(-EOPNOTSUPP);

	xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);

	batch_size += pte_update_cmd_size(len);
	batch_size += EMIT_COPY_DW;

	bb = xe_bb_new(gt, batch_size, use_usm_batch);
	if (IS_ERR(bb)) {
		err = PTR_ERR(bb);
		return ERR_PTR(err);
	}

	build_pt_update_batch_sram(m, bb, pt_slot * XE_PAGE_SIZE,
				   sram_addr, len + sram_offset);

	if (dir == XE_MIGRATE_COPY_TO_VRAM) {
		src_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
		dst_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);

	} else {
		src_L0_ofs = xe_migrate_vram_ofs(xe, vram_addr, false);
		dst_L0_ofs = xe_migrate_vm_addr(pt_slot, 0) + sram_offset;
	}

	bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
	update_idx = bb->len;

	emit_copy(gt, bb, src_L0_ofs, dst_L0_ofs, len, pitch);

	job = xe_bb_create_migration_job(m->q, bb,
					 xe_migrate_batch_base(m, use_usm_batch),
					 update_idx);
	if (IS_ERR(job)) {
		err = PTR_ERR(job);
		goto err;
	}

	xe_sched_job_add_migrate_flush(job, 0);

	mutex_lock(&m->job_mutex);
	xe_sched_job_arm(job);
	fence = dma_fence_get(&job->drm.s_fence->finished);
	xe_sched_job_push(job);

	dma_fence_put(m->fence);
	m->fence = dma_fence_get(fence);
	mutex_unlock(&m->job_mutex);

	xe_bb_free(bb, fence);

	return fence;

err:
	xe_bb_free(bb, NULL);

	return ERR_PTR(err);
}

/**
 * xe_migrate_to_vram() - Migrate to VRAM
 * @m: The migration context.
 * @npages: Number of pages to migrate.
 * @src_addr: Array of dma addresses (source of migrate)
 * @dst_addr: Device physical address of VRAM (destination of migrate)
 *
 * Copy from an array of dma addresses to a VRAM device physical address
 *
 * Return: dma fence for migrate to signal completion on success, ERR_PTR on
 * failure
 */
struct dma_fence *xe_migrate_to_vram(struct xe_migrate *m,
				     unsigned long npages,
				     dma_addr_t *src_addr,
				     u64 dst_addr)
{
	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, src_addr, dst_addr,
			       XE_MIGRATE_COPY_TO_VRAM);
}

/**
 * xe_migrate_from_vram() - Migrate from VRAM
 * @m: The migration context.
 * @npages: Number of pages to migrate.
 * @src_addr: Device physical address of VRAM (source of migrate)
 * @dst_addr: Array of dma addresses (destination of migrate)
 *
 * Copy from a VRAM device physical address to an array of dma addresses
 *
 * Return: dma fence for migrate to signal completion on success, ERR_PTR on
 * failure
 */
struct dma_fence *xe_migrate_from_vram(struct xe_migrate *m,
				       unsigned long npages,
				       u64 src_addr,
				       dma_addr_t *dst_addr)
{
	return xe_migrate_vram(m, npages * PAGE_SIZE, 0, dst_addr, src_addr,
			       XE_MIGRATE_COPY_TO_SRAM);
}
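
/*
 * Usage sketch for the two wrappers above (illustrative only, error handling
 * trimmed; "npages", "pages", "dma_addr" and "vram_dpa" are hypothetical
 * caller state): the caller dma-maps its pages, hands the address array plus
 * a VRAM device physical address to xe_migrate_to_vram(), and waits on the
 * returned fence:
 *
 *	for (i = 0; i < npages; i++)
 *		dma_addr[i] = dma_map_page(xe->drm.dev, pages[i], 0,
 *					   PAGE_SIZE, DMA_TO_DEVICE);
 *	fence = xe_migrate_to_vram(m, npages, dma_addr, vram_dpa);
 *	if (!IS_ERR(fence)) {
 *		dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */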

static void xe_migrate_dma_unmap(struct xe_device *xe, dma_addr_t *dma_addr,
				 int len, int write)
{
	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);

	for (i = 0; i < npages; ++i) {
		if (!dma_addr[i])
			break;

		dma_unmap_page(xe->drm.dev, dma_addr[i], PAGE_SIZE,
			       write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
	}
	kfree(dma_addr);
}

static dma_addr_t *xe_migrate_dma_map(struct xe_device *xe,
				      void *buf, int len, int write)
{
	dma_addr_t *dma_addr;
	unsigned long i, npages = DIV_ROUND_UP(len, PAGE_SIZE);

	dma_addr = kcalloc(npages, sizeof(*dma_addr), GFP_KERNEL);
	if (!dma_addr)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; ++i) {
		dma_addr_t addr;
		struct page *page;

		if (is_vmalloc_addr(buf))
			page = vmalloc_to_page(buf);
		else
			page = virt_to_page(buf);

		addr = dma_map_page(xe->drm.dev,
				    page, 0, PAGE_SIZE,
				    write ? DMA_TO_DEVICE :
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(xe->drm.dev, addr))
			goto err_fault;

		dma_addr[i] = addr;
		buf += PAGE_SIZE;
	}

	return dma_addr;

err_fault:
	xe_migrate_dma_unmap(xe, dma_addr, len, write);
	return ERR_PTR(-EFAULT);
}

/**
 * xe_migrate_access_memory - Access memory of a BO via GPU
 *
 * @m: The migration context.
 * @bo: buffer object
 * @offset: access offset into buffer object
 * @buf: pointer to caller memory to read into or write from
 * @len: length of access
 * @write: write access
 *
 * Access memory of a BO via the GPU, either reading into or writing from a
 * passed-in pointer. The pointer is dma-mapped for GPU access and GPU
 * commands are issued to copy to or from it.
 *
 * Returns:
 * 0 if successful, negative error code on failure.
 */
int xe_migrate_access_memory(struct xe_migrate *m, struct xe_bo *bo,
			     unsigned long offset, void *buf, int len,
			     int write)
{
	struct xe_tile *tile = m->tile;
	struct xe_device *xe = tile_to_xe(tile);
	struct xe_res_cursor cursor;
	struct dma_fence *fence = NULL;
	dma_addr_t *dma_addr;
	unsigned long page_offset = (unsigned long)buf & ~PAGE_MASK;
	int bytes_left = len, current_page = 0;
	void *orig_buf = buf;

	xe_bo_assert_held(bo);

	/* Use bounce buffer for small access and unaligned access */
	if (!IS_ALIGNED(len, XE_CACHELINE_BYTES) ||
	    !IS_ALIGNED((unsigned long)buf + offset, XE_CACHELINE_BYTES)) {
		int buf_offset = 0;

		/*
		 * Less than ideal for large unaligned access but this should be
		 * fairly rare, can fixup if this becomes common.
		 */
		do {
			u8 bounce[XE_CACHELINE_BYTES];
			void *ptr = (void *)bounce;
			int err;
			int copy_bytes = min_t(int, bytes_left,
					       XE_CACHELINE_BYTES -
					       (offset & XE_CACHELINE_MASK));
			int ptr_offset = offset & XE_CACHELINE_MASK;

			err = xe_migrate_access_memory(m, bo,
						       offset &
						       ~XE_CACHELINE_MASK,
						       (void *)ptr,
						       sizeof(bounce), 0);
			if (err)
				return err;

			if (write) {
				memcpy(ptr + ptr_offset, buf + buf_offset, copy_bytes);

				err = xe_migrate_access_memory(m, bo,
							       offset & ~XE_CACHELINE_MASK,
							       (void *)ptr,
							       sizeof(bounce), write);
				if (err)
					return err;
			} else {
				memcpy(buf + buf_offset, ptr + ptr_offset,
				       copy_bytes);
			}

			bytes_left -= copy_bytes;
			buf_offset += copy_bytes;
			offset += copy_bytes;
		} while (bytes_left);

		return 0;
	}

	dma_addr = xe_migrate_dma_map(xe, buf, len + page_offset, write);
	if (IS_ERR(dma_addr))
		return PTR_ERR(dma_addr);

	xe_res_first(bo->ttm.resource, offset, xe_bo_size(bo) - offset, &cursor);

	do {
		struct dma_fence *__fence;
		u64 vram_addr = vram_region_gpu_offset(bo->ttm.resource) +
			cursor.start;
		int current_bytes;

		if (cursor.size > MAX_PREEMPTDISABLE_TRANSFER)
			current_bytes = min_t(int, bytes_left,
					      MAX_PREEMPTDISABLE_TRANSFER);
		else
			current_bytes = min_t(int, bytes_left, cursor.size);

		if (fence)
			dma_fence_put(fence);

		__fence = xe_migrate_vram(m, current_bytes,
					  (unsigned long)buf & ~PAGE_MASK,
					  dma_addr + current_page,
					  vram_addr, write ?
					  XE_MIGRATE_COPY_TO_VRAM :
					  XE_MIGRATE_COPY_TO_SRAM);
		if (IS_ERR(__fence)) {
			if (fence)
				dma_fence_wait(fence, false);
			fence = __fence;
			goto out_err;
		}
		fence = __fence;

		buf += current_bytes;
		offset += current_bytes;
		current_page = (int)(buf - orig_buf) / PAGE_SIZE;
		bytes_left -= current_bytes;
		if (bytes_left)
			xe_res_next(&cursor, current_bytes);
	} while (bytes_left);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

out_err:
	xe_migrate_dma_unmap(xe, dma_addr, len + page_offset, write);
	return IS_ERR(fence) ? PTR_ERR(fence) : 0;
}
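
/*
 * Usage sketch for xe_migrate_access_memory() (bo and data are hypothetical;
 * locking shown with xe_bo_lock()/xe_bo_unlock()): the BO's dma-resv must be
 * held across the call, and accesses whose length or (buf + offset) is not
 * XE_CACHELINE_BYTES aligned take the bounce-buffer path above.
 *
 *	u8 data[XE_CACHELINE_BYTES];
 *	int err;
 *
 *	xe_bo_lock(bo, false);
 *	err = xe_migrate_access_memory(m, bo, 0, data, sizeof(data), 0);
 *	xe_bo_unlock(bo);
 */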

/**
 * xe_migrate_job_lock() - Lock migrate job lock
 * @m: The migration context.
 * @q: Queue associated with the operation which requires a lock
 *
 * Lock the migrate job lock if the queue is a migration queue, otherwise
 * assert the VM's dma-resv is held (user queues have their own locking).
 */
void xe_migrate_job_lock(struct xe_migrate *m, struct xe_exec_queue *q)
{
	bool is_migrate = q == m->q;

	if (is_migrate)
		mutex_lock(&m->job_mutex);
	else
		xe_vm_assert_held(q->vm); /* User queue VMs should be locked */
}

/**
 * xe_migrate_job_unlock() - Unlock migrate job lock
 * @m: The migration context.
 * @q: Queue associated with the operation which requires a lock
 *
 * Unlock the migrate job lock if the queue is a migration queue, otherwise
 * assert the VM's dma-resv is held (user queues have their own locking).
 */
void xe_migrate_job_unlock(struct xe_migrate *m, struct xe_exec_queue *q)
{
	bool is_migrate = q == m->q;

	if (is_migrate)
		mutex_unlock(&m->job_mutex);
	else
		xe_vm_assert_held(q->vm); /* User queue VMs should be locked */
}

#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
#include "tests/xe_migrate.c"
#endif