Lines matching: cs, -, 2
1 // SPDX-License-Identifier: MIT
33 GEM_BUG_ON(engine->class != COPY_ENGINE_CLASS);
48 vm->insert_page(vm, 0, d->offset,
49 i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
51 GEM_BUG_ON(!pt->is_compact);
52 d->offset += SZ_2M;
68 vm->insert_page(vm, px_dma(pt), d->offset,
69 i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
71 d->offset += SZ_64K;
80 vm->insert_page(vm, px_dma(pt), d->offset,
81 i915_gem_get_pat_index(vm->i915, I915_CACHE_NONE),
82 i915_gem_object_is_lmem(pt->base) ? PTE_LM : 0);
83 d->offset += PAGE_SIZE;
96 * to pre-allocate the page directories for the migration VM, this
103 * fly. Only 2 implicit vma are used for all migration operations.
107 * [0, CHUNK_SZ) -> first object
108 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second object
109 * [2 * CHUNK_SZ, 2 * CHUNK_SZ + 2 * CHUNK_SZ >> 9] -> PTE
114 * i.e. within the same non-preemptible window so that we do not switch
119 * first is reserved for mapping system-memory, and that just uses the
125 * compact layout for each of these page-tables that fall within the
130 * [0, CHUNK_SZ) -> first window/object, maps smem
131 * [CHUNK_SZ, 2 * CHUNK_SZ) -> second window/object, maps lmem src
132 * [2 * CHUNK_SZ, 3 * CHUNK_SZ) -> third window/object, maps lmem dst
142 * [3 * CHUNK_SZ, 3 * CHUNK_SZ + ((3 * CHUNK_SZ / SZ_2M) * SZ_64K)] -> PTE
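
The address-map comments above reduce to a little arithmetic. Below is a minimal, self-contained sketch of it, assuming CHUNK_SZ is the 8 MiB copy window described earlier; the helper names are illustrative and not part of the driver.

#include <stdbool.h>
#include <stdint.h>

#define SZ_64K		(64ull << 10)
#define SZ_2M		(2ull << 20)
#define CHUNK_SZ	(8ull << 20)	/* assumption: one 8 MiB copy window */

/* Start of the window that maps the PTEs themselves. */
uint64_t pte_window_start(bool has_64k_pages)
{
	/* two windows (src/dst) normally; three (smem, lmem src, lmem dst) on 64K-page platforms */
	return (has_64k_pages ? 3 : 2) * CHUNK_SZ;
}

/* Size of that PTE window, mirroring the two ranges quoted above. */
uint64_t pte_window_size(bool has_64k_pages)
{
	if (has_64k_pages)
		/* one compact 64K page-table page per 2M of mapped window */
		return (3 * CHUNK_SZ / SZ_2M) * SZ_64K;

	/* one 8-byte PTE per 4K page: 2 * CHUNK_SZ >> 9 */
	return (2 * CHUNK_SZ) >> 9;
}

With the assumed 8 MiB CHUNK_SZ this works out to a 32 KiB PTE window for the two-window layout and 12 x 64 KiB = 768 KiB for the compact three-window layout.
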
149 if (!vm->vm.allocate_va_range || !vm->vm.foreach) {
150 err = -ENODEV;
154 if (HAS_64K_PAGES(gt->i915))
161 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
168 engine = gt->engine_class[COPY_ENGINE_CLASS][i];
173 * We copy in 8MiB chunks. Each PDE covers 2MiB, so we need
176 if (HAS_64K_PAGES(gt->i915))
179 sz = 2 * CHUNK_SZ;
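
As a quick sanity check on the chunk arithmetic in the comment above (a sketch only; these macro names are not the driver's): with 8 MiB chunks and one page directory entry covering 2 MiB, the two-window layout comes out to 8 PDEs and the three-window 64K-page layout to 12.

#define CHUNK_SZ_MIB		8	/* assumed copy-window size */
#define PDE_COVERAGE_MIB	2	/* address range covered by one PDE */

#define PDES_FOR_2_WINDOWS	(2 * CHUNK_SZ_MIB / PDE_COVERAGE_MIB)	/* = 8  */
#define PDES_FOR_3_WINDOWS	(3 * CHUNK_SZ_MIB / PDE_COVERAGE_MIB)	/* = 12 */
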
186 if (HAS_64K_PAGES(gt->i915))
191 err = i915_vm_alloc_pt_stash(&vm->vm, &stash, sz);
196 err = i915_vm_lock_objects(&vm->vm, &ww);
199 err = i915_vm_map_pt_stash(&vm->vm, &stash);
203 vm->vm.allocate_va_range(&vm->vm, &stash, base, sz);
205 i915_vm_free_pt_stash(&vm->vm, &stash);
210 if (HAS_64K_PAGES(gt->i915)) {
211 vm->vm.foreach(&vm->vm, base, d.offset - base,
214 vm->vm.foreach(&vm->vm,
216 2 * CHUNK_SZ,
219 vm->vm.foreach(&vm->vm, base, d.offset - base,
224 return &vm->vm;
227 i915_vm_put(&vm->vm);
236 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
237 engine = gt->engine_class[COPY_ENGINE_CLASS][i];
254 return ERR_PTR(-ENODEV);
277 m->context = ce;
293 for (i = 0; i < ARRAY_SIZE(gt->engine_class[COPY_ENGINE_CLASS]); i++) {
294 engine = gt->engine_class[COPY_ENGINE_CLASS][i];
312 * balancing of the virtual-engine.
314 ce = __migrate_engines(m->context->engine->gt);
318 ce->ring = NULL;
319 ce->ring_size = SZ_256K;
321 i915_vm_put(ce->vm);
322 ce->vm = i915_vm_get(m->context->vm);
336 u32 *cs;
338 cs = intel_ring_begin(rq, 2);
339 if (IS_ERR(cs))
340 return PTR_ERR(cs);
343 *cs++ = MI_ARB_ON_OFF;
344 *cs++ = MI_NOOP;
345 intel_ring_advance(rq, cs);
352 struct intel_ring *ring = rq->ring;
354 pkt = min_t(int, pkt, (ring->space - rq->reserved_space) / sizeof(u32) + 5);
355 pkt = min_t(int, pkt, (ring->size - ring->emit) / sizeof(u32) + 5);
369 bool has_64K_pages = HAS_64K_PAGES(rq->i915);
370 const u64 encode = rq->context->vm->pte_encode(0, pat_index,
372 struct intel_ring *ring = rq->ring;
376 u32 *hdr, *cs;
378 GEM_BUG_ON(GRAPHICS_VER(rq->i915) < 8);
398 offset += 2 * CHUNK_SZ;
401 offset += (u64)rq->engine->instance << 32;
403 cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
404 if (IS_ERR(cs))
405 return PTR_ERR(cs);
410 hdr = cs;
411 *cs++ = MI_STORE_DATA_IMM | REG_BIT(21); /* as qword elements */
412 *cs++ = lower_32_bits(offset);
413 *cs++ = upper_32_bits(offset);
416 if (cs - hdr >= pkt) {
419 *hdr += cs - hdr - 2;
420 *cs++ = MI_NOOP;
422 ring->emit = (void *)cs - ring->vaddr;
423 intel_ring_advance(rq, cs);
426 cs = intel_ring_begin(rq, I915_EMIT_PTE_NUM_DWORDS);
427 if (IS_ERR(cs))
428 return PTR_ERR(cs);
435 dword_rem = SZ_2M - (total & (SZ_2M - 1));
437 dword_rem *= 2;
443 hdr = cs;
444 *cs++ = MI_STORE_DATA_IMM | REG_BIT(21);
445 *cs++ = lower_32_bits(offset);
446 *cs++ = upper_32_bits(offset);
449 GEM_BUG_ON(!IS_ALIGNED(it->dma, page_size));
451 *cs++ = lower_32_bits(encode | it->dma);
452 *cs++ = upper_32_bits(encode | it->dma);
457 it->dma += page_size;
458 if (it->dma >= it->max) {
459 it->sg = __sg_next(it->sg);
460 if (!it->sg || sg_dma_len(it->sg) == 0)
463 it->dma = sg_dma_address(it->sg);
464 it->max = it->dma + sg_dma_len(it->sg);
468 *hdr += cs - hdr - 2;
469 *cs++ = MI_NOOP;
471 ring->emit = (void *)cs - ring->vaddr;
472 intel_ring_advance(rq, cs);
489 * DOC: Flat-CCS - Memory compression for Local memory
491 * On Xe-HP and later devices, we use dedicated compression control state (CCS)
504 * I915 supports Flat-CCS on lmem-only objects. When an object has smem in
506 * content into smem. If the lmem object is Flat-CCS compressed by userspace,
508 * for such decompression. Hence I915 supports Flat-CCS only on lmem-only objects.
510 * When we exhaust the lmem, Flat-CCS capable objects' lmem backing memory can
512 * it can potentially be swapped out at a later point, if required.
515 * and potentially performing any required swap-in.
518 * {lmem, smem}, objects are treated as non-Flat-CCS-capable objects.
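
For a sense of scale, the Flat-CCS metadata is tiny next to the data it describes: roughly one CCS byte per 256 bytes of main memory. The helper below is a self-contained sketch of that sizing; the 1:256 ratio and the names are stated here as assumptions rather than taken from the driver.

#include <stdint.h>

#define BYTES_PER_CCS_BYTE	256	/* assumed ratio: 1 byte of CCS per 256 bytes of data */

/* CCS bytes needed to cover a main-memory allocation of main_size bytes. */
uint64_t ccs_bytes_for(uint64_t main_size)
{
	return (main_size + BYTES_PER_CCS_BYTE - 1) / BYTES_PER_CCS_BYTE;	/* DIV_ROUND_UP */
}

At that ratio an 8 MiB copy chunk carries only 32 KiB of CCS state.
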
534 struct drm_i915_private *i915 = rq->i915;
535 int mocs = rq->engine->gt->mocs.uc_index << 1;
537 u32 *cs;
539 cs = intel_ring_begin(rq, 12);
540 if (IS_ERR(cs))
541 return PTR_ERR(cs);
546 cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
562 *cs++ = XY_CTRL_SURF_COPY_BLT |
565 ((num_ccs_blks - 1) & CCS_SIZE_MASK) << CCS_SIZE_SHIFT;
566 *cs++ = src_offset;
567 *cs++ = rq->engine->instance |
569 *cs++ = dst_offset;
570 *cs++ = rq->engine->instance |
573 cs = i915_flush_dw(cs, MI_FLUSH_DW_LLC | MI_FLUSH_DW_CCS);
574 *cs++ = MI_NOOP;
576 intel_ring_advance(rq, cs);
584 const int ver = GRAPHICS_VER(rq->i915);
585 u32 instance = rq->engine->instance;
586 u32 *cs;
588 cs = intel_ring_begin(rq, ver >= 8 ? 10 : 6);
589 if (IS_ERR(cs))
590 return PTR_ERR(cs);
593 *cs++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
594 *cs++ = BLT_DEPTH_32 | PAGE_SIZE;
595 *cs++ = 0;
596 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
597 *cs++ = dst_offset;
598 *cs++ = instance;
599 *cs++ = 0;
600 *cs++ = PAGE_SIZE;
601 *cs++ = src_offset;
602 *cs++ = instance;
604 *cs++ = XY_SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (10 - 2);
605 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
606 *cs++ = 0;
607 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
608 *cs++ = dst_offset;
609 *cs++ = instance;
610 *cs++ = 0;
611 *cs++ = PAGE_SIZE;
612 *cs++ = src_offset;
613 *cs++ = instance;
616 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
617 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | PAGE_SIZE;
618 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE;
619 *cs++ = dst_offset;
620 *cs++ = PAGE_SIZE;
621 *cs++ = src_offset;
624 intel_ring_advance(rq, cs);
647 * will be taken for the blt. In Flat-CCS supported
662 GEM_BUG_ON(!it->sg || !sg_dma_len(it->sg));
663 len = it->max - it->dma;
665 it->dma += bytes_to_cpy;
669 bytes_to_cpy -= len;
671 it->sg = __sg_next(it->sg);
672 it->dma = sg_dma_address(it->sg);
673 it->max = it->dma + sg_dma_len(it->sg);
689 struct drm_i915_private *i915 = ce->engine->i915;
699 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
700 GEM_BUG_ON(IS_DGFX(ce->engine->i915) && (!src_is_lmem && !dst_is_lmem));
703 GEM_BUG_ON(ce->ring->size < SZ_64K);
728 * TO-DO: Want to move the size mismatch check to a WARN_ON,
729 * but we still see some smem->lmem requests with the same size.
741 if (HAS_64K_PAGES(ce->engine->i915)) {
747 dst_offset = 2 * CHUNK_SZ;
764 if (rq->engine->emit_init_breadcrumb) {
765 err = rq->engine->emit_init_breadcrumb(rq);
784 err = -EINVAL;
797 err = -EINVAL;
801 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
809 bytes_to_cpy -= len;
814 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
825 err = -EINVAL;
829 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
838 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
841 ccs_bytes_to_cpy -= ccs_sz;
843 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
850 * be doing an lmem -> lmem transfer, and so
857 * need to copy the CCS state as-is.
879 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
884 /* Arbitration is re-enabled between requests. */
906 err = -EINVAL;
920 struct drm_i915_private *i915 = rq->i915;
921 int mocs = rq->engine->gt->mocs.uc_index << 1;
924 u32 *cs;
935 cs = intel_ring_begin(rq, ring_sz);
936 if (IS_ERR(cs))
937 return PTR_ERR(cs);
940 *cs++ = XY_FAST_COLOR_BLT_CMD | XY_FAST_COLOR_BLT_DEPTH_32 |
941 (XY_FAST_COLOR_BLT_DW - 2);
942 *cs++ = FIELD_PREP(XY_FAST_COLOR_BLT_MOCS_MASK, mocs) |
943 (PAGE_SIZE - 1);
944 *cs++ = 0;
945 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
946 *cs++ = offset;
947 *cs++ = rq->engine->instance;
948 *cs++ = !is_lmem << XY_FAST_COLOR_BLT_MEM_TYPE_SHIFT;
950 *cs++ = value;
951 *cs++ = 0;
952 *cs++ = 0;
953 *cs++ = 0;
955 *cs++ = 0;
956 *cs++ = 0;
958 *cs++ = 0;
959 *cs++ = 0;
960 *cs++ = 0;
962 *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (7 - 2);
963 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
964 *cs++ = 0;
965 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
966 *cs++ = offset;
967 *cs++ = rq->engine->instance;
968 *cs++ = value;
969 *cs++ = MI_NOOP;
971 *cs++ = XY_COLOR_BLT_CMD | BLT_WRITE_RGBA | (6 - 2);
972 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | PAGE_SIZE;
973 *cs++ = 0;
974 *cs++ = size >> PAGE_SHIFT << 16 | PAGE_SIZE / 4;
975 *cs++ = offset;
976 *cs++ = value;
979 intel_ring_advance(rq, cs);
992 struct drm_i915_private *i915 = ce->engine->i915;
998 GEM_BUG_ON(ce->vm != ce->engine->gt->migrate.context->vm);
1001 GEM_BUG_ON(ce->ring->size < SZ_64K);
1021 if (rq->engine->emit_init_breadcrumb) {
1022 err = rq->engine->emit_init_breadcrumb(rq);
1041 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
1060 err = rq->engine->emit_flush(rq, EMIT_INVALIDATE);
1062 /* Arbitration is re-enabled between requests. */
1093 if (!m->context)
1094 return -ENODEV;
1098 ce = intel_context_get(m->context);
1130 if (!m->context)
1131 return -ENODEV;
1135 ce = intel_context_get(m->context);
1155 ce = fetch_and_zero(&m->context);