Lines Matching refs:xe_walk
352 xe_pt_insert_entry(struct xe_pt_stage_bind_walk *xe_walk, struct xe_pt *parent, in xe_pt_insert_entry() argument
355 struct xe_pt_update *upd = &xe_walk->wupd.updates[parent->level]; in xe_pt_insert_entry()
357 &xe_walk->wupd.updates[xe_child->level] : NULL; in xe_pt_insert_entry()
360 ret = xe_pt_new_shared(&xe_walk->wupd, parent, offset, true); in xe_pt_insert_entry()
381 xe_pt_write(xe_walk->vm->xe, map, offset, pte); in xe_pt_insert_entry()
398 struct xe_pt_stage_bind_walk *xe_walk) in xe_pt_hugepte_possible() argument
406 if (!xe_pt_covers(addr, next, level, &xe_walk->base)) in xe_pt_hugepte_possible()
410 if (next - xe_walk->va_curs_start > xe_walk->curs->size) in xe_pt_hugepte_possible()
414 if (xe_vma_is_null(xe_walk->vma)) in xe_pt_hugepte_possible()
419 dma = addr - xe_walk->va_curs_start + xe_res_dma(xe_walk->curs); in xe_pt_hugepte_possible()
429 xe_pt_scan_64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk) in xe_pt_scan_64K() argument
431 struct xe_res_cursor curs = *xe_walk->curs; in xe_pt_scan_64K()
436 if (next > xe_walk->l0_end_addr) in xe_pt_scan_64K()
440 if (xe_vma_is_null(xe_walk->vma)) in xe_pt_scan_64K()
443 xe_res_next(&curs, addr - xe_walk->va_curs_start); in xe_pt_scan_64K()
463 xe_pt_is_pte_ps64K(u64 addr, u64 next, struct xe_pt_stage_bind_walk *xe_walk) in xe_pt_is_pte_ps64K() argument
466 if (xe_walk->found_64K && addr - xe_walk->addr_64K < SZ_64K) in xe_pt_is_pte_ps64K()
469 xe_walk->found_64K = xe_pt_scan_64K(addr, addr + SZ_64K, xe_walk); in xe_pt_is_pte_ps64K()
470 xe_walk->addr_64K = addr; in xe_pt_is_pte_ps64K()
472 return xe_walk->found_64K; in xe_pt_is_pte_ps64K()
482 struct xe_pt_stage_bind_walk *xe_walk = in xe_pt_stage_bind_entry() local
483 container_of(walk, typeof(*xe_walk), base); in xe_pt_stage_bind_entry()
484 u16 pat_index = xe_walk->vma->pat_index; in xe_pt_stage_bind_entry()
486 struct xe_vm *vm = xe_walk->vm; in xe_pt_stage_bind_entry()
493 if (level == 0 || xe_pt_hugepte_possible(addr, next, level, xe_walk)) { in xe_pt_stage_bind_entry()
494 struct xe_res_cursor *curs = xe_walk->curs; in xe_pt_stage_bind_entry()
495 bool is_null = xe_vma_is_null(xe_walk->vma); in xe_pt_stage_bind_entry()
497 XE_WARN_ON(xe_walk->va_curs_start != addr); in xe_pt_stage_bind_entry()
500 xe_res_dma(curs) + xe_walk->dma_offset, in xe_pt_stage_bind_entry()
501 xe_walk->vma, pat_index, level); in xe_pt_stage_bind_entry()
502 pte |= xe_walk->default_pte; in xe_pt_stage_bind_entry()
509 if (xe_pt_is_pte_ps64K(addr, next, xe_walk)) { in xe_pt_stage_bind_entry()
510 xe_walk->vma->gpuva.flags |= XE_VMA_PTE_64K; in xe_pt_stage_bind_entry()
512 } else if (XE_WARN_ON(xe_walk->needs_64K)) { in xe_pt_stage_bind_entry()
517 ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, NULL, pte); in xe_pt_stage_bind_entry()
523 xe_walk->va_curs_start = next; in xe_pt_stage_bind_entry()
524 xe_walk->vma->gpuva.flags |= (XE_VMA_PTE_4K << level); in xe_pt_stage_bind_entry()
537 xe_walk->l0_end_addr = next; in xe_pt_stage_bind_entry()
540 covers = xe_pt_covers(addr, next, level, &xe_walk->base); in xe_pt_stage_bind_entry()
544 xe_child = xe_pt_create(xe_walk->vm, xe_walk->tile, level - 1); in xe_pt_stage_bind_entry()
552 xe_pt_populate_empty(xe_walk->tile, xe_walk->vm, xe_child); in xe_pt_stage_bind_entry()
563 if (GRAPHICS_VERx100(tile_to_xe(xe_walk->tile)) >= 1250 && level == 1 && in xe_pt_stage_bind_entry()
564 covers && xe_pt_scan_64K(addr, next, xe_walk)) { in xe_pt_stage_bind_entry()
566 xe_walk->vma->gpuva.flags |= XE_VMA_PTE_COMPACT; in xe_pt_stage_bind_entry()
572 ret = xe_pt_insert_entry(xe_walk, xe_parent, offset, xe_child, in xe_pt_stage_bind_entry()
611 struct xe_pt_stage_bind_walk xe_walk = { in xe_pt_stage_bind() local
642 xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE; in xe_pt_stage_bind()
650 xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE; in xe_pt_stage_bind()
652 xe_walk.default_pte |= XE_USM_PPGTT_PTE_AE; in xe_pt_stage_bind()
660 xe_walk.default_pte &= ~XE_USM_PPGTT_PTE_AE; in xe_pt_stage_bind()
664 xe_walk.default_pte |= XE_PPGTT_PTE_DM; in xe_pt_stage_bind()
665 xe_walk.dma_offset = vram_region_gpu_offset(bo->ttm.resource); in xe_pt_stage_bind()
669 xe_walk.dma_offset = xe_ttm_stolen_gpu_offset(xe_bo_device(bo)); in xe_pt_stage_bind()
688 xe_vma_end(vma), &xe_walk.base); in xe_pt_stage_bind()
690 *num_entries = xe_walk.wupd.num_used_entries; in xe_pt_stage_bind()
766 struct xe_pt_zap_ptes_walk *xe_walk = in xe_pt_zap_ptes_entry() local
767 container_of(walk, typeof(*xe_walk), base); in xe_pt_zap_ptes_entry()
781 xe_map_memset(tile_to_xe(xe_walk->tile), &xe_child->bo->vmap, in xe_pt_zap_ptes_entry()
784 xe_walk->needs_invalidate = true; in xe_pt_zap_ptes_entry()
812 struct xe_pt_zap_ptes_walk xe_walk = { in xe_pt_zap_ptes() local
827 xe_vma_end(vma), &xe_walk.base); in xe_pt_zap_ptes()
829 return xe_walk.needs_invalidate; in xe_pt_zap_ptes()
1414 struct xe_pt_stage_unbind_walk *xe_walk = in xe_pt_check_kill() local
1415 container_of(walk, typeof(*xe_walk), base); in xe_pt_check_kill()
1425 if (xe_walk->modified_start >= addr) in xe_pt_check_kill()
1426 xe_walk->modified_start = round_down(addr, size); in xe_pt_check_kill()
1427 if (xe_walk->modified_end <= next) in xe_pt_check_kill()
1428 xe_walk->modified_end = round_up(next, size); in xe_pt_check_kill()
1459 struct xe_pt_stage_unbind_walk *xe_walk = in xe_pt_stage_unbind_post_descend() local
1460 container_of(walk, typeof(*xe_walk), base); in xe_pt_stage_unbind_post_descend()
1467 addr = xe_walk->modified_start; in xe_pt_stage_unbind_post_descend()
1469 next = xe_walk->modified_end; in xe_pt_stage_unbind_post_descend()
1480 err = xe_pt_new_shared(&xe_walk->wupd, xe_child, offset, true); in xe_pt_stage_unbind_post_descend()
1484 xe_walk->wupd.updates[level].update->qwords = end_offset - offset; in xe_pt_stage_unbind_post_descend()
1511 struct xe_pt_stage_unbind_walk xe_walk = { in xe_pt_stage_unbind() local
1525 xe_vma_end(vma), &xe_walk.base); in xe_pt_stage_unbind()
1527 return xe_walk.wupd.num_used_entries; in xe_pt_stage_unbind()
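
The references above share one recurring pattern: each walk stage (xe_pt_stage_bind_walk, xe_pt_zap_ptes_walk, xe_pt_stage_unbind_walk) embeds the generic walk state as its base member, and each entry callback recovers the outer struct with container_of(walk, typeof(*xe_walk), base), as seen at source lines 482-483, 766-767, 1414-1415 and 1459-1460. Below is a minimal, self-contained userspace sketch of that embedded-base/container_of idiom, assuming nothing beyond standard C; all struct and function names other than container_of() are illustrative and are not taken from the driver.

    /*
     * Sketch of the idiom used by the xe_pt walk callbacks listed above:
     * a specialized walk state embeds a generic walk struct as "base",
     * and the callback recovers the outer struct via container_of().
     * Illustrative names only; not the xe driver's actual API.
     */
    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct pt_walk {                        /* generic walk state */
            int (*pt_entry)(struct pt_walk *walk, unsigned long addr,
                            unsigned long next, unsigned int level);
    };

    struct stage_bind_walk {                /* specialized walk state */
            struct pt_walk base;            /* recovered via container_of() */
            unsigned long va_curs_start;
            unsigned long long default_pte;
    };

    static int stage_bind_entry(struct pt_walk *walk, unsigned long addr,
                                unsigned long next, unsigned int level)
    {
            /* Recover the embedding struct, as the xe callbacks do. */
            struct stage_bind_walk *xe_walk =
                    container_of(walk, struct stage_bind_walk, base);

            printf("level %u: [%#lx, %#lx) start=%#lx pte=%#llx\n",
                   level, addr, next, xe_walk->va_curs_start,
                   xe_walk->default_pte);
            return 0;
    }

    int main(void)
    {
            struct stage_bind_walk xe_walk = {
                    .base.pt_entry  = stage_bind_entry,
                    .va_curs_start  = 0x100000,
                    .default_pte    = 0x3,
            };

            /* A real walker would invoke this per page-table entry visited. */
            return xe_walk.base.pt_entry(&xe_walk.base, 0x100000, 0x200000, 0);
    }

Embedding the generic walk as a member and recovering the outer struct with container_of() keeps the shared page-table walker free of per-stage state, while each stage carries its own bookkeeping (cursor position, default PTE bits, modified range, invalidation flag) across levels of the walk.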