/linux/drivers/gpu/drm/xe/
xe_sriov_pf_service.c
     29  xe_assert(xe, IS_SRIOV_PF(xe));  in xe_sriov_pf_service_init()
     48  xe_assert(xe, IS_SRIOV_PF(xe));  in pf_negotiate_version()
     49  xe_assert(xe, base.major);  in pf_negotiate_version()
     50  xe_assert(xe, base.major <= latest.major);  in pf_negotiate_version()
     51  xe_assert(xe, (base.major < latest.major) || (base.minor <= latest.minor));  in pf_negotiate_version()
     77  xe_assert(xe, base.major == latest.major);  in pf_negotiate_version()
     90  xe_assert(xe, major || minor);  in pf_connect()
    200  xe_assert(xe, IS_SRIOV_PF(xe));  in xe_sriov_pf_service_print_versions()
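The pf_negotiate_version() asserts at lines 48-51 encode an ordering invariant: the supported range runs from base to latest, so base must be a real version that compares no newer than latest. Below is a minimal user-space model of those checks, with plain assert() standing in for xe_assert(); the struct layout and the clamping policy are assumptions for illustration, not the driver's actual code.

/*
 * Minimal user-space model of the invariants asserted in
 * pf_negotiate_version() (lines 48-51 above).  Plain assert() stands in
 * for xe_assert(); the struct layout and the clamping policy are
 * assumptions for illustration, not the driver's actual code.
 */
#include <assert.h>
#include <stdio.h>

struct xe_version { unsigned int major, minor; };

static struct xe_version negotiate(struct xe_version base,
                                   struct xe_version latest,
                                   struct xe_version wanted)
{
        /* The supported range must be well formed: base is a real
         * version and compares no newer than latest. */
        assert(base.major);
        assert(base.major <= latest.major);
        assert(base.major < latest.major || base.minor <= latest.minor);

        /* Assumed policy: clamp the request into [base, latest]. */
        if (wanted.major > latest.major ||
            (wanted.major == latest.major && wanted.minor > latest.minor))
                return latest;
        if (wanted.major < base.major ||
            (wanted.major == base.major && wanted.minor < base.minor))
                return base;
        return wanted;
}

int main(void)
{
        struct xe_version v = negotiate((struct xe_version){1, 0},
                                        (struct xe_version){1, 9},
                                        (struct xe_version){2, 3});

        printf("negotiated %u.%u\n", v.major, v.minor);  /* prints 1.9 */
        return 0;
}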
xe_sriov_pf_helpers.h
     26  xe_assert((xe), (vfid) <= xe_sriov_pf_get_totalvfs(xe))
     36  xe_assert(xe, IS_SRIOV_PF(xe));  in xe_sriov_pf_get_totalvfs()
     42  xe_assert(xe, IS_SRIOV_PF(xe));  in xe_sriov_pf_master_mutex()
xe_hmm.c
     44  xe_assert(xe, hmm_pfn & HMM_PFN_VALID);  in xe_alloc_sg()
    111  xe_assert(xe, !is_device_private_page(page));  in xe_build_sg()
    118  xe_assert(xe, i >= npages);  in xe_build_sg()
    124  xe_assert(xe, i < npages);  in xe_build_sg()
    143  xe_assert(vm->xe, !userptr->mapped);  in xe_hmm_userptr_set_mapped()
    185  xe_assert(xe_vma_vm(&uvma->vma)->xe, userptr->sg);  in xe_hmm_userptr_free_sg()
xe_pci_sriov.c
     76  xe_assert(xe, IS_SRIOV_PF(xe));  in xe_pci_pf_get_vf_dev()
    153  xe_assert(xe, IS_SRIOV_PF(xe));  in pf_enable_vfs()
    154  xe_assert(xe, num_vfs > 0);  in pf_enable_vfs()
    155  xe_assert(xe, num_vfs <= total_vfs);  in pf_enable_vfs()
    211  xe_assert(xe, IS_SRIOV_PF(xe));  in pf_disable_vfs()
xe_guc.c
     61  xe_assert(xe, addr >= xe_wopcm_size(guc_to_xe(guc)));  in guc_bo_ggtt_addr()
     62  xe_assert(xe, addr < GUC_GGTT_TOP);  in guc_bo_ggtt_addr()
     63  xe_assert(xe, xe_bo_size(bo) <= GUC_GGTT_TOP - addr);  in guc_bo_ggtt_addr()
    301  xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));  in guc_action_register_g2g_buffer()
    302  xe_assert(xe, !(size % SZ_4K));  in guc_action_register_g2g_buffer()
    318  xe_assert(xe, (type == XE_G2G_TYPE_IN) || (type == XE_G2G_TYPE_OUT));  in guc_action_deregister_g2g_buffer()
    411  xe_assert(xe, xe == gt_to_xe(far_gt));  in guc_g2g_register()
    414  xe_assert(xe, g2g_bo);  in guc_g2g_register()
    417  xe_assert(xe, slot >= 0);  in guc_g2g_register()
    423  xe_assert(xe, (desc - base + G2G_DESC_SIZE) <= G2G_DESC_AREA_SIZE);  in guc_g2g_register()
    [all …]
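The guc_bo_ggtt_addr() asserts at lines 61-63 pin down the GGTT window a GuC-visible buffer may occupy: above WOPCM and entirely below GUC_GGTT_TOP. A user-space sketch of the same three checks follows; the constant value and the function shape are assumptions for the model.

/*
 * User-space model of the window checks in guc_bo_ggtt_addr()
 * (xe_guc.c, lines 61-63 above).  The constant is an illustrative
 * assumption; plain assert() stands in for xe_assert().
 */
#include <assert.h>
#include <stdint.h>

#define GUC_GGTT_TOP    0xFEE00000u     /* assumed ceiling, for the model */

static uint32_t guc_bo_ggtt_addr(uint32_t addr, uint32_t size,
                                 uint32_t wopcm_size)
{
        assert(addr >= wopcm_size);             /* BO must sit above WOPCM */
        assert(addr < GUC_GGTT_TOP);            /* and start below the top */
        assert(size <= GUC_GGTT_TOP - addr);    /* and fit entirely inside */
        return addr;
}

int main(void)
{
        /* 64 KiB BO at 16 MiB, with an assumed 4 MiB WOPCM: all checks pass. */
        return guc_bo_ggtt_addr(0x01000000u, 0x10000u, 0x00400000u) ? 0 : 1;
}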
xe_hw_engine_group.c
    127  xe_assert(xe, group);  in xe_hw_engine_group_add_exec_queue()
    128  xe_assert(xe, !(q->flags & EXEC_QUEUE_FLAG_VM));  in xe_hw_engine_group_add_exec_queue()
    129  xe_assert(xe, q->vm);  in xe_hw_engine_group_add_exec_queue()
    167  xe_assert(xe, group);  in xe_hw_engine_group_del_exec_queue()
    168  xe_assert(xe, q->vm);  in xe_hw_engine_group_del_exec_queue()
xe_bo.c
    163  xe_assert(xe, mem_type == XE_PL_STOLEN || mem_type_is_vram(mem_type));  in mem_type_to_migrate()
    174  xe_assert(xe, resource_is_vram(res));  in res_to_mem_region()
    185  xe_assert(xe, *c < ARRAY_SIZE(bo->placements));  in try_add_system()
    222  xe_assert(xe, *c < ARRAY_SIZE(bo->placements));  in add_vram()
    225  xe_assert(xe, vram && vram->usable_size);  in add_vram()
    256  xe_assert(xe, *c < ARRAY_SIZE(bo->placements));  in try_add_stolen()
    495  xe_assert(xe, bo->cpu_caching == 0);  in xe_ttm_tt_create()
    697  xe_assert(xe, attach);  in xe_bo_move_dmabuf()
    698  xe_assert(xe, ttm_bo->ttm);  in xe_bo_move_dmabuf()
    916  xe_assert(xe, migrate);  in xe_bo_move()
    [all …]
xe_sriov_pf.c
     61  xe_assert(xe, totalvfs <= U16_MAX);  in xe_sriov_pf_readiness()
     90  xe_assert(xe, IS_SRIOV_PF(xe));  in xe_sriov_pf_init_early()
    143  xe_assert(xe, IS_SRIOV_PF(xe));  in xe_sriov_pf_print_vfs_summary()
xe_gt_ccs_mode.c
     25  xe_assert(xe, xe_gt_ccs_mode_enabled(gt));  in __xe_gt_apply_ccs_mode()
     27  xe_assert(xe, num_engines && num_engines <= num_slices);  in __xe_gt_apply_ccs_mode()
     28  xe_assert(xe, !(num_slices % num_engines));  in __xe_gt_apply_ccs_mode()
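The __xe_gt_apply_ccs_mode() asserts at lines 25-28 require the compute slices to split evenly across the enabled engines. A toy model of that divisibility invariant, with plain assert() standing in for xe_assert():

/*
 * Model of the invariants in __xe_gt_apply_ccs_mode() (xe_gt_ccs_mode.c,
 * lines 25-28 above): at least one engine, no more engines than slices,
 * and an even split.  The helper is an illustration, not driver code.
 */
#include <assert.h>
#include <stdio.h>

static unsigned int slices_per_engine(unsigned int num_slices,
                                      unsigned int num_engines)
{
        assert(num_engines && num_engines <= num_slices);
        assert(!(num_slices % num_engines));    /* uneven splits are a bug */
        return num_slices / num_engines;
}

int main(void)
{
        printf("%u\n", slices_per_engine(4, 2));        /* prints 2 */
        return 0;
}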
xe_wa.h
     44  xe_assert(xe__, (xe__)->wa_active.oob_initialized); \
     49  xe_assert(xe__, (xe__)->wa_active.oob_initialized); \
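Both xe_wa.h asserts guard the same ordering rule: an out-of-band (OOB) workaround may only be queried after the OOB table has been initialized. A small model of that contract follows; the struct and the bitmask lookup are assumptions for illustration.

/*
 * Model of the ordering contract behind the xe_wa.h asserts (lines 44
 * and 49 above): querying an out-of-band workaround before the OOB
 * table is initialized is a programming error.
 */
#include <assert.h>
#include <stdbool.h>

struct wa_active {
        bool oob_initialized;   /* set once the OOB table is built */
        unsigned long oob;      /* assumed: one bit per workaround */
};

static bool xe_wa_oob_active(const struct wa_active *wa, unsigned int id)
{
        assert(wa->oob_initialized);    /* mirrors the listed asserts */
        return wa->oob & (1UL << id);
}

int main(void)
{
        struct wa_active wa = { .oob_initialized = true, .oob = 1UL << 3 };

        return xe_wa_oob_active(&wa, 3) ? 0 : 1;
}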
xe_vm.c
     76  xe_assert(xe, xe_vma_is_userptr(vma));  in xe_vma_userptr_pin_pages()
    172  xe_assert(vm->xe, link != list);  in arm_preempt_fences()
    234  xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));  in xe_vm_add_compute_exec_queue()
    523  xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));  in preempt_rebind_work_func()
    674  xe_assert(vm->xe, xe_vma_is_userptr(vma));  in vma_userptr_invalidate()
    731  xe_assert(vm->xe, !xe_vm_in_fault_mode(vm));  in xe_vm_userptr_pin()
    736  xe_assert(vm->xe, list_empty(&vm->userptr.repin_list));  in xe_vm_userptr_pin()
    938  xe_assert(vm->xe, vma->tile_present);  in xe_vm_rebind()
    985  xe_assert(vm->xe, xe_vm_in_fault_mode(vm));  in xe_vma_rebind()
   1074  xe_assert(vm->xe, xe_vm_in_fault_mode(vm));  in xe_vm_range_rebind()
    [all …]
xe_shrinker.c
     90  xe_assert(xe, !IS_ERR(ttm_bo));  in __xe_shrinker_walk()
    270  xe_assert(shrinker->xe, !shrinker->shrinkable_pages);  in xe_shrinker_fini()
    271  xe_assert(shrinker->xe, !shrinker->purgeable_pages);  in xe_shrinker_fini()
xe_pxp.c
    500  xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);  in xe_pxp_exec_queue_set_type()
    541  xe_assert(pxp->xe, type == DRM_XE_PXP_TYPE_HWDRM);  in pxp_start()
    771  xe_assert(pxp->xe, !bo->pxp_key_instance);  in xe_pxp_key_assign()
    805  xe_assert(pxp->xe, bo->pxp_key_instance);  in xe_pxp_bo_key_check()
    940  xe_assert(pxp->xe, pxp->status == XE_PXP_SUSPENDED);  in xe_pxp_pm_resume()
xe_sriov.h
     24  xe_assert(xe, xe->sriov.__mode);  in xe_device_sriov_mode()
xe_drm_client.c
    158  xe_assert(xe, !kref_read(&bo->ttm.base.refcount));  in xe_drm_client_remove_bo()
    246  xe_assert(xef->xe, !list_empty(&bo->client_link));  in show_meminfo()
xe_gt_topology.c
    113  xe_assert(xe, find_last_bit(pattern, XE_MAX_L3_BANK_MASK_BITS) < patternbits ||  in gen_l3_mask_from_pattern()
    115  xe_assert(xe, !mask || patternbits * (__fls(mask) + 1) <= XE_MAX_L3_BANK_MASK_BITS);  in gen_l3_mask_from_pattern()
xe_vm.h
    182  xe_assert(xe_vma_vm(vma)->xe, xe_vma_is_userptr(vma));  in to_userptr_vma()
    240  xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));  in xe_vm_queue_rebind_worker()
xe_uc_fw.c
    312  xe_assert(xe, !(size % 4));  in xe_uc_fw_copy_rsa()
    313  xe_assert(xe, xe_uc_fw_is_available(uc_fw));  in xe_uc_fw_copy_rsa()
    490  xe_assert(xe, manifest_entry);  in parse_cpd_header()
    542  xe_assert(xe, xe->info.platform != XE_DG2);  in parse_cpd_header()
    898  xe_assert(xe, !xe_uc_fw_is_loaded(uc_fw));  in xe_uc_fw_upload()
xe_svm.c
    596  xe_assert(vm->xe, xe_vm_is_closed(vm));  in xe_svm_close()
    608  xe_assert(vm->xe, xe_vm_is_closed(vm));  in xe_svm_fini()
    778  xe_assert(vm->xe, IS_DGFX(vm->xe));  in xe_svm_range_needs_migrate_to_vram()
    830  xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));  in xe_svm_handle_pagefault()
   1016  xe_assert(tile_to_xe(tile), range->base.flags.migrate_devmem);  in xe_svm_alloc_vram()
xe_assert.h
    108  #define xe_assert(xe, condition) xe_assert_msg((xe), condition, "")  (macro definition)
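Line 108 shows the whole definition: xe_assert() is xe_assert_msg() with an empty message. Below is a user-space approximation of that layering; the fprintf() fallback is an assumption for illustration, while the real macros report through the xe device and are only active on debug builds (see the kernel-doc referenced at the end of this listing).

/*
 * User-space approximation of the layering on line 108 above:
 * xe_assert() forwards to xe_assert_msg() with an empty message.
 * The expansion below is an illustrative stand-in, not the driver's
 * real implementation.
 */
#include <stdio.h>

#define xe_assert_msg(xe, condition, msg, ...) \
        do { \
                (void)(xe); /* the real macro uses it for device info */ \
                if (!(condition)) \
                        fprintf(stderr, "xe assert failed: %s " msg "\n", \
                                #condition, ##__VA_ARGS__); \
        } while (0)

#define xe_assert(xe, condition) xe_assert_msg((xe), condition, "")

int main(void)
{
        void *xe = NULL;        /* stand-in for struct xe_device * */

        xe_assert(xe, 1 + 1 == 2);      /* passes, prints nothing */
        xe_assert(xe, 1 + 1 == 3);      /* prints the failed condition */
        return 0;
}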
xe_mocs.c
    652  xe_assert(xe, info->unused_entries_index != 0);  in get_mocs_settings()
    654  xe_assert(xe, info->ops && info->ops->dump);  in get_mocs_settings()
    655  xe_assert(xe, info->table_size <= info->num_mocs_regs);  in get_mocs_settings()
xe_migrate.c
    153  xe_assert(xe, IS_ALIGNED(xe->mem.vram.usable_size, SZ_2M));  in xe_migrate_program_identity()
    181  xe_assert(xe, pos == vram_limit);  in xe_migrate_program_identity()
    313  xe_assert(xe, xe->mem.vram.actual_physical_size <=  in xe_migrate_prepare_vm()
    326  xe_assert(xe, xe->mem.vram.actual_physical_size <= (MAX_NUM_PTE -  in xe_migrate_prepare_vm()
    600  xe_assert(xe, (va & (SZ_64K - 1)) ==  in emit_pte()
    844  xe_assert(xe, type_device);  in xe_migrate_copy()
    850  xe_assert(xe, IS_ALIGNED(ccs_it.start, PAGE_SIZE));  in xe_migrate_copy()
   1636  xe_assert(xe, npages * PAGE_SIZE <= MAX_PREEMPTDISABLE_TRANSFER);  in xe_migrate_vram()
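xe_migrate_program_identity() pairs the two asserts at lines 153 and 181: because the usable VRAM size is 2 MiB aligned, a 2 MiB-granular walk must land exactly on the VRAM limit. A sketch of the walk and its post-condition follows; the loop body is an illustrative stub.

/*
 * Model of the identity-map invariants in xe_migrate_program_identity()
 * (xe_migrate.c, lines 153 and 181 above).  Plain assert() stands in
 * for xe_assert(); the walk itself is a stub.
 */
#include <assert.h>
#include <stdint.h>

#define SZ_2M                   (2ull << 20)
#define IS_ALIGNED(x, a)        (((x) & ((a) - 1)) == 0)

static void program_identity(uint64_t vram_base, uint64_t usable_size)
{
        uint64_t vram_limit = vram_base + usable_size;
        uint64_t pos;

        assert(IS_ALIGNED(usable_size, SZ_2M));         /* mirrors line 153 */

        for (pos = vram_base; pos < vram_limit; pos += SZ_2M)
                ;       /* emit one 2 MiB identity PTE here (elided) */

        assert(pos == vram_limit);                      /* mirrors line 181 */
}

int main(void)
{
        program_identity(0, 64 * SZ_2M);        /* 128 MiB of modeled VRAM */
        return 0;
}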
xe_sriov_vf.c
    300  xe_assert(xe, IS_SRIOV_VF(xe));  in xe_sriov_vf_start_migration_recovery()
xe_execlist.c
    352  xe_assert(xe, !xe_device_uc_enabled(xe));  in execlist_exec_queue_init()
    397  xe_assert(xe, !xe_device_uc_enabled(xe));  in execlist_exec_queue_fini_async()
/linux/Documentation/gpu/xe/
xe_debugging.rst
      7  .. kernel-doc:: drivers/gpu/drm/xe/xe_assert.h