/linux/drivers/gpu/drm/amd/amdkfd/

kfd_device_queue_manager_v9.c
    30  struct qcm_process_device *qpd);
    32  struct qcm_process_device *qpd);
    52  struct qcm_process_device *qpd)  in update_qpd_v9() argument
    56  pdd = qpd_to_pdd(qpd);  in update_qpd_v9()
    59  if (qpd->sh_mem_config == 0) {  in update_qpd_v9()
    60  qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<  in update_qpd_v9()
    64  qpd->sh_mem_config |= 1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;  in update_qpd_v9()
    68  qpd->sh_mem_config |=  in update_qpd_v9()
    71  qpd->sh_mem_ape1_limit = 0;  in update_qpd_v9()
    72  qpd->sh_mem_ape1_base = 0;  in update_qpd_v9()
    [all …]

kfd_device_queue_manager_cik.c
    31  struct qcm_process_device *qpd,
    37  struct qcm_process_device *qpd);
    40  struct qcm_process_device *qpd);
    79  struct qcm_process_device *qpd,  in set_cache_memory_policy_cik() argument
    96  qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)  in set_cache_memory_policy_cik()
    105  struct qcm_process_device *qpd)  in update_qpd_cik() argument
    110  pdd = qpd_to_pdd(qpd);  in update_qpd_cik()
    113  if (qpd->sh_mem_config == 0) {  in update_qpd_cik()
    114  qpd->sh_mem_config =  in update_qpd_cik()
    118  qpd->sh_mem_ape1_limit = 0;  in update_qpd_cik()
    [all …]

kfd_packet_manager_v9.c
    33  uint32_t *buffer, struct qcm_process_device *qpd)  in pm_map_process_v9() argument
    36  uint64_t vm_page_table_base_addr = qpd->page_table_base;  in pm_map_process_v9()
    39  container_of(qpd, struct kfd_process_device, qpd);  in pm_map_process_v9()
    48  packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;  in pm_map_process_v9()
    50  packet->bitfields2.pasid = qpd->pqm->process->pasid;  in pm_map_process_v9()
    51  packet->bitfields14.gds_size = qpd->gds_size & 0x3F;  in pm_map_process_v9()
    52  packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;  in pm_map_process_v9()
    53  packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;  in pm_map_process_v9()
    54  packet->bitfields14.num_oac = qpd->num_oac;  in pm_map_process_v9()
    56  packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;  in pm_map_process_v9()
    [all …]

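The kfd_packet_manager_v9.c hits above are pm_map_process_v9() copying per-process state out of the qpd into the MAP_PROCESS packet: PASID, the GDS allocation split across a low and a high bitfield, the GWS count (reported only while a GWS queue is actually mapped), the OAC count, and the queue count, which is forced to 0 for debug processes since their work goes through the DIQ. A minimal userspace sketch of that packing; the packet layout below is invented for illustration and merely stands in for the real pm4_mes_map_process definition:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Invented packet layout for illustration; the real one is
 * struct pm4_mes_map_process in the PM4 packet headers. */
struct map_process_packet {
        uint32_t pasid       : 16;
        uint32_t diq_enable  : 1;
        uint32_t gds_size    : 6;
        uint32_t gds_size_hi : 4;
        uint32_t num_gws     : 7;
        uint32_t num_oac     : 4;
        uint32_t num_queues  : 7;
        uint64_t vm_page_table_base;
};

/* Simplified per-process state mirroring the qcm_process_device
 * fields that pm_map_process_v9() reads. */
struct qpd_state {
        uint32_t pasid;
        uint32_t gds_size;
        uint32_t num_gws;
        uint32_t num_oac;
        uint32_t queue_count;
        uint64_t page_table_base;
        bool is_debug;
        bool mapped_gws_queue;
};

static void map_process(struct map_process_packet *packet,
                        const struct qpd_state *qpd)
{
        memset(packet, 0, sizeof(*packet));

        /* Debug processes get the DIQ enabled and no HQD-backed queues. */
        packet->diq_enable = qpd->is_debug ? 1 : 0;
        packet->num_queues = qpd->is_debug ? 0 : qpd->queue_count;

        packet->pasid = qpd->pasid;

        /* The GDS allocation is split into a low 6-bit and a high 4-bit field. */
        packet->gds_size = qpd->gds_size & 0x3F;
        packet->gds_size_hi = (qpd->gds_size >> 6) & 0xF;

        /* GWS is only advertised once a GWS queue is actually mapped. */
        packet->num_gws = qpd->mapped_gws_queue ? qpd->num_gws : 0;
        packet->num_oac = qpd->num_oac;

        packet->vm_page_table_base = qpd->page_table_base;
}

The gds_size / gds_size_hi split mirrors the pair of hits above; everything else is a direct field copy from the qpd.
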
kfd_device_queue_manager_vi.c
    31  struct qcm_process_device *qpd,
    37  struct qcm_process_device *qpd);
    40  struct qcm_process_device *qpd);
    80  struct qcm_process_device *qpd,  in set_cache_memory_policy_vi() argument
    97  qpd->sh_mem_config =  in set_cache_memory_policy_vi()
    107  struct qcm_process_device *qpd)  in update_qpd_vi() argument
    112  pdd = qpd_to_pdd(qpd);  in update_qpd_vi()
    115  if (qpd->sh_mem_config == 0) {  in update_qpd_vi()
    116  qpd->sh_mem_config =  in update_qpd_vi()
    124  qpd->sh_mem_ape1_limit = 0;  in update_qpd_vi()
    [all …]

kfd_device_queue_manager_v10.c
    31  struct qcm_process_device *qpd);
    33  struct qcm_process_device *qpd);
    53  struct qcm_process_device *qpd)  in update_qpd_v10() argument
    57  pdd = qpd_to_pdd(qpd);  in update_qpd_v10()
    60  if (qpd->sh_mem_config == 0) {  in update_qpd_v10()
    61  qpd->sh_mem_config =  in update_qpd_v10()
    65  qpd->sh_mem_ape1_limit = 0;  in update_qpd_v10()
    66  qpd->sh_mem_ape1_base = 0;  in update_qpd_v10()
    69  qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);  in update_qpd_v10()
    71  pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);  in update_qpd_v10()
    [all …]

kfd_device_queue_manager_v11.c
    30  struct qcm_process_device *qpd);
    32  struct qcm_process_device *qpd);
    52  struct qcm_process_device *qpd)  in update_qpd_v11() argument
    56  pdd = qpd_to_pdd(qpd);  in update_qpd_v11()
    59  if (qpd->sh_mem_config == 0) {  in update_qpd_v11()
    60  qpd->sh_mem_config =  in update_qpd_v11()
    65  qpd->sh_mem_ape1_limit = 0;  in update_qpd_v11()
    66  qpd->sh_mem_ape1_base = 0;  in update_qpd_v11()
    69  qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);  in update_qpd_v11()
    71  pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);  in update_qpd_v11()
    [all …]

kfd_device_queue_manager_v12.c
    30  struct qcm_process_device *qpd);
    32  struct qcm_process_device *qpd);
    52  struct qcm_process_device *qpd)  in update_qpd_v12() argument
    56  pdd = qpd_to_pdd(qpd);  in update_qpd_v12()
    59  if (qpd->sh_mem_config == 0) {  in update_qpd_v12()
    60  qpd->sh_mem_config =  in update_qpd_v12()
    65  qpd->sh_mem_ape1_limit = 0;  in update_qpd_v12()
    66  qpd->sh_mem_ape1_base = 0;  in update_qpd_v12()
    69  qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);  in update_qpd_v12()
    71  pr_debug("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);  in update_qpd_v12()
    [all …]

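Across the v9/v10/v11/v12 (and CIK/VI) device queue managers listed above, update_qpd_*() has the same shape: look up the owning kfd_process_device via qpd_to_pdd(), program qpd->sh_mem_config only the first time (while it is still zero), clear the APE1 aperture registers, and recompute sh_mem_bases for the process apertures. The userspace sketch below models that flow; the struct layouts, the SH_MEM_* field encodings, and compute_sh_mem_bases_64bit() are simplified stand-ins, not the kernel definitions:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel structures. */
struct qcm_process_device {
        uint32_t sh_mem_config;
        uint32_t sh_mem_bases;
        uint64_t sh_mem_ape1_base;
        uint64_t sh_mem_ape1_limit;
};

struct kfd_process_device {
        uint64_t gpuvm_base;                 /* placeholder aperture info */
        struct qcm_process_device qpd;       /* embedded, as in kfd_priv.h */
};

/* container_of-style back-pointer, mirroring the qpd_to_pdd() macro. */
#define qpd_to_pdd(x) \
        ((struct kfd_process_device *)((char *)(x) - \
                offsetof(struct kfd_process_device, qpd)))

/* Hypothetical register field encodings, for illustration only. */
#define SH_MEM_ALIGNMENT_MODE_UNALIGNED         3u
#define SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT    2
#define SH_MEM_CONFIG__RETRY_DISABLE__SHIFT     6

static uint32_t compute_sh_mem_bases_64bit(struct kfd_process_device *pdd)
{
        /* Stand-in: derive the shared/private aperture bases from the pdd. */
        return (uint32_t)(pdd->gpuvm_base >> 48) & 0xFFFF;
}

static void update_qpd(struct qcm_process_device *qpd, int noretry)
{
        struct kfd_process_device *pdd = qpd_to_pdd(qpd);

        /* One-time setup: only program sh_mem_config on first use. */
        if (qpd->sh_mem_config == 0) {
                qpd->sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
                                     SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
                if (noretry)
                        qpd->sh_mem_config |=
                                1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
                qpd->sh_mem_ape1_limit = 0;
                qpd->sh_mem_ape1_base = 0;
        }

        /* Always refresh the aperture bases for this process. */
        qpd->sh_mem_bases = compute_sh_mem_bases_64bit(pdd);
        printf("sh_mem_bases 0x%X\n", qpd->sh_mem_bases);
}

int main(void)
{
        struct kfd_process_device pdd = { .gpuvm_base = 0x7000ull << 48 };

        update_qpd(&pdd.qpd, 1);
        return 0;
}

Keeping the one-time initialization behind the sh_mem_config == 0 check lets repeated calls reuse the process's existing shader memory configuration while still refreshing the aperture bases.
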
kfd_doorbell.c
    208  static int init_doorbell_bitmap(struct qcm_process_device *qpd,  in init_doorbell_bitmap() argument
    226  __set_bit(i, qpd->doorbell_bitmap);  in init_doorbell_bitmap()
    228  qpd->doorbell_bitmap);  in init_doorbell_bitmap()
    240  if (!pdd->qpd.proc_doorbells) {  in kfd_get_process_doorbells()
    247  pdd->qpd.proc_doorbells,  in kfd_get_process_doorbells()
    256  struct qcm_process_device *qpd = &pdd->qpd;  in kfd_alloc_process_doorbells() local
    259  qpd->doorbell_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,  in kfd_alloc_process_doorbells()
    261  if (!qpd->doorbell_bitmap) {  in kfd_alloc_process_doorbells()
    266  r = init_doorbell_bitmap(&pdd->qpd, kfd);  in kfd_alloc_process_doorbells()
    278  &qpd->proc_doorbells,  in kfd_alloc_process_doorbells()
    [all …]

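kfd_doorbell.c tracks which doorbell slots a process may hand out with a per-qpd bitmap: kfd_alloc_process_doorbells() allocates a zeroed bitmap sized for KFD_MAX_NUM_OF_QUEUES_PER_PROCESS and init_doorbell_bitmap() pre-sets the slots that must stay reserved. A rough userspace model of that bookkeeping; the "every fourth slot is reserved" rule below is a placeholder, since the real reservation depends on the ASIC's SDMA doorbell layout:

#include <limits.h>
#include <stdlib.h>

/* Stand-in for KFD_MAX_NUM_OF_QUEUES_PER_PROCESS. */
#define MAX_QUEUES_PER_PROCESS  1024

#define BITS_PER_LONG   (sizeof(unsigned long) * CHAR_BIT)
#define BITMAP_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

static void set_bit_ul(unsigned int nr, unsigned long *map)
{
        map[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

/* Pre-set the doorbell slots the process must not hand out.
 * The "every fourth slot is reserved" rule is a placeholder. */
static void init_doorbell_bitmap(unsigned long *bitmap)
{
        unsigned int i;

        for (i = 0; i < MAX_QUEUES_PER_PROCESS; i++)
                if ((i & 3) == 3)
                        set_bit_ul(i, bitmap);
}

/* bitmap_zalloc() equivalent: a zeroed array of longs, one bit per slot. */
static unsigned long *alloc_process_doorbells(void)
{
        unsigned long *bitmap;

        bitmap = calloc(BITMAP_LONGS(MAX_QUEUES_PER_PROCESS),
                        sizeof(unsigned long));
        if (!bitmap)
                return NULL;

        init_doorbell_bitmap(bitmap);
        return bitmap;
}
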
kfd_device_queue_manager.h
    43  struct qcm_process_device *qpd;  member
    140  struct qcm_process_device *qpd,
    146  struct qcm_process_device *qpd,
    153  struct qcm_process_device *qpd);
    156  struct qcm_process_device *qpd);
    166  struct qcm_process_device *qpd);
    170  struct qcm_process_device *qpd);
    173  struct qcm_process_device *qpd,
    180  struct qcm_process_device *qpd);
    183  struct qcm_process_device *qpd);
    [all …]

kfd_packet_manager_vi.c
    43  struct qcm_process_device *qpd)  in pm_map_process_vi() argument
    53  packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;  in pm_map_process_vi()
    55  packet->bitfields2.pasid = qpd->pqm->process->pasid;  in pm_map_process_vi()
    56  packet->bitfields3.page_table_base = qpd->page_table_base;  in pm_map_process_vi()
    57  packet->bitfields10.gds_size = qpd->gds_size;  in pm_map_process_vi()
    58  packet->bitfields10.num_gws = qpd->num_gws;  in pm_map_process_vi()
    59  packet->bitfields10.num_oac = qpd->num_oac;  in pm_map_process_vi()
    60  packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;  in pm_map_process_vi()
    62  packet->sh_mem_config = qpd->sh_mem_config;  in pm_map_process_vi()
    63  packet->sh_mem_bases = qpd->sh_mem_bases;  in pm_map_process_vi()
    [all …]

kfd_packet_manager.c
    134  struct qcm_process_device *qpd;  in pm_create_runlist_ib() local
    154  qpd = cur->qpd;  in pm_create_runlist_ib()
    162  retval = pm->pmf->map_process(pm, &rl_buffer[rl_wptr], qpd);  in pm_create_runlist_ib()
    170  list_for_each_entry(kq, &qpd->priv_queue_list, list) {  in pm_create_runlist_ib()
    176  kq->queue->queue, qpd->is_debug);  in pm_create_runlist_ib()
    181  qpd->is_debug);  in pm_create_runlist_ib()
    190  list_for_each_entry(q, &qpd->queues_list, list) {  in pm_create_runlist_ib()
    196  q->queue, qpd->is_debug);  in pm_create_runlist_ib()
    201  qpd->is_debug);  in pm_create_runlist_ib()

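The kfd_packet_manager.c hits are pm_create_runlist_ib() assembling the runlist IB: for each process it first emits a map_process packet for the qpd, then one map_queues entry per kernel queue on qpd->priv_queue_list and per user queue on qpd->queues_list, passing qpd->is_debug along. A compact sketch of that two-level walk, with flat arrays standing in for the kernel's intrusive lists and stub emitters in place of the PM4 packet writers:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct queue {
        unsigned int id;
};

/* Per-process scheduling state, loosely mirroring qcm_process_device. */
struct process_qpd {
        bool is_debug;
        struct queue *kernel_queues;    /* ~ qpd->priv_queue_list */
        size_t num_kernel_queues;
        struct queue *user_queues;      /* ~ qpd->queues_list */
        size_t num_user_queues;
};

/* Stub emitters; the real code writes PM4 MAP_PROCESS / MAP_QUEUES
 * packets into the runlist IB. */
static void emit_map_process(const struct process_qpd *qpd)
{
        printf("map_process (debug=%d)\n", qpd->is_debug);
}

static void emit_map_queue(const struct queue *q, bool is_debug)
{
        printf("  map_queue %u (debug=%d)\n", q->id, is_debug);
}

/* One map_process per process, followed by all of its queues. */
static void create_runlist(struct process_qpd *procs, size_t nprocs)
{
        size_t p, i;

        for (p = 0; p < nprocs; p++) {
                struct process_qpd *qpd = &procs[p];

                emit_map_process(qpd);

                for (i = 0; i < qpd->num_kernel_queues; i++)
                        emit_map_queue(&qpd->kernel_queues[i], qpd->is_debug);

                for (i = 0; i < qpd->num_user_queues; i++)
                        emit_map_queue(&qpd->user_queues[i], qpd->is_debug);
        }
}
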
kfd_flat_memory.c
    337  pdd->qpd.cwsr_base = SVM_CWSR_BASE;  in kfd_init_apertures_vi()
    338  pdd->qpd.ib_base = SVM_IB_BASE;  in kfd_init_apertures_vi()
    360  pdd->qpd.cwsr_base = AMDGPU_VA_RESERVED_TRAP_START(pdd->dev->adev);  in kfd_init_apertures_v9()

kfd_debug.c
    596  kfd_process_set_trap_debug_flag(&pdd->qpd, false);  in kfd_dbg_trap_deactivate()
    609  release_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd))  in kfd_dbg_trap_deactivate()
    688  r = reserve_debug_trap_vmid(pdd->dev->dqm, &pdd->qpd);  in kfd_dbg_trap_activate()
    727  kfd_process_set_trap_debug_flag(&pdd->qpd, true);  in kfd_dbg_trap_activate()
    768  if (pdd->qpd.num_gws && (!kfd_dbg_has_gws_support(pdd->dev) ||  in kfd_dbg_trap_enable()
    934  struct qcm_process_device *qpd = &pdd->qpd;  in kfd_dbg_trap_query_exception_info() local
    936  list_for_each_entry(queue, &qpd->queues_list, list) {  in kfd_dbg_trap_query_exception_info()

kfd_priv.h
    749  struct qcm_process_device qpd;  member
    854  #define qpd_to_pdd(x) container_of(x, struct kfd_process_device, qpd)
    1183  void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
    1186  void kfd_process_set_trap_debug_flag(struct qcm_process_device *qpd,
    1406  struct qcm_process_device *qpd);

kfd_chardev.c
    350  if (!pdd->qpd.proc_doorbells) {  in kfd_ioctl_create_queue()
    595  &pdd->qpd,  in kfd_ioctl_set_memory_policy()
    630  kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);  in kfd_ioctl_set_trap_handler()
    904  pdd->qpd.sh_hidden_private_base = args->va_addr;  in kfd_ioctl_set_scratch_backing_va()
    909  pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)  in kfd_ioctl_set_scratch_backing_va()
    911  dev->adev, args->va_addr, pdd->qpd.vmid);  in kfd_ioctl_set_scratch_backing_va()
    2268  if (!pdd->qpd.proc_doorbells) {  in criu_restore_devices()
    2764  if (pdd->qpd.queue_count)  in runtime_enable()

/linux/drivers/infiniband/hw/cxgb4/

device.c
    246  struct c4iw_debugfs_data *qpd)  in dump_qp() argument
    253  space = qpd->bufsize - qpd->pos - 1;  in dump_qp()
    267  cc = snprintf(qpd->buf + qpd->pos, space,  in dump_qp()
    288  cc = snprintf(qpd->buf + qpd->pos, space,  in dump_qp()
    304  cc = snprintf(qpd->buf + qpd->pos, space,  in dump_qp()
    310  qpd->pos += cc;  in dump_qp()
    316  struct c4iw_debugfs_data *qpd = file->private_data;  in qp_release() local
    317  if (!qpd) {  in qp_release()
    321  vfree(qpd->buf);  in qp_release()
    322  kfree(qpd);  in qp_release()
    [all …]

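In the cxgb4 entry, qpd is unrelated to amdkfd: it is a c4iw_debugfs_data cursor (buf, bufsize, pos) that dump_qp() appends formatted queue-pair state into, advancing pos only when the snprintf output fit, while qp_release() frees the buffer when the debugfs file is closed. The append pattern, extracted into a plain userspace helper (the helper name and signature are illustrative, not the driver's API):

#include <stdarg.h>
#include <stdio.h>

/* Mirrors the c4iw_debugfs_data cursor that dump_qp() writes into. */
struct debugfs_data {
        char *buf;
        int bufsize;
        int pos;
};

/* Append formatted text; advance pos only if the output fit.
 * Returns nonzero once the buffer is full, so the caller can stop. */
static int debugfs_append(struct debugfs_data *d, const char *fmt, ...)
{
        int space = d->bufsize - d->pos - 1;
        va_list ap;
        int cc;

        if (space == 0)
                return 1;

        va_start(ap, fmt);
        cc = vsnprintf(d->buf + d->pos, space, fmt, ap);
        va_end(ap);

        if (cc < space)
                d->pos += cc;
        return 0;
}

Reserving one byte (bufsize - pos - 1) keeps the last position free so the buffer stays NUL-terminated even when the formatted output is truncated.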