| /linux/drivers/gpu/drm/amd/amdkfd/ |
| kfd_device.c |
      62  static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
      64  static void kfd_gtt_sa_fini(struct kfd_dev *kfd);
      66  static int kfd_resume(struct kfd_node *kfd);
      68  static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)  in kfd_device_info_set_sdma_info() argument
      70  uint32_t sdma_version = amdgpu_ip_version(kfd->adev, SDMA0_HWIP, 0);  in kfd_device_info_set_sdma_info()
      82  kfd->device_info.num_sdma_queues_per_engine = 2;  in kfd_device_info_set_sdma_info()
      98  kfd->device_info.num_sdma_queues_per_engine = 8;  in kfd_device_info_set_sdma_info()
     112  kfd->device_info.num_sdma_queues_per_engine = 8;  in kfd_device_info_set_sdma_info()
     114  kfd->device_info.num_reserved_sdma_queues_per_engine = 2;  in kfd_device_info_set_sdma_info()
     120  kfd->device_info.num_sdma_queues_per_engine = 8;  in kfd_device_info_set_sdma_info()
     [all …]
|
| kfd_doorbell.c |
      50  size_t kfd_doorbell_process_slice(struct kfd_dev *kfd)  in kfd_doorbell_process_slice() argument
      52  if (!kfd->shared_resources.enable_mes)  in kfd_doorbell_process_slice()
      53  return roundup(kfd->device_info.doorbell_size *  in kfd_doorbell_process_slice()
      58  (struct amdgpu_device *)kfd->adev);  in kfd_doorbell_process_slice()
      62  int kfd_doorbell_init(struct kfd_dev *kfd)  in kfd_doorbell_init() argument
      75  kfd->doorbell_bitmap = bitmap_zalloc(size / sizeof(u32), GFP_KERNEL);  in kfd_doorbell_init()
      76  if (!kfd->doorbell_bitmap) {  in kfd_doorbell_init()
      82  r = amdgpu_bo_create_kernel(kfd->adev,  in kfd_doorbell_init()
      86  &kfd->doorbells,  in kfd_doorbell_init()
      88  (void **)&kfd->doorbell_kernel_ptr);  in kfd_doorbell_init()
     [all …]
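The kfd_doorbell_process_slice() hits above compute the per-process doorbell slice as the device's doorbell size times a per-process queue limit, rounded up to a page. A minimal standalone sketch of that arithmetic, using assumed values (8-byte doorbells, a 1024-queue cap, 4 KiB pages) rather than the real constants from kfd->device_info:

```c
#include <stdio.h>
#include <stddef.h>

/* Assumed values for illustration only; the real ones come from
 * kfd->device_info and the KFD headers, not from these constants. */
#define ASSUMED_DOORBELL_SIZE      8      /* bytes per doorbell */
#define ASSUMED_QUEUES_PER_PROCESS 1024   /* per-process queue cap */
#define ASSUMED_PAGE_SIZE          4096

/* Round x up to the next multiple of align. */
static size_t roundup_to(size_t x, size_t align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	size_t slice = roundup_to((size_t)ASSUMED_DOORBELL_SIZE *
				  ASSUMED_QUEUES_PER_PROCESS,
				  ASSUMED_PAGE_SIZE);

	/* 8 * 1024 = 8192 bytes, already page aligned: two 4 KiB pages */
	printf("per-process doorbell slice: %zu bytes\n", slice);
	return 0;
}
```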
|
| kfd_interrupt.c |
      58  KFD_IH_NUM_ENTRIES * node->kfd->device_info.ih_ring_entry_size,  in kfd_interrupt_init()
      65  if (!node->kfd->ih_wq) {  in kfd_interrupt_init()
      66  node->kfd->ih_wq = alloc_workqueue("KFD IH", WQ_HIGHPRI | WQ_UNBOUND,  in kfd_interrupt_init()
      67  node->kfd->num_nodes);  in kfd_interrupt_init()
      68  if (unlikely(!node->kfd->ih_wq)) {  in kfd_interrupt_init()
     116  kfifo_in(&node->ih_fifo, ih_ring_entry, node->kfd->device_info.ih_ring_entry_size);  in enqueue_ih_ring_entry()
     131  node->kfd->device_info.ih_ring_entry_size);  in dequeue_ih_ring_entry()
     132  WARN_ON(count != node->kfd->device_info.ih_ring_entry_size);  in dequeue_ih_ring_entry()
     133  return count == node->kfd->device_info.ih_ring_entry_size;  in dequeue_ih_ring_entry()
     143  dev->kfd->device_info.event_interrupt_class->interrupt_wq(dev,  in interrupt_wq()
     [all …]
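enqueue_ih_ring_entry() and dequeue_ih_ring_entry() above move fixed-size interrupt-ring entries through a kfifo before the "KFD IH" workqueue dispatches them. A userspace analogue of that fixed-size-record FIFO, for illustration only (this is not the kernel kfifo API, it is single-threaded, and the entry size and depth are assumptions):

```c
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-in for the IH ring; the real entry size comes from
 * device_info.ih_ring_entry_size and the depth from KFD_IH_NUM_ENTRIES. */
#define ENTRY_SIZE  32
#define NUM_ENTRIES 1024                 /* power of two */

struct ih_fifo {
	uint8_t buf[NUM_ENTRIES][ENTRY_SIZE];
	unsigned int head, tail;         /* head: next write, tail: next read */
};

static bool ih_enqueue(struct ih_fifo *f, const void *entry)
{
	if (f->head - f->tail >= NUM_ENTRIES)
		return false;            /* ring full, entry dropped */
	memcpy(f->buf[f->head % NUM_ENTRIES], entry, ENTRY_SIZE);
	f->head++;
	return true;
}

static bool ih_dequeue(struct ih_fifo *f, void *entry)
{
	if (f->head == f->tail)
		return false;            /* ring empty */
	memcpy(entry, f->buf[f->tail % NUM_ENTRIES], ENTRY_SIZE);
	f->tail++;
	return true;
}

int main(void)
{
	static struct ih_fifo fifo;      /* zero-initialized ring */
	uint8_t in[ENTRY_SIZE] = { 0x42 };
	uint8_t out[ENTRY_SIZE];

	ih_enqueue(&fifo, in);
	return (ih_dequeue(&fifo, out) && out[0] == 0x42) ? 0 : 1;
}
```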
|
| kfd_device_queue_manager.c |
      86  int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec  in is_pipe_enabled()
      87  + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe;  in is_pipe_enabled()
      90  for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i)  in is_pipe_enabled()
      92  dqm->dev->kfd->shared_resources.cp_queue_bitmap))  in is_pipe_enabled()
      99  return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap,  in get_cp_queues_num()
     105  return dqm->dev->kfd->shared_resources.num_queue_per_pipe;  in get_queues_per_pipe()
     110  return dqm->dev->kfd->shared_resources.num_pipe_per_mec;  in get_pipes_per_mec()
     122  dqm->dev->kfd->device_info.num_sdma_queues_per_engine;  in get_num_sdma_queues()
     128  dqm->dev->kfd->device_info.num_sdma_queues_per_engine;  in get_num_xgmi_sdma_queues()
     141  dqm->dev->kfd->device_info.num_reserved_sdma_queues_per_engine);  in init_sdma_bitmaps()
     [all …]
|
| kfd_kernel_queue.c |
      64  prop.doorbell_ptr = kfd_get_kernel_doorbell(dev->kfd, &prop.doorbell_off);  in kq_initialize()
     102  retval = kfd_gtt_sa_allocate(dev, dev->kfd->device_info.doorbell_size,  in kq_initialize()
     113  memset(kq->wptr_kernel, 0, dev->kfd->device_info.doorbell_size);  in kq_initialize()
     165  kfd_release_kernel_doorbell(dev->kfd, prop.doorbell_ptr);  in kq_initialize()
     196  kfd_release_kernel_doorbell(kq->dev->kfd,  in kq_uninitialize()
     281  if (kq->dev->kfd->device_info.doorbell_size == 8) {  in kq_submit_packet()
     298  if (kq->dev->kfd->device_info.doorbell_size == 8) {  in kq_rollback_packet()
|
| kfd_debug.h |
     115  && dev->kfd->mec2_fw_version < 0x81b6) ||  in kfd_dbg_has_gws_support()
     118  && dev->kfd->mec2_fw_version < 0x1b6) ||  in kfd_dbg_has_gws_support()
     120  && dev->kfd->mec2_fw_version < 0x1b6) ||  in kfd_dbg_has_gws_support()
     122  && dev->kfd->mec2_fw_version < 0x30) ||  in kfd_dbg_has_gws_support()
|
| kfd_priv.h |
     318  struct kfd_dev *kfd;  member
    1124  size_t kfd_doorbell_process_slice(struct kfd_dev *kfd);
    1125  int kfd_doorbell_init(struct kfd_dev *kfd);
    1126  void kfd_doorbell_fini(struct kfd_dev *kfd);
    1129  void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
    1131  void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr);
    1135  unsigned int kfd_get_doorbell_dw_offset_in_bar(struct kfd_dev *kfd,
    1139  int kfd_alloc_process_doorbells(struct kfd_dev *kfd,
    1141  void kfd_free_process_doorbells(struct kfd_dev *kfd,
    1177  struct kfd_dev *dev = adev->kfd.dev;  in kfd_node_by_irq_ids()
     [all …]
|
| kfd_debug.c |
     442  if (!pdd->dev->kfd->shared_resources.enable_mes) {  in kfd_dbg_trap_clear_dev_address_watch()
     454  if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_clear_dev_address_watch()
     479  if (!pdd->dev->kfd->shared_resources.enable_mes) {  in kfd_dbg_trap_set_dev_address_watch()
     499  if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_dev_address_watch()
     563  if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_flags()
     586  if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_flags()
     648  if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_deactivate()
     766  if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_activate()
     900  if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_wave_launch_override()
     932  if (!pdd->dev->kfd->shared_resources.enable_mes)  in kfd_dbg_trap_set_wave_launch_mode()
     [all …]
|
| kfd_process.c |
    1130  kfd_free_process_doorbells(pdd->dev->kfd, pdd);  in kfd_process_destroy_pdds()
    1132  if (pdd->dev->kfd->shared_resources.enable_mes &&  in kfd_process_destroy_pdds()
    1145  atomic_dec(&pdd->dev->kfd->kfd_processes_count);  in kfd_process_destroy_pdds()
    1422  if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || qpd->cwsr_base)  in kfd_process_init_cwsr_apu()
    1440  memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);  in kfd_process_init_cwsr_apu()
    1465  if (!dev->kfd->cwsr_enabled || qpd->cwsr_kaddr || !qpd->cwsr_base)  in kfd_process_device_init_cwsr_dgpu()
    1478  memcpy(qpd->cwsr_kaddr, dev->kfd->cwsr_isa, dev->kfd->cwsr_isa_size);  in kfd_process_device_init_cwsr_dgpu()
    1495  if (!dev->kfd->cwsr_enabled || !qpd->cwsr_kaddr || !qpd->cwsr_base)  in kfd_process_device_destroy_cwsr_dgpu()
    1552  if (!amdgpu_sriov_xnack_support(dev->kfd->adev)) {  in kfd_process_xnack_mode()
    1568  if (dev->kfd->noretry)  in kfd_process_xnack_mode()
     [all …]
|
| kfd_topology.c |
     501  __ilog2_u32(dev->gpu->kfd->device_info.num_of_watch_points);  in node_show()
     527  dev->gpu->kfd->mec_fw_version);  in node_show()
     535  dev->gpu->kfd->sdma_fw_version);  in node_show()
    1231  if (!dev->gpu->kfd->pci_atomic_requested ||  in kfd_set_iolink_no_atomics()
    1285  adev->aid_mask && num_xgmi_nodes && gpu->kfd->num_nodes == 1 &&  in kfd_set_recommended_sdma_engines()
    1592  (dev->gpu->kfd->hive_id &&  in kfd_dev_create_p2p_links()
    1593  dev->gpu->kfd->hive_id == new_dev->gpu->kfd->hive_id &&  in kfd_dev_create_p2p_links()
    1948  firmware_supported = dev->gpu->kfd->mec_fw_version >= 459 + 32768;  in kfd_topology_set_dbg_firmware_support()
    1955  firmware_supported = dev->gpu->kfd->mec_fw_version >= 459;  in kfd_topology_set_dbg_firmware_support()
    1958  firmware_supported = dev->gpu->kfd->mec_fw_version >= 60;  in kfd_topology_set_dbg_firmware_support()
     [all …]
|
| kfd_process_queue_manager.c |
      94  if (dev->kfd->shared_resources.enable_mes && !!pdd->proc_ctx_gpu_addr &&  in kfd_process_dequeue_from_device()
     141  !dev->kfd->shared_resources.enable_mes) {  in pqm_set_gws()
     206  !dev->kfd->shared_resources.enable_mes)  in pqm_clean_queue_resource()
     212  if (dev->kfd->shared_resources.enable_mes) {  in pqm_clean_queue_resource()
     267  if (dev->kfd->shared_resources.enable_mes) {  in init_user_queue()
     351  max_queues = dev->kfd->device_info.max_no_of_hqd/2;  in pqm_create_queue()
     371  if (!pdd->proc_ctx_cpu_ptr && dev->kfd->shared_resources.enable_mes) {  in pqm_create_queue()
     460  pdd->dev->kfd->device_info.doorbell_size);  in pqm_create_queue()
|
| kfd_mqd_manager_v10.c |
      79  struct kfd_node *kfd = mm->dev;  in allocate_mqd() local
      82  if (kfd_gtt_sa_allocate(kfd, sizeof(struct v10_compute_mqd),  in allocate_mqd()
     132  if (mm->dev->kfd->cwsr_enabled) {  in init_mqd()
     221  if (mm->dev->kfd->cwsr_enabled)  in update_mqd()
|
| kfd_mqd_manager_vi.c |
      82  struct kfd_node *kfd = mm->dev;  in allocate_mqd() local
      85  if (kfd_gtt_sa_allocate(kfd, sizeof(struct vi_mqd),  in allocate_mqd()
     139  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {  in init_mqd()
     230  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)  in __update_mqd()
|
| kfd_mqd_manager_v11.c |
     172  if (mm->dev->kfd->cwsr_enabled) {  in init_mqd()
     260  if (mm->dev->kfd->cwsr_enabled)  in update_mqd()
     393  if (mm->dev->kfd->shared_resources.enable_mes)  in init_mqd_sdma()
     544  if (dev->kfd->shared_resources.enable_mes) {  in mqd_manager_init_v11()
|
| kfd_crat.c |
    1644  kfd_fill_gpu_cache_info_from_gfx_config_v2(kdev->kfd,  in kfd_get_gpu_cache_info()
    1718  kfd_fill_gpu_cache_info_from_gfx_config(kdev->kfd,  in kfd_get_gpu_cache_info()
    2189  bool use_ta_info = kdev->kfd->num_nodes == 1;  in kfd_fill_gpu_xgmi_link_to_gpu()
    2215  bool is_single_hop = kdev->kfd == peer_kdev->kfd;  in kfd_fill_gpu_xgmi_link_to_gpu()
    2284  (cu_info->number / kdev->kfd->num_nodes);  in kfd_create_vcrat_image_gpu()
    2359  if (kdev->kfd->hive_id) {  in kfd_create_vcrat_image_gpu()
    2364  if (peer_dev->gpu->kfd->hive_id != kdev->kfd->hive_id)  in kfd_create_vcrat_image_gpu()
|
| kfd_mqd_manager_v12_1.c |
      58  cu_active_per_node = cu_info->number / mm->dev->kfd->num_nodes;  in mqd_symmetrically_map_cu_mask_v12_1()
     203  if (mm->dev->kfd->cwsr_enabled) {  in init_mqd()
     313  if (mm->dev->kfd->cwsr_enabled)  in update_mqd()
     472  if (mm->dev->kfd->cwsr_enabled &&  in init_mqd_v12_1()
|
| kfd_mqd_manager.c |
      77  dev->kfd->device_info.num_sdma_queues_per_engine +  in allocate_sdma_mqd()
     114  cu_active_per_node = cu_info->number / mm->dev->kfd->num_nodes;  in mqd_symmetrically_map_cu_mask()
|
| kfd_mqd_manager_v9.c |
      44  if (mm->dev->kfd->cwsr_enabled &&  in mqd_stride_v9()
     149  if (node->kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {  in allocate_mqd()
     229  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address) {  in init_mqd()
     321  if (mm->dev->kfd->cwsr_enabled && q->ctx_save_restore_area_address)  in update_mqd()
     718  if (mm->dev->kfd->cwsr_enabled &&  in init_mqd_v9_4_3()
|
| kfd_mqd_manager_cik.c |
      79  struct kfd_node *kfd = mm->dev;  in allocate_mqd() local
      82  if (kfd_gtt_sa_allocate(kfd, sizeof(struct cik_mqd),  in allocate_mqd()
|
| kfd_mqd_manager_v12.c |
     147  if (mm->dev->kfd->cwsr_enabled) {  in init_mqd()
     235  if (mm->dev->kfd->cwsr_enabled)  in update_mqd()
|
| /linux/drivers/gpu/drm/amd/amdgpu/ |
| amdgpu_amdkfd.c |
      77  adev->kfd.dev = kgd2kfd_probe(adev, vf);  in amdgpu_amdkfd_device_probe()
     128  kfd.reset_work);  in amdgpu_amdkfd_reset_work()
     152  if (!adev->kfd.init_complete || adev->kfd.client.dev)  in amdgpu_amdkfd_drm_client_create()
     155  ret = drm_client_init(&adev->ddev, &adev->kfd.client, "kfd",  in amdgpu_amdkfd_drm_client_create()
     163  drm_client_register(&adev->kfd.client);  in amdgpu_amdkfd_drm_client_create()
     175  if (adev->kfd.dev) {  in amdgpu_amdkfd_device_init()
     226  adev->kfd.init_complete = kgd2kfd_device_init(adev->kfd.dev,  in amdgpu_amdkfd_device_init()
     231  INIT_WORK(&adev->kfd.reset_work, amdgpu_amdkfd_reset_work);  in amdgpu_amdkfd_device_init()
     237  if (adev->kfd.dev) {  in amdgpu_amdkfd_device_fini_sw()
     238  kgd2kfd_device_exit(adev->kfd.dev);  in amdgpu_amdkfd_device_fini_sw()
     [all …]
|
| amdgpu_amdkfd_gpuvm.c |
     239  if (adev->kfd.vram_used[xcp_id] + vram_needed > vram_available) {  in amdgpu_amdkfd_reserve_mem_limit()
     251  adev->kfd.vram_used[xcp_id] += vram_needed;  in amdgpu_amdkfd_reserve_mem_limit()
     252  adev->kfd.vram_used_aligned[xcp_id] +=  in amdgpu_amdkfd_reserve_mem_limit()
     280  adev->kfd.vram_used[xcp_id] -= size;  in amdgpu_amdkfd_unreserve_mem_limit()
     282  adev->kfd.vram_used_aligned[xcp_id] -= size;  in amdgpu_amdkfd_unreserve_mem_limit()
     286  adev->kfd.vram_used_aligned[xcp_id] -=  in amdgpu_amdkfd_unreserve_mem_limit()
     298  WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0,  in amdgpu_amdkfd_unreserve_mem_limit()
     825  dmabuf = drm_gem_prime_handle_to_dmabuf(&bo_adev->ddev, bo_adev->kfd.client.file,  in kfd_mem_export_dmabuf()
    1677  - adev->kfd.vram_used_aligned[xcp_id];  in amdgpu_amdkfd_get_available_memory()
    1680  - adev->kfd.vram_used_aligned[xcp_id]  in amdgpu_amdkfd_get_available_memory()
     [all …]
|
| /linux/samples/bpf/ |
| task_fd_query_user.c |
     234  int err = -1, res, kfd, efd;  in test_debug_fs_uprobe() local
     240  kfd = open(buf, O_WRONLY | O_TRUNC, 0);  in test_debug_fs_uprobe()
     241  CHECK_PERROR_RET(kfd < 0);  in test_debug_fs_uprobe()
     250  CHECK_PERROR_RET(write(kfd, buf, strlen(buf)) < 0);  in test_debug_fs_uprobe()
     252  close(kfd);  in test_debug_fs_uprobe()
     253  kfd = -1;  in test_debug_fs_uprobe()
     270  kfd = sys_perf_event_open(&attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC);  in test_debug_fs_uprobe()
     271  link = bpf_program__attach_perf_event(progs[0], kfd);  in test_debug_fs_uprobe()
     275  close(kfd);  in test_debug_fs_uprobe()
     280  err = bpf_task_fd_query(getpid(), kfd, 0, buf, &len,  in test_debug_fs_uprobe()
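In this sample, kfd is an ordinary file descriptor: first for the debugfs uprobe_events file that the probe definition is written to, then for the perf event opened on that probe. A hedged standalone sketch of the first step only; the path and probe offset are illustrative assumptions, and it appends a probe instead of truncating the file the way the sample's O_TRUNC open does:

```c
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Assumed tracing mount point and probe target, for illustration only;
	 * the sample builds the equivalent string at runtime for its own binary. */
	const char *events = "/sys/kernel/debug/tracing/uprobe_events";
	const char *probe  = "p:uprobes/test_probe /bin/bash:0x4245c0";

	int kfd = open(events, O_WRONLY | O_APPEND);
	if (kfd < 0) {
		perror("open uprobe_events");
		return 1;
	}
	if (write(kfd, probe, strlen(probe)) < 0)
		perror("write probe definition");
	close(kfd);
	return 0;
}
```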
|
| /linux/tools/perf/ |
| builtin-probe.c |
     442  int ret, ret2, ufd = -1, kfd = -1;  in perf_del_probe_events() local
     456  ret = probe_file__open_both(&kfd, &ufd, PF_FL_RW);  in perf_del_probe_events()
     467  ret = probe_file__get_events(kfd, filter, klist);  in perf_del_probe_events()
     472  ret = probe_file__del_strlist(kfd, klist);  in perf_del_probe_events()
     495  if (kfd >= 0)  in perf_del_probe_events()
     496  close(kfd);  in perf_del_probe_events()
|
| /linux/tools/perf/util/ |
| probe-file.c |
     153  int probe_file__open_both(int *kfd, int *ufd, int flag)  in probe_file__open_both() argument
     155  if (!kfd || !ufd)  in probe_file__open_both()
     158  *kfd = open_kprobe_events(flag & PF_FL_RW);  in probe_file__open_both()
     160  if (*kfd < 0 && *ufd < 0) {  in probe_file__open_both()
     161  print_both_open_warning(*kfd, *ufd, flag & PF_FL_RW);  in probe_file__open_both()
     162  return *kfd;  in probe_file__open_both()
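probe_file__open_both() hands back separate descriptors for the kernel (kfd) and user (ufd) probe definition files. A minimal sketch that only opens the kernel side read-only and lists the probes already defined; the tracefs mount point is an assumption about the running system:

```c
#include <stdio.h>

int main(void)
{
	/* tracefs is usually mounted here or under /sys/kernel/debug/tracing;
	 * both paths are assumptions, not something perf itself hard-codes. */
	FILE *kprobes = fopen("/sys/kernel/tracing/kprobe_events", "r");
	char line[512];

	if (!kprobes) {
		perror("open kprobe_events");
		return 1;
	}
	while (fgets(line, sizeof(line), kprobes))
		fputs(line, stdout);          /* one existing kprobe per line */
	fclose(kprobes);
	return 0;
}
```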
|