Lines Matching refs:pdd (each entry gives the source-file line number, then that line's content)
72 struct kfd_process_device *pdd;
75 pdd = kfd_process_device_data_by_id(p, gpu_id);
77 if (pdd)
78 return pdd;
84 static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
86 mutex_unlock(&pdd->process->mutex);
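
The fragments above (source lines 72-86) come from a pair of small locking helpers. A minimal sketch of how they likely fit together, assuming kfd_lock_pdd_by_id() takes the process mutex before the lookup and drops it again when no pdd is found; only the lines quoted above are verbatim:

    static struct kfd_process_device *kfd_lock_pdd_by_id(struct kfd_process *p,
                                                          __u32 gpu_id)
    {
        struct kfd_process_device *pdd;

        /* Assumed: the process mutex is taken before the lookup. */
        mutex_lock(&p->mutex);
        pdd = kfd_process_device_data_by_id(p, gpu_id);

        /* On success the mutex stays held; the caller releases it later
         * through kfd_unlock_pdd().
         */
        if (pdd)
            return pdd;

        mutex_unlock(&p->mutex);
        return NULL;
    }

    static inline void kfd_unlock_pdd(struct kfd_process_device *pdd)
    {
        mutex_unlock(&pdd->process->mutex);
    }

The get-available-memory references at source lines 1042-1048 further down show the intended pairing: look up and lock in one step, read from the pdd, then kfd_unlock_pdd().
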
313 struct kfd_process_device *pdd;
329 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
330 if (!pdd) {
335 dev = pdd->dev;
337 pdd = kfd_bind_process_to_device(dev, p);
338 if (IS_ERR(pdd)) {
355 if (!pdd->qpd.proc_doorbells) {
356 err = kfd_alloc_process_doorbells(dev->kfd, pdd);
363 err = kfd_queue_acquire_buffers(pdd, &q_properties);
407 kfd_queue_unref_bo_vas(pdd, &q_properties);
408 kfd_queue_release_buffers(pdd, &q_properties);
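
The create-queue references (source lines 313-408) open with the prologue that most ioctls in this listing repeat: resolve the pdd from the user-supplied gpu_id, bind the process to that device, and allocate doorbells lazily before any queue or buffer work. A hedged sketch of that pattern; the locking, error codes and labels are assumptions, not quoted source:

    mutex_lock(&p->mutex);              /* assumed: ioctl body runs under the process mutex */

    pdd = kfd_process_device_data_by_id(p, args->gpu_id);
    if (!pdd) {
        err = -EINVAL;                  /* assumed error code */
        goto err_unlock;
    }
    dev = pdd->dev;

    pdd = kfd_bind_process_to_device(dev, p);
    if (IS_ERR(pdd)) {
        err = PTR_ERR(pdd);
        goto err_unlock;
    }

    /* Doorbell pages for the process are allocated on first use. */
    if (!pdd->qpd.proc_doorbells) {
        err = kfd_alloc_process_doorbells(dev->kfd, pdd);
        if (err)
            goto err_unlock;
    }

    err = kfd_queue_acquire_buffers(pdd, &q_properties);

On the error path (source lines 407-408) the acquired queue buffers are released again via kfd_queue_unref_bo_vas() and kfd_queue_release_buffers().
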
570 struct kfd_process_device *pdd;
584 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
585 if (!pdd) {
591 pdd = kfd_bind_process_to_device(pdd->dev, p);
592 if (IS_ERR(pdd)) {
604 if (!pdd->dev->dqm->ops.set_cache_memory_policy(pdd->dev->dqm,
605 &pdd->qpd,
625 struct kfd_process_device *pdd;
629 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
630 if (!pdd) {
635 pdd = kfd_bind_process_to_device(pdd->dev, p);
636 if (IS_ERR(pdd)) {
641 kfd_process_set_trap_handler(&pdd->qpd, args->tba_addr, args->tma_addr);
679 struct kfd_process_device *pdd;
682 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
684 if (pdd)
686 args->gpu_clock_counter = amdgpu_amdkfd_get_gpu_clock_counter(pdd->dev->adev);
714 /* Run over all pdd of the process */
716 struct kfd_process_device *pdd = p->pdds[i];
720 pAperture->gpu_id = pdd->dev->id;
721 pAperture->lds_base = pdd->lds_base;
722 pAperture->lds_limit = pdd->lds_limit;
723 pAperture->gpuvm_base = pdd->gpuvm_base;
724 pAperture->gpuvm_limit = pdd->gpuvm_limit;
725 pAperture->scratch_base = pdd->scratch_base;
726 pAperture->scratch_limit = pdd->scratch_limit;
731 "gpu id %u\n", pdd->dev->id);
733 "lds_base %llX\n", pdd->lds_base);
735 "lds_limit %llX\n", pdd->lds_limit);
737 "gpuvm_base %llX\n", pdd->gpuvm_base);
739 "gpuvm_limit %llX\n", pdd->gpuvm_limit);
741 "scratch_base %llX\n", pdd->scratch_base);
743 "scratch_limit %llX\n", pdd->scratch_limit);
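
Both aperture ioctls (source lines 714-743 and 790-815) walk every per-device entry of the process and copy the same six aperture ranges out. A sketch of the loop shape, assuming the array is bounded by p->n_pdds as the "Run over all pdd of the process" comment suggests, and that pAperture is advanced per device:

    /* Run over all pdd of the process */
    for (i = 0; i < p->n_pdds; i++) {
        struct kfd_process_device *pdd = p->pdds[i];

        /* Copy this GPU's aperture ranges into the next output slot. */
        pAperture->gpu_id = pdd->dev->id;
        pAperture->lds_base = pdd->lds_base;
        pAperture->lds_limit = pdd->lds_limit;
        pAperture->gpuvm_base = pdd->gpuvm_base;
        pAperture->gpuvm_limit = pdd->gpuvm_limit;
        pAperture->scratch_base = pdd->scratch_base;
        pAperture->scratch_limit = pdd->scratch_limit;
        pAperture++;                    /* assumed: advance to the next slot */
    }

The second variant (source lines 790-815) writes into an indexed array pa[i] instead of a moving pointer, but is otherwise the same copy-out.
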
790 /* Run over all pdd of the process */
792 struct kfd_process_device *pdd = p->pdds[i];
794 pa[i].gpu_id = pdd->dev->id;
795 pa[i].lds_base = pdd->lds_base;
796 pa[i].lds_limit = pdd->lds_limit;
797 pa[i].gpuvm_base = pdd->gpuvm_base;
798 pa[i].gpuvm_limit = pdd->gpuvm_limit;
799 pa[i].scratch_base = pdd->scratch_base;
800 pa[i].scratch_limit = pdd->scratch_limit;
803 "gpu id %u\n", pdd->dev->id);
805 "lds_base %llX\n", pdd->lds_base);
807 "lds_limit %llX\n", pdd->lds_limit);
809 "gpuvm_base %llX\n", pdd->gpuvm_base);
811 "gpuvm_limit %llX\n", pdd->gpuvm_limit);
813 "scratch_base %llX\n", pdd->scratch_base);
815 "scratch_limit %llX\n", pdd->scratch_limit);
898 struct kfd_process_device *pdd;
903 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
904 if (!pdd) {
908 dev = pdd->dev;
910 pdd = kfd_bind_process_to_device(dev, p);
911 if (IS_ERR(pdd)) {
912 err = PTR_ERR(pdd);
916 pdd->qpd.sh_hidden_private_base = args->va_addr;
921 pdd->qpd.vmid != 0 && dev->kfd2kgd->set_scratch_backing_va)
923 dev->adev, args->va_addr, pdd->qpd.vmid);
937 struct kfd_process_device *pdd;
942 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
944 if (!pdd)
947 amdgpu_amdkfd_get_tile_config(pdd->dev->adev, &config);
981 struct kfd_process_device *pdd;
990 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
991 if (!pdd) {
996 if (pdd->drm_file) {
997 ret = pdd->drm_file == drm_file ? 0 : -EBUSY;
1001 ret = kfd_process_device_init_vm(pdd, drm_file);
1042 struct kfd_process_device *pdd = kfd_lock_pdd_by_id(p, args->gpu_id);
1044 if (!pdd)
1046 args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev,
1047 pdd->dev->node_id);
1048 kfd_unlock_pdd(pdd);
1056 struct kfd_process_device *pdd;
1099 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1100 if (!pdd) {
1105 dev = pdd->dev;
1115 pdd = kfd_bind_process_to_device(dev, p);
1116 if (IS_ERR(pdd)) {
1117 err = PTR_ERR(pdd);
1126 offset = kfd_get_process_doorbells(pdd);
1145 pdd->drm_priv, (struct kgd_mem **) &mem, &offset,
1151 idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1163 atomic64_add(PAGE_ALIGN(size), &pdd->vram_usage);
1182 pdd->drm_priv, NULL);
1194 struct kfd_process_device *pdd;
1210 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1211 if (!pdd) {
1218 pdd, GET_IDR_HANDLE(args->handle));
1224 ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
1225 (struct kgd_mem *)mem, pdd->drm_priv, &size);
1232 pdd, GET_IDR_HANDLE(args->handle));
1234 atomic64_sub(size, &pdd->vram_usage);
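
The free-memory references (source lines 1194-1234) recover both the device and the buffer object from the single 64-bit args->handle. A sketch of that flow, assuming GET_GPU_ID() returns the gpu_id packed into the upper half of the handle, GET_IDR_HANDLE() the per-pdd IDR slot in the lower half, and that the continuation at source line 1232 belongs to a helper that removes the IDR entry (named kfd_process_device_remove_obj_handle here as an assumption):

    pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
    if (!pdd) {
        ret = -EINVAL;                  /* assumed error code */
        goto err_unlock;
    }

    /* Translate the IDR slot back to the kgd_mem created at allocation time. */
    mem = kfd_process_device_translate_handle(pdd, GET_IDR_HANDLE(args->handle));
    if (!mem) {
        ret = -EINVAL;
        goto err_unlock;
    }

    ret = amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev,
                        (struct kgd_mem *)mem, pdd->drm_priv, &size);
    if (ret)
        goto err_unlock;

    /* Assumed helper name: drop the IDR entry, then undo the VRAM accounting. */
    kfd_process_device_remove_obj_handle(pdd, GET_IDR_HANDLE(args->handle));
    atomic64_sub(size, &pdd->vram_usage);
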
1246 struct kfd_process_device *pdd, *peer_pdd;
1276 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1277 if (!pdd) {
1281 dev = pdd->dev;
1283 pdd = kfd_bind_process_to_device(dev, p);
1284 if (IS_ERR(pdd)) {
1285 err = PTR_ERR(pdd);
1289 mem = kfd_process_device_translate_handle(pdd,
1364 struct kfd_process_device *pdd, *peer_pdd;
1393 pdd = kfd_process_device_data_by_id(p, GET_GPU_ID(args->handle));
1394 if (!pdd) {
1399 mem = kfd_process_device_translate_handle(pdd,
1421 flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev->kfd);
1423 err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev,
1563 struct kfd_process_device *pdd;
1570 pdd = kfd_process_device_data_by_id(p, args->gpu_id);
1571 if (!pdd) {
1576 pdd = kfd_bind_process_to_device(pdd->dev, p);
1577 if (IS_ERR(pdd)) {
1578 r = PTR_ERR(pdd);
1582 r = amdgpu_amdkfd_gpuvm_import_dmabuf_fd(pdd->dev->adev, args->dmabuf_fd,
1583 args->va_addr, pdd->drm_priv,
1589 idr_handle = kfd_process_device_create_obj_handle(pdd, mem);
1602 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, (struct kgd_mem *)mem,
1603 pdd->drm_priv, NULL);
1613 struct kfd_process_device *pdd;
1625 pdd = kfd_get_process_device_data(dev, p);
1626 if (!pdd) {
1631 mem = kfd_process_device_translate_handle(pdd,
1666 struct kfd_process_device *pdd;
1670 pdd = kfd_process_device_data_by_id(p, args->gpuid);
1672 if (!pdd)
1675 return kfd_smi_event_open(pdd->dev, &args->anon_fd);
1794 struct kfd_process_device *pdd = p->pdds[i];
1796 device_buckets[i].user_gpu_id = pdd->user_gpu_id;
1797 device_buckets[i].actual_gpu_id = pdd->dev->id;
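
During CRIU checkpoint (source lines 1794-1797) each device bucket records both the GPU id the process was given and the id the device actually has, so that restore can remap one onto the other (see the restore side at source line 2246). A sketch of the loop, with the p->n_pdds bound assumed:

    for (i = 0; i < p->n_pdds; i++) {
        struct kfd_process_device *pdd = p->pdds[i];

        /* Record the user-visible id next to the real device id so the
         * restore path can rebuild the mapping on the target machine.
         */
        device_buckets[i].user_gpu_id = pdd->user_gpu_id;
        device_buckets[i].actual_gpu_id = pdd->dev->id;
    }
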
1834 struct kfd_process_device *pdd = p->pdds[i];
1838 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1841 if (!kgd_mem->va || kgd_mem->va > pdd->gpuvm_base)
1924 struct kfd_process_device *pdd = p->pdds[pdd_index];
1928 idr_for_each_entry(&pdd->alloc_idr, mem, id) {
1940 if (kgd_mem->va && kgd_mem->va <= pdd->gpuvm_base)
1946 bo_bucket->gpu_id = pdd->user_gpu_id;
1974 KFD_MMAP_GPU_ID(pdd->dev->id);
1978 KFD_MMAP_GPU_ID(pdd->dev->id);
2074 pr_err("No pdd for given process\n");
2220 struct kfd_process_device *pdd;
2239 pdd = kfd_get_process_device_data(dev, p);
2240 if (!pdd) {
2241 pr_err("Failed to get pdd for gpu_id = %x\n",
2246 pdd->user_gpu_id = device_buckets[i].user_gpu_id;
2256 if (pdd->drm_file) {
2261 /* create the vm using render nodes for kfd pdd */
2262 if (kfd_process_device_init_vm(pdd, drm_file)) {
2263 pr_err("could not init vm for given pdd\n");
2270 * pdd now already has the vm bound to render node so below api won't create a new
2274 pdd = kfd_bind_process_to_device(dev, p);
2275 if (IS_ERR(pdd)) {
2276 ret = PTR_ERR(pdd);
2280 if (!pdd->qpd.proc_doorbells) {
2281 ret = kfd_alloc_process_doorbells(dev->kfd, pdd);
2298 static int criu_restore_memory_of_gpu(struct kfd_process_device *pdd,
2310 kfd_doorbell_process_slice(pdd->dev->kfd))
2313 offset = kfd_get_process_doorbells(pdd);
2322 offset = pdd->dev->adev->rmmio_remap.bus_addr;
2331 ret = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(pdd->dev->adev, bo_bucket->addr,
2332 bo_bucket->size, pdd->drm_priv, kgd_mem,
2343 idr_handle = idr_alloc(&pdd->alloc_idr, *kgd_mem, bo_priv->idr_handle,
2348 amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->adev, *kgd_mem, pdd->drm_priv,
2354 bo_bucket->restored_offset = KFD_MMAP_TYPE_DOORBELL | KFD_MMAP_GPU_ID(pdd->dev->id);
2356 bo_bucket->restored_offset = KFD_MMAP_TYPE_MMIO | KFD_MMAP_GPU_ID(pdd->dev->id);
2362 atomic64_add(bo_bucket->size, &pdd->vram_usage);
2372 struct kfd_process_device *pdd;
2381 pdd = kfd_process_device_data_by_id(p, bo_bucket->gpu_id);
2382 if (!pdd) {
2383 pr_err("Failed to get pdd\n");
2387 ret = criu_restore_memory_of_gpu(pdd, bo_bucket, bo_priv, &kgd_mem);
2696 pr_err("No pdd for given process\n");
2774 struct kfd_process_device *pdd = p->pdds[i];
2776 if (pdd->qpd.queue_count)
2786 if (pdd->dev->kfd->shared_resources.enable_mes)
2787 kfd_dbg_set_mes_debug_mode(pdd, !kfd_dbg_has_cwsr_workaround(pdd->dev));
2796 struct kfd_process_device *pdd = p->pdds[i];
2798 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev)) {
2799 amdgpu_gfx_off_ctrl(pdd->dev->adev, false);
2800 pdd->dev->kfd2kgd->enable_debug_trap(
2801 pdd->dev->adev,
2803 pdd->dev->vm_info.last_vmid_kfd);
2804 } else if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2805 pdd->spi_dbg_override = pdd->dev->kfd2kgd->enable_debug_trap(
2806 pdd->dev->adev,
2858 struct kfd_process_device *pdd = p->pdds[i];
2860 if (!kfd_dbg_is_rlc_restore_supported(pdd->dev))
2861 amdgpu_gfx_off_ctrl(pdd->dev->adev, true);
2869 struct kfd_process_device *pdd = p->pdds[i];
2871 if (kfd_dbg_is_per_vmid_supported(pdd->dev)) {
2872 pdd->spi_dbg_override =
2873 pdd->dev->kfd2kgd->disable_debug_trap(
2874 pdd->dev->adev,
2876 pdd->dev->vm_info.last_vmid_kfd);
2878 if (!pdd->dev->kfd->shared_resources.enable_mes)
2879 debug_refresh_runlist(pdd->dev->dqm);
2881 kfd_dbg_set_mes_debug_mode(pdd,
2882 !kfd_dbg_has_cwsr_workaround(pdd->dev));
2914 struct kfd_process_device *pdd = NULL;
3000 pdd = kfd_process_device_data_by_id(target, user_gpu_id);
3001 if (user_gpu_id == -EINVAL || !pdd) {
3058 r = kfd_dbg_trap_set_dev_address_watch(pdd,
3065 r = kfd_dbg_trap_clear_dev_address_watch(pdd,