
Searched refs:ptdev (Results 1 – 14 of 14) sorted by relevance

/linux/drivers/gpu/drm/panthor/
panthor_device.c

  28  static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
  30          ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;
  32          if (!ptdev->coherent)
  38          if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
  42                  drm_err(&ptdev->base, "Coherency not supported by the device");
  46  static int panthor_clk_init(struct panthor_device *ptdev)
  48          ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
  49          if (IS_ERR(ptdev->clks.core))
  50                  return dev_err_probe(ptdev->base.dev,
  51                                       PTR_ERR(ptdev->clks.core),
[all …]
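
The panthor_clk_init() hit is cut off right inside the dev_err_probe() call. As a reading aid, here is a minimal sketch of how this devm_clk_get()/dev_err_probe() probe idiom typically completes; the message string and the "return 0" tail are assumptions, not the driver's actual code:

    /* Sketch only: everything past the dev_err_probe() arguments shown in
     * the hit above is assumed, since the search result cuts off there. */
    static int panthor_clk_init(struct panthor_device *ptdev)
    {
            ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
            if (IS_ERR(ptdev->clks.core))
                    return dev_err_probe(ptdev->base.dev,
                                         PTR_ERR(ptdev->clks.core),
                                         "get 'core' clock failed\n");

            return 0;
    }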
panthor_pwr.c

  55  static void panthor_pwr_irq_handler(struct panthor_device *ptdev, u32 status)
  57          spin_lock(&ptdev->pwr->reqs_lock);
  58          gpu_write(ptdev, PWR_INT_CLEAR, status);
  61          drm_err(&ptdev->base, "PWR_IRQ: COMMAND_NOT_ALLOWED");
  64          drm_err(&ptdev->base, "PWR_IRQ: COMMAND_INVALID");
  66          if (status & ptdev->pwr->pending_reqs) {
  67                  ptdev->pwr->pending_reqs &= ~status;
  68                  wake_up_all(&ptdev->pwr->reqs_acked);
  70          spin_unlock(&ptdev->pwr->reqs_lock);
  74  static void panthor_pwr_write_command(struct panthor_device *ptdev, u32 command, u64 args)
[all …]
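
The handler above acks the interrupt, logs command errors, then clears the matching bits in pending_reqs and wakes reqs_acked. A hedged sketch of the waiter side this pattern implies; the function name, timeout, and locking flavor are assumptions:

    /* Hypothetical waiter for the pending_reqs/reqs_acked pattern above. */
    static int panthor_pwr_wait_req(struct panthor_device *ptdev, u32 req)
    {
            long ret;

            spin_lock_irq(&ptdev->pwr->reqs_lock);
            ptdev->pwr->pending_reqs |= req;        /* assumed bookkeeping */
            spin_unlock_irq(&ptdev->pwr->reqs_lock);

            /* The IRQ handler clears the bit and calls wake_up_all(). */
            ret = wait_event_timeout(ptdev->pwr->reqs_acked,
                                     !(READ_ONCE(ptdev->pwr->pending_reqs) & req),
                                     msecs_to_jiffies(100));
            return ret ? 0 : -ETIMEDOUT;
    }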
panthor_hw.c

  56  static char *get_gpu_model_name(struct panthor_device *ptdev)
  58          const u32 gpu_id = ptdev->gpu_info.gpu_id;
  61          const bool ray_intersection = !!(ptdev->gpu_info.gpu_features &
  63          const u8 shader_core_count = hweight64(ptdev->gpu_info.shader_present);
 112  static void panthor_gpu_info_init(struct panthor_device *ptdev)
 116          ptdev->gpu_info.csf_id = gpu_read(ptdev, GPU_CSF_ID);
 117          ptdev->gpu_info.gpu_rev = gpu_read(ptdev, GPU_REVID);
 118          ptdev->gpu_info.core_features = gpu_read(ptdev, GPU_CORE_FEATURES);
 119          ptdev->gpu_info.l2_features = gpu_read(ptdev, GPU_L2_FEATURES);
 120          ptdev->gpu_info.tiler_features = gpu_read(ptdev, GPU_TILER_FEATURES);
[all …]
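
panthor_gpu_info_init() mirrors the ID/feature registers into ptdev->gpu_info once, so later code can decode them without touching the hardware, as get_gpu_model_name() does with hweight64(). A small sketch of that consumer pattern; GPU_ARCH_MAJOR()'s definition is an assumption (the macro itself appears in the panthor_gpu.c hit below):

    /* Assumed decode of the cached GPU_ID word; the 28-bit shift matches
     * the usual Mali GPU_ID layout but is not taken from this result. */
    #define GPU_ARCH_MAJOR(x)       ((x) >> 28)

    static bool gpu_arch_at_least(struct panthor_device *ptdev, u32 major)
    {
            return GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) >= major;
    }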
panthor_gpu.c

  51  static void panthor_gpu_coherency_set(struct panthor_device *ptdev)
  53          gpu_write(ptdev, GPU_COHERENCY_PROTOCOL,
  54                    ptdev->coherent ? GPU_COHERENCY_PROT_BIT(ACE_LITE) : GPU_COHERENCY_NONE);
  57  static void panthor_gpu_l2_config_set(struct panthor_device *ptdev)
  59          const struct panthor_soc_data *data = ptdev->soc_data;
  66          if (GPU_ARCH_MAJOR(ptdev->gpu_info.gpu_id) < 11) {
  67                  drm_err(&ptdev->base, "Custom ASN hash not supported by the device");
  72          gpu_write(ptdev, GPU_ASN_HASH(i), data->asn_hash[i]);
  74          l2_config = gpu_read(ptdev, GPU_L2_CONFIG);
  76          gpu_write(ptdev, GPU_L2_CONFIG, l2_config);
[all …]
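
panthor_gpu_coherency_set() writes the protocol chosen at probe time (the panthor_device.c hit above) back into GPU_COHERENCY_PROTOCOL, e.g. after a reset. The macros it uses are not part of this result; a plausible reading, stated purely as an assumption about panthor_regs.h:

    /* Assumed register encoding: each protocol is a small index, and
     * GPU_COHERENCY_FEATURES advertises the supported ones as a bitmask,
     * which is what the panthor_device.c line-38 check tests against. */
    #define GPU_COHERENCY_ACE_LITE          0
    #define GPU_COHERENCY_NONE              31
    #define GPU_COHERENCY_PROT_BIT(name)    BIT(GPU_COHERENCY_ ## name)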
panthor_fw.c

 272  struct panthor_vm *panthor_fw_vm(struct panthor_device *ptdev)
 274          return ptdev->fw->vm;
 284  panthor_fw_get_glb_iface(struct panthor_device *ptdev)
 286          return &ptdev->fw->iface.global;
 297  panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot)
 299          if (drm_WARN_ON(&ptdev->base, csg_slot >= MAX_CSGS))
 302          return &ptdev->fw->iface.groups[csg_slot];
 314  panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot)
 316          if (drm_WARN_ON(&ptdev->base, csg_slot >= MAX_CSGS || cs_slot >= MAX_CS_PER_CSG))
 319          return &ptdev->fw->iface.streams[csg_slot][cs_slot];
[all …]
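
The slot accessors guard their indices with drm_WARN_ON() and, per the truncated bodies, presumably return NULL on a bad slot, so callers are expected to check the pointer. A hedged usage sketch; the wrapper name, the iface type name, and the error code are assumptions:

    /* Hypothetical caller honoring the NULL-on-bad-slot contract implied
     * by the drm_WARN_ON() guards above. */
    static int panthor_fw_ping_cs(struct panthor_device *ptdev,
                                  u32 csg_slot, u32 cs_slot)
    {
            struct panthor_fw_cs_iface *cs_iface;

            cs_iface = panthor_fw_get_cs_iface(ptdev, csg_slot, cs_slot);
            if (!cs_iface)
                    return -EINVAL;

            /* ... poke cs_iface input / read cs_iface output ... */
            return 0;
    }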
panthor_device.h

  71          struct panthor_device *ptdev;
 250          struct panthor_device *ptdev;
 280  int panthor_device_init(struct panthor_device *ptdev);
 281  void panthor_device_unplug(struct panthor_device *ptdev);
 286  static inline void panthor_device_schedule_reset(struct panthor_device *ptdev)
 288          if (!atomic_cmpxchg(&ptdev->reset.pending, 0, 1) &&
 289              atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE)
 290                  queue_work(ptdev->reset.wq, &ptdev->reset.work);
 298  static inline bool panthor_device_reset_is_pending(struct panthor_device *ptdev)
 300          return atomic_read(&ptdev->reset.pending) != 0;
[all …]
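
panthor_device_schedule_reset() is idempotent by construction: atomic_cmpxchg() returns the old value, so only the 0 -> 1 transition queues the work, and only while the PM state is ACTIVE. Any error path can therefore call it unconditionally. A hedged sketch of the worker counterpart that re-arms the guard; the function name and body are assumptions:

    /* Hypothetical reset worker: clearing reset.pending once the reset
     * has run is what re-arms the cmpxchg guard in the header above. */
    static void panthor_device_reset_work(struct work_struct *work)
    {
            struct panthor_device *ptdev =
                    container_of(work, struct panthor_device, reset.work);

            /* ... perform the actual GPU reset ... */
            atomic_set(&ptdev->reset.pending, 0);
    }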
panthor_mmu.c

 250          struct panthor_device *ptdev;
 433          drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);   in alloc_pt()
 434          p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
 444          if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
 450          if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
 451              drm_WARN_ON(&vm->ptdev->base,
 486          if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))   in free_pt()
 493  static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
 501          ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
 506          panthor_device_schedule_reset(ptdev);
[all …]
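
wait_ready() polls AS_STATUS until the address space leaves its busy state and escalates to a full GPU reset on timeout (line 506). A hedged reconstruction of the truncated body; the status bit name, the poll interval, and the timeout are assumptions:

    /* Sketch of the poll-then-reset pattern; values are assumed. */
    static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
    {
            u32 val;
            int ret;

            /* Busy-wait until the AS stops reporting itself as active. */
            ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
                                                       !(val & AS_STATUS_AS_ACTIVE),
                                                       10, 100000);
            if (ret) {
                    /* The MMU is wedged; ask for a full GPU reset. */
                    panthor_device_schedule_reset(ptdev);
            }

            return ret;
    }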
panthor_hw.h

  15          int (*soft_reset)(struct panthor_device *ptdev);
  18          void (*l2_power_off)(struct panthor_device *ptdev);
  21          int (*l2_power_on)(struct panthor_device *ptdev);
  34  int panthor_hw_init(struct panthor_device *ptdev);
  36  static inline int panthor_hw_soft_reset(struct panthor_device *ptdev)
  38          return ptdev->hw->ops.soft_reset(ptdev);
  41  static inline int panthor_hw_l2_power_on(struct panthor_device *ptdev)
  43          return ptdev->hw->ops.l2_power_on(ptdev);
  46  static inline void panthor_hw_l2_power_off(struct panthor_device *ptdev)
  48          ptdev->hw->ops.l2_power_off(ptdev);
[all …]
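
panthor_hw.h shows an ops-table indirection: the inline wrappers dispatch through ptdev->hw->ops, so different GPU generations can plug in different reset and power implementations. The panthor_pwr.h prototypes further down match these signatures exactly; a hedged sketch of a backend wiring them up (the struct and instance names are assumptions):

    /* Hypothetical backend for GPUs with the PWR block: each entry's
     * signature matches the panthor_pwr.h prototypes listed below. */
    static const struct panthor_hw_ops panthor_hw_pwr_ops = {
            .soft_reset   = panthor_pwr_reset_soft,
            .l2_power_on  = panthor_pwr_l2_power_on,
            .l2_power_off = panthor_pwr_l2_power_off,
    };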
panthor_sched.c

 154          struct panthor_device *ptdev;
 543          struct panthor_device *ptdev;
 742          if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
 756                  !panthor_device_reset_is_pending((sched)->ptdev)) \
 772                  !panthor_device_reset_is_pending((sched)->ptdev)) \
 876          struct panthor_device *ptdev = group->ptdev;   in panthor_queue_get_syncwait_obj()
 887          if (drm_WARN_ON(&ptdev->base, IS_ERR_OR_NULL(bo)))
 892          if (drm_WARN_ON(&ptdev->base, ret))
 896          if (drm_WARN_ON(&ptdev->base, !queue->syncwait.kmap))
 912          drm_WARN_ON(&group->ptdev->base,   in group_free_queue()
[all …]
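
Line 742 is the interesting sched.c hit: a token-pasting macro that queues one of several per-group work items by name. A hedged reconstruction of the surrounding macro; the macro name and the get/put refcounting on the queue_work() failure path are assumptions based on the usual "drop the extra ref if the work was already queued" idiom:

    /* Hypothetical context for the line-742 hit; only the queue_work()
     * line itself comes from the search result. */
    #define group_queue_work(group, wname) \
            do { \
                    group_get(group); \
                    if (!queue_work((group)->ptdev->scheduler->wq, \
                                    &(group)->wname ## _work)) \
                            group_put(group); \
            } while (0)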
panthor_gpu.h

  12  int panthor_gpu_init(struct panthor_device *ptdev);
  13  void panthor_gpu_unplug(struct panthor_device *ptdev);
  14  void panthor_gpu_suspend(struct panthor_device *ptdev);
  15  void panthor_gpu_resume(struct panthor_device *ptdev);
  17  int panthor_gpu_block_power_on(struct panthor_device *ptdev,
  21  int panthor_gpu_block_power_off(struct panthor_device *ptdev,
  31  #define panthor_gpu_power_on(ptdev, type, mask, timeout_us) \
  32          panthor_gpu_block_power_on(ptdev, #type, \
  43  #define panthor_gpu_power_off(ptdev, type, mask, timeout_us) \
  44          panthor_gpu_block_power_off(ptdev, #type, \
[all …]
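
The power_on/power_off wrappers stringify their type argument (#type) to label the block in the underlying panthor_gpu_block_power_on/off() call, so callers pass a bare token rather than a string. A hedged usage sketch; the L2 token, the present-mask field, and the timeout are assumptions:

    /* Hypothetical call site: the bare L2 token becomes "L2" via #type. */
    static int panthor_gpu_l2_on(struct panthor_device *ptdev)
    {
            return panthor_gpu_power_on(ptdev, L2,
                                        ptdev->gpu_info.l2_present, 20000);
    }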
panthor_pwr.h

   9  void panthor_pwr_unplug(struct panthor_device *ptdev);
  11  int panthor_pwr_init(struct panthor_device *ptdev);
  13  int panthor_pwr_reset_soft(struct panthor_device *ptdev);
  15  void panthor_pwr_l2_power_off(struct panthor_device *ptdev);
  17  int panthor_pwr_l2_power_on(struct panthor_device *ptdev);
  19  void panthor_pwr_suspend(struct panthor_device *ptdev);
  21  void panthor_pwr_resume(struct panthor_device *ptdev);
panthor_fw.h

 479  panthor_fw_get_glb_iface(struct panthor_device *ptdev);
 482  panthor_fw_get_csg_iface(struct panthor_device *ptdev, u32 csg_slot);
 485  panthor_fw_get_cs_iface(struct panthor_device *ptdev, u32 csg_slot, u32 cs_slot);
 487  u64 panthor_fw_csg_endpoint_req_get(struct panthor_device *ptdev,
 490  void panthor_fw_csg_endpoint_req_set(struct panthor_device *ptdev,
 493  void panthor_fw_csg_endpoint_req_update(struct panthor_device *ptdev,
 497  int panthor_fw_csg_wait_acks(struct panthor_device *ptdev, u32 csg_id, u32 req_mask,
 500  int panthor_fw_glb_wait_acks(struct panthor_device *ptdev, u32 req_mask, u32 *acked,
 503  void panthor_fw_ring_csg_doorbells(struct panthor_device *ptdev, u32 csg_slot);
 506  panthor_fw_alloc_queue_iface_mem(struct panthor_device *ptdev,
[all …]
panthor_mmu.h

  19  int panthor_mmu_init(struct panthor_device *ptdev);
  20  void panthor_mmu_unplug(struct panthor_device *ptdev);
  21  void panthor_mmu_pre_reset(struct panthor_device *ptdev);
  22  void panthor_mmu_post_reset(struct panthor_device *ptdev);
  23  void panthor_mmu_suspend(struct panthor_device *ptdev);
  24  void panthor_mmu_resume(struct panthor_device *ptdev);
  44  struct panthor_vm *panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
  62  int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
panthor_gem.c

  27          struct panthor_device *ptdev = container_of(bo->base.base.dev,   in panthor_gem_debugfs_bo_add()
  33          mutex_lock(&ptdev->gems.lock);
  34          list_add_tail(&bo->debugfs.node, &ptdev->gems.node);
  35          mutex_unlock(&ptdev->gems.lock);
  40          struct panthor_device *ptdev = container_of(bo->base.base.dev,   in panthor_gem_debugfs_bo_rm()
  46          mutex_lock(&ptdev->gems.lock);
  48          mutex_unlock(&ptdev->gems.lock);
 122  panthor_kernel_bo_create(struct panthor_device *ptdev, struct panthor_vm *vm,
 132          if (drm_WARN_ON(&ptdev->base, !vm))
 139          obj = drm_gem_shmem_create(&ptdev->base, size);
[all …]
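
The two debugfs hooks above add and remove BOs on a device-wide list under ptdev->gems.lock. A hedged sketch of the reader that such a list exists for; the dump function's name, signature, and printed fields are assumptions:

    /* Hypothetical debugfs dump walking the gems list shown above. */
    static void panthor_gem_debugfs_print_bos(struct panthor_device *ptdev,
                                              struct seq_file *m)
    {
            struct panthor_gem_object *bo;

            mutex_lock(&ptdev->gems.lock);
            list_for_each_entry(bo, &ptdev->gems.node, debugfs.node)
                    seq_printf(m, "size: %zu\n", bo->base.base.size);
            mutex_unlock(&ptdev->gems.lock);
    }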