/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#ifndef __IVPU_HW_H__
#define __IVPU_HW_H__

#include "ivpu_drv.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"

struct ivpu_addr_range {
	resource_size_t start;
	resource_size_t end;
};

struct ivpu_hw_info {
	struct {
		bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
		bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
	} irq;
	struct {
		struct ivpu_addr_range global;
		struct ivpu_addr_range user;
		struct ivpu_addr_range shave;
		struct ivpu_addr_range dma;
	} ranges;
	struct {
		u8 min_ratio;
		u8 max_ratio;
		/*
		 * PLL ratio for the efficiency frequency. The VPU has the
		 * optimum performance-to-power ratio at this frequency.
		 */
		u8 pn_ratio;
		u32 profiling_freq;
	} pll;
	struct {
		u32 grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
		u32 process_quantum[VPU_HWS_NUM_PRIORITY_BANDS];
		u32 process_grace_period[VPU_HWS_NUM_PRIORITY_BANDS];
	} hws;
	u32 tile_fuse;
	u32 sku;
	u16 config;
	int dma_bits;
	ktime_t d0i3_entry_host_ts;
	u64 d0i3_entry_vpu_ts;
	atomic_t firewall_irq_counter;
};

int ivpu_hw_init(struct ivpu_device *vdev);
int ivpu_hw_power_up(struct ivpu_device *vdev);
int ivpu_hw_power_down(struct ivpu_device *vdev);
int ivpu_hw_reset(struct ivpu_device *vdev);
int ivpu_hw_boot_fw(struct ivpu_device *vdev);
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable);
void ivpu_irq_handlers_init(struct ivpu_device *vdev);
void ivpu_hw_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_irq_disable(struct ivpu_device *vdev);
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);
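
/*
 * The inline wrappers below simply forward to the per-generation callbacks
 * stored in struct ivpu_hw_info or call straight into the BTRS/IP helpers.
 * As a minimal sketch (illustrative only, not the driver's actual dispatch,
 * which sits behind ivpu_hw_irq_handler() declared above), a caller could
 * combine both interrupt sources like this:
 *
 *	bool handled = false;
 *
 *	handled |= ivpu_hw_btrs_irq_handler(vdev, irq);
 *	handled |= ivpu_hw_ip_irq_handler(vdev, irq);
 *	return handled ? IRQ_HANDLED : IRQ_NONE;
 */
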
static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
{
	return vdev->hw->irq.btrs_irq_handler(vdev, irq);
}

static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
{
	return vdev->hw->irq.ip_irq_handler(vdev, irq);
}

static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
{
	range->start = start;
	range->end = start + size;
}

static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
{
	return range->end - range->start;
}
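
/*
 * Example usage of the range helpers (an illustrative sketch; the base
 * address and SZ_2G size are made-up values, not taken from the driver):
 *
 *	struct ivpu_addr_range range;
 *
 *	ivpu_hw_range_init(&range, 0x80000000ull, SZ_2G);
 *	WARN_ON(ivpu_hw_range_size(&range) != SZ_2G);
 */
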
static inline u32 ivpu_hw_dpu_max_freq_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_dpu_max_freq_get(vdev);
}

static inline u32 ivpu_hw_dpu_freq_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_dpu_freq_get(vdev);
}

static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
{
	ivpu_hw_ip_irq_clear(vdev);
}

static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
{
	return vdev->hw->pll.profiling_freq;
}

static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev)
{
	ivpu_hw_ip_diagnose_failure(vdev);
	ivpu_hw_btrs_diagnose_failure(vdev);
}

static inline u32 ivpu_hw_telemetry_offset_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_telemetry_offset_get(vdev);
}

static inline u32 ivpu_hw_telemetry_size_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_telemetry_size_get(vdev);
}

static inline u32 ivpu_hw_telemetry_enable_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_telemetry_enable_get(vdev);
}

static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_is_idle(vdev);
}

static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wait_for_idle(vdev);
}

static inline void ivpu_hw_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	ivpu_hw_ip_ipc_tx_set(vdev, vpu_addr);
}

static inline void ivpu_hw_db_set(struct ivpu_device *vdev, u32 db_id)
{
	ivpu_hw_ip_db_set(vdev, db_id);
}

static inline u32 ivpu_hw_ipc_rx_addr_get(struct ivpu_device *vdev)
{
	return ivpu_hw_ip_ipc_rx_addr_get(vdev);
}

static inline u32 ivpu_hw_ipc_rx_count_get(struct ivpu_device *vdev)
{
	return ivpu_hw_ip_ipc_rx_count_get(vdev);
}
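
/*
 * A minimal sketch of how the IPC accessors above fit together (assumptions
 * only, not the driver's real IPC code; msg_vpu_addr and handle_msg() are
 * hypothetical placeholders): the send side publishes the VPU address of an
 * outgoing message, and the receive side drains whatever the IP reports as
 * pending:
 *
 *	ivpu_hw_ipc_tx_set(vdev, msg_vpu_addr);
 *
 *	while (ivpu_hw_ipc_rx_count_get(vdev))
 *		handle_msg(vdev, ivpu_hw_ipc_rx_addr_get(vdev));
 *
 * The actual protocol, message framing and locking live in the driver's IPC
 * layer, not in these register accessors.
 */
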
#endif /* __IVPU_HW_H__ */