/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_HW_H__
#define __IVPU_HW_H__

#include <linux/kfifo.h>

#include "ivpu_drv.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"

#define IVPU_HW_IRQ_FIFO_LENGTH 1024

#define IVPU_HW_IRQ_SRC_IPC 1
#define IVPU_HW_IRQ_SRC_MMU_EVTQ 2
#define IVPU_HW_IRQ_SRC_DCT 3
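/*
 * Illustrative only (not part of the driver API): the IRQ source IDs above
 * identify which block raised an interrupt and are presumably queued through
 * the irq.fifo declared in struct ivpu_hw_info below. A minimal sketch,
 * assuming the standard kfifo helpers and a hypothetical handle_irq_source()
 * consumer on the threaded side:
 *
 *	u8 irq_src;
 *
 *	kfifo_put(&vdev->hw->irq.fifo, IVPU_HW_IRQ_SRC_IPC);
 *	while (kfifo_get(&vdev->hw->irq.fifo, &irq_src))
 *		handle_irq_source(vdev, irq_src);
 */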

struct ivpu_addr_range {
	resource_size_t start;
	resource_size_t end;
};

struct ivpu_hw_info {
	struct {
		bool (*btrs_irq_handler)(struct ivpu_device *vdev, int irq);
		bool (*ip_irq_handler)(struct ivpu_device *vdev, int irq);
		DECLARE_KFIFO(fifo, u8, IVPU_HW_IRQ_FIFO_LENGTH);
	} irq;
	struct {
		struct ivpu_addr_range global;
		struct ivpu_addr_range user;
		struct ivpu_addr_range shave;
		struct ivpu_addr_range dma;
	} ranges;
	struct {
		u8 min_ratio;
		u8 max_ratio;
		/*
		 * PLL ratio for the efficiency frequency. The VPU has the
		 * optimal performance-to-power ratio at this frequency.
		 */
		u8 pn_ratio;
		u32 profiling_freq;
	} pll;
	u32 tile_fuse;
	u32 sched_mode;
	u32 sku;
	u16 config;
	int dma_bits;
	ktime_t d0i3_entry_host_ts;
	u64 d0i3_entry_vpu_ts;
	atomic_t firewall_irq_counter;
};

int ivpu_hw_init(struct ivpu_device *vdev);
int ivpu_hw_power_up(struct ivpu_device *vdev);
int ivpu_hw_power_down(struct ivpu_device *vdev);
int ivpu_hw_reset(struct ivpu_device *vdev);
int ivpu_hw_boot_fw(struct ivpu_device *vdev);
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable);
void ivpu_irq_handlers_init(struct ivpu_device *vdev);
void ivpu_hw_irq_enable(struct ivpu_device *vdev);
void ivpu_hw_irq_disable(struct ivpu_device *vdev);
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr);

static inline u32 ivpu_hw_btrs_irq_handler(struct ivpu_device *vdev, int irq)
{
	return vdev->hw->irq.btrs_irq_handler(vdev, irq);
}

static inline u32 ivpu_hw_ip_irq_handler(struct ivpu_device *vdev, int irq)
{
	return vdev->hw->irq.ip_irq_handler(vdev, irq);
}

static inline void ivpu_hw_range_init(struct ivpu_addr_range *range, u64 start, u64 size)
{
	range->start = start;
	range->end = start + size;
}

static inline u64 ivpu_hw_range_size(const struct ivpu_addr_range *range)
{
	return range->end - range->start;
}
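
/*
 * Illustrative only: a minimal sketch of how the range helpers above might be
 * used to describe an address window. The base address and SZ_256M size
 * (from <linux/sizes.h>) are made-up example values, not driver defaults:
 *
 *	struct ivpu_addr_range range;
 *	u64 size;
 *
 *	ivpu_hw_range_init(&range, 0x80000000ull, SZ_256M);
 *	size = ivpu_hw_range_size(&range); // always SZ_256M for this range
 */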

static inline u32 ivpu_hw_ratio_to_freq(struct ivpu_device *vdev, u32 ratio)
{
	return ivpu_hw_btrs_ratio_to_freq(vdev, ratio);
}
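
/*
 * Illustrative only: the PLL ratios stored in struct ivpu_hw_info can be
 * converted to a frequency with the wrapper above, e.g. for the efficiency
 * ratio (the variable name is arbitrary):
 *
 *	u32 pn_freq = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.pn_ratio);
 */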

static inline void ivpu_hw_irq_clear(struct ivpu_device *vdev)
{
	ivpu_hw_ip_irq_clear(vdev);
}

static inline u32 ivpu_hw_pll_freq_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_pll_freq_get(vdev);
}

static inline u32 ivpu_hw_profiling_freq_get(struct ivpu_device *vdev)
{
	return vdev->hw->pll.profiling_freq;
}

static inline void ivpu_hw_diagnose_failure(struct ivpu_device *vdev)
{
	ivpu_hw_ip_diagnose_failure(vdev);
	ivpu_hw_btrs_diagnose_failure(vdev);
}

static inline u32 ivpu_hw_telemetry_offset_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_telemetry_offset_get(vdev);
}

static inline u32 ivpu_hw_telemetry_size_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_telemetry_size_get(vdev);
}

static inline u32 ivpu_hw_telemetry_enable_get(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_telemetry_enable_get(vdev);
}

static inline bool ivpu_hw_is_idle(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_is_idle(vdev);
}

static inline int ivpu_hw_wait_for_idle(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wait_for_idle(vdev);
}

static inline void ivpu_hw_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	ivpu_hw_ip_ipc_tx_set(vdev, vpu_addr);
}

static inline void ivpu_hw_db_set(struct ivpu_device *vdev, u32 db_id)
{
	ivpu_hw_ip_db_set(vdev, db_id);
}

static inline u32 ivpu_hw_ipc_rx_addr_get(struct ivpu_device *vdev)
{
	return ivpu_hw_ip_ipc_rx_addr_get(vdev);
}

static inline u32 ivpu_hw_ipc_rx_count_get(struct ivpu_device *vdev)
{
	return ivpu_hw_ip_ipc_rx_count_get(vdev);
}

#endif /* __IVPU_HW_H__ */