1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020-2024 Intel Corporation
4 */
5
6 #include "ivpu_drv.h"
7 #include "ivpu_fw.h"
8 #include "ivpu_gem.h"
9 #include "ivpu_hw.h"
10 #include "ivpu_hw_37xx_reg.h"
11 #include "ivpu_hw_40xx_reg.h"
12 #include "ivpu_hw_btrs.h"
13 #include "ivpu_hw_ip.h"
14 #include "ivpu_hw_reg_io.h"
15 #include "ivpu_mmu.h"
16 #include "ivpu_pm.h"
17
18 #define PWR_ISLAND_STATUS_TIMEOUT_US (5 * USEC_PER_MSEC)
19
20 #define TIM_SAFE_ENABLE 0xf1d0dead
21 #define TIM_WATCHDOG_RESET_VALUE 0xffffffff
22
23 #define ICB_0_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
24 (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
25 (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
26 (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
27 (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
28 (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
29 (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
30
31 #define ICB_1_IRQ_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
32 (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
33 (REG_FLD(VPU_37XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
34
35 #define ICB_0_1_IRQ_MASK_37XX ((((u64)ICB_1_IRQ_MASK_37XX) << 32) | ICB_0_IRQ_MASK_37XX)
36
37 #define ICB_0_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT)) | \
38 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT)) | \
39 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT)) | \
40 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT)) | \
41 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT)) | \
42 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT)) | \
43 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT)))
44
45 #define ICB_1_IRQ_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_2_INT)) | \
46 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_3_INT)) | \
47 (REG_FLD(VPU_40XX_HOST_SS_ICB_STATUS_1, CPU_INT_REDIRECT_4_INT)))
48
49 #define ICB_0_1_IRQ_MASK_40XX ((((u64)ICB_1_IRQ_MASK_40XX) << 32) | ICB_0_IRQ_MASK_40XX)
50
51 #define ITF_FIREWALL_VIOLATION_MASK_37XX ((REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
52 (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
53 (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
54 (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
55 (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
56 (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
57 (REG_FLD(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
58
59 #define ITF_FIREWALL_VIOLATION_MASK_40XX ((REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_ROM_CMX)) | \
60 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_DBG)) | \
61 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, CSS_CTRL)) | \
62 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, DEC400)) | \
63 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_NCE)) | \
64 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI)) | \
65 (REG_FLD(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, MSS_MBI_CMX)))
66
/* Poll (up to 100 us) until the 37xx AON reset-clear bit reads back 0, i.e. the NPU IP BAR is up. */
static int wait_for_ip_bar(struct ivpu_device *vdev)
{
	return REGV_POLL_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, AON, 0, 100);
}
71
/* Clear the TOP_NOC, DSS and MSS master resets on 37xx (write-one-to-clear register). */
static void host_ss_rst_clr(struct ivpu_device *vdev)
{
	u32 val = 0;

	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, TOP_NOC, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, DSS_MAS, val);
	val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_CLR, MSS_MAS, val);

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_CLR, val);
}
82
/* Return 0 when the 37xx host SS NOC TOP_SOCMMIO qreqn field equals @exp_val, -EIO otherwise. */
static int host_ss_noc_qreqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	return REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
92
/* Return 0 when the 40xx host SS NOC TOP_SOCMMIO qreqn field equals @exp_val, -EIO otherwise. */
static int host_ss_noc_qreqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

	return REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
102
/* Dispatch the host SS NOC qreqn check to the generation-specific helper. */
static int host_ss_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return host_ss_noc_qreqn_check_40xx(vdev, exp_val);

	return host_ss_noc_qreqn_check_37xx(vdev, exp_val);
}
110
/* Return 0 when the 37xx host SS NOC TOP_SOCMMIO qacceptn field equals @exp_val, -EIO otherwise. */
static int host_ss_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_NOC_QACCEPTN);

	return REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
120
/* Return 0 when the 40xx host SS NOC TOP_SOCMMIO qacceptn field equals @exp_val, -EIO otherwise. */
static int host_ss_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_NOC_QACCEPTN);

	return REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QACCEPTN, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
130
/* Dispatch the host SS NOC qacceptn check to the generation-specific helper. */
static int host_ss_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return host_ss_noc_qacceptn_check_40xx(vdev, exp_val);

	return host_ss_noc_qacceptn_check_37xx(vdev, exp_val);
}
138
/* Return 0 when the 37xx host SS NOC TOP_SOCMMIO qdeny field equals @exp_val, -EIO otherwise. */
static int host_ss_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_HOST_SS_NOC_QDENY);

	return REG_TEST_FLD_NUM(VPU_37XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
148
/* Return 0 when the 40xx host SS NOC TOP_SOCMMIO qdeny field equals @exp_val, -EIO otherwise. */
static int host_ss_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_HOST_SS_NOC_QDENY);

	return REG_TEST_FLD_NUM(VPU_40XX_HOST_SS_NOC_QDENY, TOP_SOCMMIO, exp_val, reg) ? 0 : -EIO;
}
158
/* Dispatch the host SS NOC qdeny check to the generation-specific helper. */
static int host_ss_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return host_ss_noc_qdeny_check_40xx(vdev, exp_val);

	return host_ss_noc_qdeny_check_37xx(vdev, exp_val);
}
166
/* Return 0 when both 37xx TOP NOC qreqn fields (CPU_CTRL, HOSTIF_L2CACHE) equal @exp_val. */
static int top_noc_qrenqn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);
	bool ok = REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
177
/* Return 0 when both 40xx TOP NOC qreqn fields (CPU_CTRL, HOSTIF_L2CACHE) equal @exp_val. */
static int top_noc_qrenqn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);
	bool ok = REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
188
/* Dispatch the TOP NOC qreqn check to the generation-specific helper. */
static int top_noc_qreqn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return top_noc_qrenqn_check_40xx(vdev, exp_val);

	return top_noc_qrenqn_check_37xx(vdev, exp_val);
}
196
/*
 * Prepare the host subsystem NOC: on 37xx first wait for the IP BAR and
 * clear the CPR resets, then verify all QREQN/QACCEPTN/QDENY handshake
 * signals are deasserted (expected value 0) before the power-up sequence.
 */
int ivpu_hw_ip_host_ss_configure(struct ivpu_device *vdev)
{
	int ret;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ret = wait_for_ip_bar(vdev);
		if (ret) {
			ivpu_err(vdev, "Timed out waiting for NPU IP bar\n");
			return ret;
		}
		host_ss_rst_clr(vdev);
	}

	ret = host_ss_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qreqn check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qacceptn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	/* The qdeny failure is logged but the (non-zero) ret is still returned. */
	ret = host_ss_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check %d\n", ret);

	return ret;
}
228
/* Set or clear the 37xx AON idle-generation enable bit (read-modify-write). */
static void idle_gen_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, EN, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_VPU_IDLE_GEN, val);
}
240
/* Set or clear the 40xx AON idle-generation enable bit (read-modify-write). */
static void idle_gen_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_IDLE_GEN);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_IDLE_GEN, EN, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_IDLE_GEN, val);
}
252
ivpu_hw_ip_idle_gen_enable(struct ivpu_device * vdev)253 void ivpu_hw_ip_idle_gen_enable(struct ivpu_device *vdev)
254 {
255 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
256 idle_gen_drive_37xx(vdev, true);
257 else
258 idle_gen_drive_40xx(vdev, true);
259 }
260
ivpu_hw_ip_idle_gen_disable(struct ivpu_device * vdev)261 void ivpu_hw_ip_idle_gen_disable(struct ivpu_device *vdev)
262 {
263 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
264 idle_gen_drive_37xx(vdev, false);
265 else
266 idle_gen_drive_40xx(vdev, false);
267 }
268
/* Program the 50xx power-island enable post delays and the status delay. */
static void
pwr_island_delay_set_50xx(struct ivpu_device *vdev, u32 post, u32 post1, u32 post2, u32 status)
{
	u32 val;

	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST_DLY, post, val);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST1_DLY, post1, val);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, POST2_DLY, post2, val);
	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_EN_POST_DLY, val);

	val = REGV_RD32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY);
	val = REG_SET_FLD_NUM(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, STATUS_DLY, status, val);
	REGV_WR32(VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY, val);
}
284
/* Drive the 37xx MSS_CPU power-island trickle enable bit (read-modify-write). */
static void pwr_island_trickle_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
296
/* Drive the 40xx CSS_CPU power-island trickle enable bit (read-modify-write). */
static void pwr_island_trickle_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_TRICKLE_EN0, val);
}
308
/*
 * Drive the 37xx MSS_CPU power-island enable bit (read-modify-write).
 *
 * Bug fix: this 37xx helper was programming the 40xx register/field
 * (VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0 / CSS_CPU) — the bodies of the
 * _37xx and _40xx variants were swapped. Every other *_37xx helper in
 * this file (trickle drive, status poll) uses the VPU_37XX_* register
 * with the MSS_CPU field, so this one must too.
 */
static void pwr_island_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
320
/*
 * Drive the 40xx CSS_CPU power-island enable bit (read-modify-write).
 *
 * Bug fix: this 40xx helper was programming the 37xx register/field
 * (VPU_37XX_HOST_SS_AON_PWR_ISLAND_EN0 / MSS_CPU) — the bodies of the
 * _37xx and _40xx variants were swapped. Every other *_40xx helper in
 * this file (trickle drive, status poll) uses the VPU_40XX_* register
 * with the CSS_CPU field, so this one must too.
 */
static void pwr_island_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISLAND_EN0, val);
}
332
/*
 * Power up the CPU island: assert trickle enable first, wait 500 ns for the
 * trickle charge to ramp, then assert the main island enable. Ordering and
 * the delay between the two writes are required by the power sequencing.
 */
static void pwr_island_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		pwr_island_trickle_drive_37xx(vdev, true);
		ndelay(500);
		pwr_island_drive_37xx(vdev, true);
	} else {
		pwr_island_trickle_drive_40xx(vdev, true);
		ndelay(500);
		pwr_island_drive_40xx(vdev, true);
	}
}
345
/*
 * Poll the CPU power-island status bit until it equals @exp_val or the
 * timeout expires. Skipped (reports success) when the punit_disabled
 * workaround is active, since the status never updates in that case.
 */
static int wait_for_pwr_island_status(struct ivpu_device *vdev, u32 exp_val)
{
	if (IVPU_WA(punit_disabled))
		return 0;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		return REGV_POLL_FLD(VPU_37XX_HOST_SS_AON_PWR_ISLAND_STATUS0, MSS_CPU, exp_val,
				     PWR_ISLAND_STATUS_TIMEOUT_US);
	else
		return REGV_POLL_FLD(VPU_40XX_HOST_SS_AON_PWR_ISLAND_STATUS0, CSS_CPU, exp_val,
				     PWR_ISLAND_STATUS_TIMEOUT_US);
}
358
/* Drive the 37xx MSS_CPU power-island isolation bit (read-modify-write). */
static void pwr_island_isolation_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, MSS_CPU, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
370
/* Drive the 40xx CSS_CPU power-island isolation bit (read-modify-write). */
static void pwr_island_isolation_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, CSS_CPU, val);

	REGV_WR32(VPU_40XX_HOST_SS_AON_PWR_ISO_EN0, val);
}
382
/* Dispatch the isolation drive to the generation-specific helper. */
static void pwr_island_isolation_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX) {
		pwr_island_isolation_drive_40xx(vdev, enable);
		return;
	}

	pwr_island_isolation_drive_37xx(vdev, enable);
}
390
/* Remove power-island isolation (done after the island is powered and stable). */
static void pwr_island_isolation_disable(struct ivpu_device *vdev)
{
	pwr_island_isolation_drive(vdev, false);
}
395
/* Enable/disable the 37xx TOP_NOC, DSS and MSS master clocks (read-modify-write). */
static void host_ss_clk_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_CLK_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_CLK_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_CLK_SET, val);
}
412
/* Enable/disable the 40xx TOP_NOC, DSS and CSS master clocks (read-modify-write). */
static void host_ss_clk_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_CLK_EN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_CLK_EN, CSS_MAS, val);
	}

	REGV_WR32(VPU_40XX_HOST_SS_CPR_CLK_EN, val);
}
429
/* Dispatch the host SS clock drive to the generation-specific helper. */
static void host_ss_clk_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX) {
		host_ss_clk_drive_40xx(vdev, enable);
		return;
	}

	host_ss_clk_drive_37xx(vdev, enable);
}
437
/* Turn on all host subsystem clocks. */
static void host_ss_clk_enable(struct ivpu_device *vdev)
{
	host_ss_clk_drive(vdev, true);
}
442
/* Assert/deassert the 37xx TOP_NOC, DSS and MSS master resets (read-modify-write). */
static void host_ss_rst_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_CPR_RST_SET);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_SET_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_CPR_RST_SET, MSS_MAS, val);
	}

	REGV_WR32(VPU_37XX_HOST_SS_CPR_RST_SET, val);
}
459
/* Assert/deassert the 40xx TOP_NOC, DSS and CSS master resets (read-modify-write). */
static void host_ss_rst_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_CPR_RST_EN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
		val = REG_SET_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, TOP_NOC, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, DSS_MAS, val);
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_CPR_RST_EN, CSS_MAS, val);
	}

	REGV_WR32(VPU_40XX_HOST_SS_CPR_RST_EN, val);
}
476
/* Dispatch the host SS reset drive to the generation-specific helper. */
static void host_ss_rst_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX) {
		host_ss_rst_drive_40xx(vdev, enable);
		return;
	}

	host_ss_rst_drive_37xx(vdev, enable);
}
484
/* Release the host subsystem resets. */
static void host_ss_rst_enable(struct ivpu_device *vdev)
{
	host_ss_rst_drive(vdev, true);
}
489
/* Drive the 37xx TOP_SOCMMIO qreqn handshake request bit (read-modify-write). */
static void host_ss_noc_qreqn_top_socmmio_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_NOC_QREQN);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	REGV_WR32(VPU_37XX_HOST_SS_NOC_QREQN, val);
}
500
/* Drive the 40xx TOP_SOCMMIO qreqn handshake request bit (read-modify-write). */
static void host_ss_noc_qreqn_top_socmmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_SS_NOC_QREQN);

	if (enable)
		val = REG_SET_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_SS_NOC_QREQN, TOP_SOCMMIO, val);
	REGV_WR32(VPU_40XX_HOST_SS_NOC_QREQN, val);
}
511
/* Dispatch the TOP_SOCMMIO qreqn drive to the generation-specific helper. */
static void host_ss_noc_qreqn_top_socmmio_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX) {
		host_ss_noc_qreqn_top_socmmio_drive_40xx(vdev, enable);
		return;
	}

	host_ss_noc_qreqn_top_socmmio_drive_37xx(vdev, enable);
}
519
/*
 * Perform the host SS AXI qreqn/qacceptn/qdeny handshake: request the new
 * state, then verify the NOC accepted it (qacceptn follows the request) and
 * did not deny it (qdeny must stay 0).
 */
static int host_ss_axi_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;

	host_ss_noc_qreqn_top_socmmio_drive(vdev, enable);

	ret = host_ss_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed HOST SS NOC QACCEPTN check: %d\n", ret);
		return ret;
	}

	ret = host_ss_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed HOST SS NOC QDENY check: %d\n", ret);

	return ret;
}
538
/* Drive the 40xx TOP NOC CPU_CTRL and HOSTIF_L2CACHE qreqn bits (read-modify-write). */
static void top_noc_qreqn_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_40XX_TOP_NOC_QREQN);

	if (enable) {
		val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_SET_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	} else {
		val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_CLR_FLD(VPU_40XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	}

	REGV_WR32(VPU_40XX_TOP_NOC_QREQN, val);
}
553
/* Drive the 37xx TOP NOC CPU_CTRL and HOSTIF_L2CACHE qreqn bits (read-modify-write). */
static void top_noc_qreqn_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_TOP_NOC_QREQN);

	if (enable) {
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_SET_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	} else {
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, CPU_CTRL, val);
		val = REG_CLR_FLD(VPU_37XX_TOP_NOC_QREQN, HOSTIF_L2CACHE, val);
	}

	REGV_WR32(VPU_37XX_TOP_NOC_QREQN, val);
}
568
/* Dispatch the TOP NOC qreqn drive to the generation-specific helper. */
static void top_noc_qreqn_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX) {
		top_noc_qreqn_drive_40xx(vdev, enable);
		return;
	}

	top_noc_qreqn_drive_37xx(vdev, enable);
}
576
/* Enable the host SS AXI interface via the qreqn/qacceptn/qdeny handshake. */
int ivpu_hw_ip_host_ss_axi_enable(struct ivpu_device *vdev)
{
	return host_ss_axi_drive(vdev, true);
}
581
/* Return 0 when both 37xx TOP NOC qacceptn fields (CPU_CTRL, HOSTIF_L2CACHE) equal @exp_val. */
static int top_noc_qacceptn_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_TOP_NOC_QACCEPTN);
	bool ok = REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
592
/* Return 0 when both 40xx TOP NOC qacceptn fields (CPU_CTRL, HOSTIF_L2CACHE) equal @exp_val. */
static int top_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_TOP_NOC_QACCEPTN);
	bool ok = REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QACCEPTN, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
603
/* Dispatch the TOP NOC qacceptn check to the generation-specific helper. */
static int top_noc_qacceptn_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return top_noc_qacceptn_check_40xx(vdev, exp_val);

	return top_noc_qacceptn_check_37xx(vdev, exp_val);
}
611
/* Return 0 when both 37xx TOP NOC qdeny fields (CPU_CTRL, HOSTIF_L2CACHE) equal @exp_val. */
static int top_noc_qdeny_check_37xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_37XX_TOP_NOC_QDENY);
	bool ok = REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_37XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
622
/* Return 0 when both 40xx TOP NOC qdeny fields (CPU_CTRL, HOSTIF_L2CACHE) equal @exp_val. */
static int top_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_TOP_NOC_QDENY);
	bool ok = REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, CPU_CTRL, exp_val, reg) &&
		  REG_TEST_FLD_NUM(VPU_40XX_TOP_NOC_QDENY, HOSTIF_L2CACHE, exp_val, reg);

	return ok ? 0 : -EIO;
}
633
/* Dispatch the TOP NOC qdeny check to the generation-specific helper. */
static int top_noc_qdeny_check(struct ivpu_device *vdev, u32 exp_val)
{
	if (ivpu_hw_ip_gen(vdev) != IVPU_HW_IP_37XX)
		return top_noc_qdeny_check_40xx(vdev, exp_val);

	return top_noc_qdeny_check_37xx(vdev, exp_val);
}
641
/*
 * Perform the TOP NOC qreqn/qacceptn/qdeny handshake: request the new state,
 * then verify the fabric accepted it and did not deny it (qdeny must stay 0).
 */
static int top_noc_drive(struct ivpu_device *vdev, bool enable)
{
	int ret;

	top_noc_qreqn_drive(vdev, enable);

	ret = top_noc_qacceptn_check(vdev, enable ? 0x1 : 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed TOP NOC QACCEPTN check: %d\n", ret);
		return ret;
	}

	ret = top_noc_qdeny_check(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed TOP NOC QDENY check: %d\n", ret);

	return ret;
}
660
/* Enable the TOP NOC via its qreqn/qacceptn/qdeny handshake. */
int ivpu_hw_ip_top_noc_enable(struct ivpu_device *vdev)
{
	return top_noc_drive(vdev, true);
}
665
/* Set or clear the 37xx AON DPU_ACTIVE bit (read-modify-write); 37xx-only step. */
static void dpu_active_drive_37xx(struct ivpu_device *vdev, bool enable)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE);

	if (enable)
		val = REG_SET_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);
	else
		val = REG_CLR_FLD(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, DPU_ACTIVE, val);

	REGV_WR32(VPU_37XX_HOST_SS_AON_DPU_ACTIVE, val);
}
677
/*
 * Select and program the power-island sequencing delays for 50xx-and-newer
 * IP generations. The delay values depend on the PCI device ID and on
 * whether the PLL runs at the high profiling frequency; older generations
 * have no programmable delays and return early. Delay constants are in
 * hardware-defined units — see the 50xx register definitions.
 */
static void pwr_island_delay_set(struct ivpu_device *vdev)
{
	bool high = vdev->hw->pll.profiling_freq == PLL_PROFILING_FREQ_HIGH;
	u32 post, post1, post2, status;

	if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
		return;

	switch (ivpu_device_id(vdev)) {
	case PCI_DEVICE_ID_WCL:
	case PCI_DEVICE_ID_PTL_P:
		post = high ? 18 : 0;
		post1 = 0;
		post2 = 0;
		status = high ? 46 : 3;
		break;

	case PCI_DEVICE_ID_NVL:
		post = high ? 198 : 17;
		post1 = 0;
		post2 = high ? 198 : 17;
		status = 0;
		break;

	default:
		/* Unknown 50xx+ device: dump a stack trace to flag the missing table entry. */
		dump_stack();
		ivpu_err(vdev, "Unknown device ID\n");
		return;
	}

	pwr_island_delay_set_50xx(vdev, post, post1, post2, status);
}
710
/*
 * Power up the NPU CPU power domain. The sequence order is mandated by the
 * hardware: program delays, enable the island, wait for the island status,
 * confirm the TOP NOC qreqn is idle, then enable clocks, drop isolation and
 * release resets. On 37xx the DPU must additionally be marked active.
 */
int ivpu_hw_ip_pwr_domain_enable(struct ivpu_device *vdev)
{
	int ret;

	pwr_island_delay_set(vdev);
	pwr_island_enable(vdev);

	ret = wait_for_pwr_island_status(vdev, 0x1);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for power island status\n");
		return ret;
	}

	ret = top_noc_qreqn_check(vdev, 0x0);
	if (ret) {
		ivpu_err(vdev, "Failed TOP NOC QREQN check %d\n", ret);
		return ret;
	}

	host_ss_clk_enable(vdev);
	pwr_island_isolation_disable(vdev);
	host_ss_rst_enable(vdev);

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		dpu_active_drive_37xx(vdev, true);

	return ret;
}
739
ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device * vdev)740 u64 ivpu_hw_ip_read_perf_timer_counter(struct ivpu_device *vdev)
741 {
742 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
743 return REGV_RD64(VPU_37XX_CPU_SS_TIM_PERF_FREE_CNT);
744 else
745 return REGV_RD64(VPU_40XX_CPU_SS_TIM_PERF_EXT_FREE_CNT);
746 }
747
/*
 * Configure 37xx TCU page-table-walk snoop overrides. Note the 37xx register
 * uses NOSNOOP polarity: clearing AW_NOSNOOP_OVERRIDE keeps write snooping,
 * and AR_NOSNOOP_OVERRIDE is set (reads not snooped) unless force-snoop is on.
 */
static void ivpu_hw_ip_snoop_disable_37xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, NOSNOOP_OVERRIDE_EN, val);
	val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AW_NOSNOOP_OVERRIDE, val);

	if (ivpu_is_force_snoop_enabled(vdev))
		val = REG_CLR_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);
	else
		val = REG_SET_FLD(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, AR_NOSNOOP_OVERRIDE, val);

	REGV_WR32(VPU_37XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
762
/*
 * Configure 40xx TCU page-table-walk snoop overrides. The 40xx register uses
 * SNOOP polarity (inverse of 37xx): writes stay snooped, reads are snooped
 * only when force-snoop is enabled.
 */
static void ivpu_hw_ip_snoop_disable_40xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES);

	val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, SNOOP_OVERRIDE_EN, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AW_SNOOP_OVERRIDE, val);

	if (ivpu_is_force_snoop_enabled(vdev))
		val = REG_SET_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);
	else
		val = REG_CLR_FLD(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, AR_SNOOP_OVERRIDE, val);

	REGV_WR32(VPU_40XX_HOST_IF_TCU_PTW_OVERRIDES, val);
}
777
ivpu_hw_ip_snoop_disable(struct ivpu_device * vdev)778 void ivpu_hw_ip_snoop_disable(struct ivpu_device *vdev)
779 {
780 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
781 return ivpu_hw_ip_snoop_disable_37xx(vdev);
782 else
783 return ivpu_hw_ip_snoop_disable_40xx(vdev);
784 }
785
/* Mark the MMU stream IDs valid for 37xx TBU0 and TBU2 read/write channels. */
static void ivpu_hw_ip_tbu_mmu_enable_37xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_37XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_37XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_37XX_HOST_IF_TBU_MMUSSIDV, val);
}
797
/* Mark the MMU stream IDs valid for 40xx TBU0, TBU1 and TBU2 read/write channels. */
static void ivpu_hw_ip_tbu_mmu_enable_40xx(struct ivpu_device *vdev)
{
	u32 val = REGV_RD32(VPU_40XX_HOST_IF_TBU_MMUSSIDV);

	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU0_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU1_ARMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_AWMMUSSIDV, val);
	val = REG_SET_FLD(VPU_40XX_HOST_IF_TBU_MMUSSIDV, TBU2_ARMMUSSIDV, val);

	REGV_WR32(VPU_40XX_HOST_IF_TBU_MMUSSIDV, val);
}
811
ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device * vdev)812 void ivpu_hw_ip_tbu_mmu_enable(struct ivpu_device *vdev)
813 {
814 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
815 return ivpu_hw_ip_tbu_mmu_enable_37xx(vdev);
816 else
817 return ivpu_hw_ip_tbu_mmu_enable_40xx(vdev);
818 }
819
get_entry_point_addr(struct ivpu_device * vdev)820 static inline u64 get_entry_point_addr(struct ivpu_device *vdev)
821 {
822 if (ivpu_fw_is_warm_boot(vdev))
823 return vdev->fw->warm_boot_entry_point;
824 else
825 return vdev->fw->cold_boot_entry_point;
826 }
827
/*
 * Boot the 37xx LEON RT CPU: pulse the reset/resume controls in the
 * documented order, then program the firmware entry point and set DONE.
 * Each intermediate write must reach the register before the next step,
 * hence the repeated REGV_WR32 calls.
 */
static int soc_cpu_boot_37xx(struct ivpu_device *vdev)
{
	u32 val;

	val = REGV_RD32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC);
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTRUN0, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RSTVEC, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	/* Pulse IRQI_RESUME0: set then clear to kick the CPU out of reset. */
	val = REG_SET_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	val = REG_CLR_FLD(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, IRQI_RESUME0, val);
	REGV_WR32(VPU_37XX_CPU_SS_MSSCPU_CPR_LEON_RT_VEC, val);

	/* Entry point is written shifted right by 9 — register holds the address in 512 B units. */
	val = get_entry_point_addr(vdev) >> 9;
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	val = REG_SET_FLD(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_37XX_HOST_SS_LOADING_ADDRESS_LO, val);

	return 0;
}
852
/* Return 0 when the 40xx CPU SS NOC TOP_MMIO qacceptn field equals @exp_val, -EIO otherwise. */
static int cpu_noc_qacceptn_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN);

	return REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QACCEPTN, TOP_MMIO, exp_val, reg) ? 0 : -EIO;
}
862
/* Verify the CPU NOC QDENY TOP_MMIO field equals @exp_val; -EIO otherwise. */
static int cpu_noc_qdeny_check_40xx(struct ivpu_device *vdev, u32 exp_val)
{
	u32 reg = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QDENY);

	if (REG_TEST_FLD_NUM(VPU_40XX_CPU_SS_CPR_NOC_QDENY, TOP_MMIO, exp_val, reg))
		return 0;

	return -EIO;
}
872
/* Drive the CPU NOC QREQN TOP_MMIO request bit high (@enable) or low. */
static void cpu_noc_top_mmio_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 reg = REGV_RD32(VPU_40XX_CPU_SS_CPR_NOC_QREQN);

	reg = enable ? REG_SET_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, reg) :
		       REG_CLR_FLD(VPU_40XX_CPU_SS_CPR_NOC_QREQN, TOP_MMIO, reg);
	REGV_WR32(VPU_40XX_CPU_SS_CPR_NOC_QREQN, reg);
}
883
/*
 * Request the SOC CPU NOC interface on/off and confirm the handshake:
 * QACCEPTN must follow the request and QDENY must stay clear.
 * Returns 0 on success, -EIO if either check fails.
 */
static int soc_cpu_drive_40xx(struct ivpu_device *vdev, bool enable)
{
	u32 qacceptn_exp = enable ? 0x1 : 0x0;
	int ret;

	cpu_noc_top_mmio_drive_40xx(vdev, enable);

	ret = cpu_noc_qacceptn_check_40xx(vdev, qacceptn_exp);
	if (ret) {
		ivpu_err(vdev, "Failed qacceptn check: %d\n", ret);
		return ret;
	}

	ret = cpu_noc_qdeny_check_40xx(vdev, 0x0);
	if (ret)
		ivpu_err(vdev, "Failed qdeny check: %d\n", ret);

	return ret;
}
902
/*
 * Program the firmware entry point into the 40xx verification address
 * registers, then set DONE via read-modify-write to signal the address is
 * valid. The 64-bit write must precede the DONE write — do not reorder.
 */
static void soc_cpu_set_entry_point_40xx(struct ivpu_device *vdev, u64 entry_point)
{
	u64 val64;
	u32 val;

	val64 = entry_point;
	/* Shift the address into the IMAGE_LOCATION field position */
	val64 <<= ffs(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO_IMAGE_LOCATION_MASK) - 1;
	REGV_WR64(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val64);

	val = REGV_RD32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO);
	val = REG_SET_FLD(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, DONE, val);
	REGV_WR32(VPU_40XX_HOST_SS_VERIFICATION_ADDRESS_LO, val);
}
916
soc_cpu_boot_40xx(struct ivpu_device * vdev)917 static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
918 {
919 int ret;
920
921 ret = soc_cpu_drive_40xx(vdev, true);
922 if (ret) {
923 ivpu_err(vdev, "Failed to enable SOC CPU: %d\n", ret);
924 return ret;
925 }
926
927 soc_cpu_set_entry_point_40xx(vdev, get_entry_point_addr(vdev));
928
929 return 0;
930 }
931
/*
 * Boot the 60xx SOC CPU. Reuses the 40xx entry-point programming but,
 * unlike the 37xx/40xx paths, always uses the cold-boot entry point
 * (never get_entry_point_addr(), i.e. no warm-boot address).
 */
static int soc_cpu_boot_60xx(struct ivpu_device *vdev)
{
	soc_cpu_set_entry_point_40xx(vdev, vdev->fw->cold_boot_entry_point);

	return 0;
}
938
ivpu_hw_ip_soc_cpu_boot(struct ivpu_device * vdev)939 int ivpu_hw_ip_soc_cpu_boot(struct ivpu_device *vdev)
940 {
941 int ret;
942
943 switch (ivpu_hw_ip_gen(vdev)) {
944 case IVPU_HW_IP_37XX:
945 ret = soc_cpu_boot_37xx(vdev);
946 break;
947
948 case IVPU_HW_IP_40XX:
949 case IVPU_HW_IP_50XX:
950 ret = soc_cpu_boot_40xx(vdev);
951 break;
952
953 default:
954 ret = soc_cpu_boot_60xx(vdev);
955 }
956
957 if (ret)
958 return ret;
959
960 ivpu_dbg(vdev, PM, "Booting firmware, mode: %s\n",
961 ivpu_fw_is_warm_boot(vdev) ? "warm boot" : "cold boot");
962
963 return 0;
964 }
965
/*
 * Disable the 37xx CPU watchdog. Each write to a protected WDT register
 * must be preceded by a TIM_SAFE unlock write — keep the ordering intact.
 */
static void wdt_disable_37xx(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_37XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_37XX_CPU_SS_TIM_GEN_CONFIG, val);
}
983
/*
 * Disable the 40xx CPU watchdog — same sequence as wdt_disable_37xx():
 * every protected WDT write is preceded by a TIM_SAFE unlock write.
 */
static void wdt_disable_40xx(struct ivpu_device *vdev)
{
	u32 val;

	/* Enable writing and set non-zero WDT value */
	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WATCHDOG, TIM_WATCHDOG_RESET_VALUE);

	/* Enable writing and disable watchdog timer */
	REGV_WR32(VPU_40XX_CPU_SS_TIM_SAFE, TIM_SAFE_ENABLE);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_WDOG_EN, 0);

	/* Now clear the timeout interrupt */
	val = REGV_RD32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG);
	val = REG_CLR_FLD(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, WDOG_TO_INT_CLR, val);
	REGV_WR32(VPU_40XX_CPU_SS_TIM_GEN_CONFIG, val);
}
998
ivpu_hw_ip_wdt_disable(struct ivpu_device * vdev)999 void ivpu_hw_ip_wdt_disable(struct ivpu_device *vdev)
1000 {
1001 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1002 return wdt_disable_37xx(vdev);
1003 else
1004 return wdt_disable_40xx(vdev);
1005 }
1006
ipc_rx_count_get_37xx(struct ivpu_device * vdev)1007 static u32 ipc_rx_count_get_37xx(struct ivpu_device *vdev)
1008 {
1009 u32 count = readl(vdev->regv + VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT);
1010
1011 return REG_GET_FLD(VPU_37XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
1012 }
1013
ipc_rx_count_get_40xx(struct ivpu_device * vdev)1014 static u32 ipc_rx_count_get_40xx(struct ivpu_device *vdev)
1015 {
1016 u32 count = readl(vdev->regv + VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT);
1017
1018 return REG_GET_FLD(VPU_40XX_HOST_SS_TIM_IPC_FIFO_STAT, FILL_LEVEL, count);
1019 }
1020
ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device * vdev)1021 u32 ivpu_hw_ip_ipc_rx_count_get(struct ivpu_device *vdev)
1022 {
1023 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1024 return ipc_rx_count_get_37xx(vdev);
1025 else
1026 return ipc_rx_count_get_40xx(vdev);
1027 }
1028
/*
 * Unmask NPU-core interrupts: firewall-violation sources via FW_SOC_IRQ_EN
 * and the ICB status lines 0/1 via the combined 64-bit enable mask.
 */
void ivpu_hw_ip_irq_enable(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_37XX);
		REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_37XX);
	} else {
		REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, ITF_FIREWALL_VIOLATION_MASK_40XX);
		REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, ICB_0_1_IRQ_MASK_40XX);
	}
}
1039
ivpu_hw_ip_irq_disable(struct ivpu_device * vdev)1040 void ivpu_hw_ip_irq_disable(struct ivpu_device *vdev)
1041 {
1042 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
1043 REGV_WR64(VPU_37XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
1044 REGV_WR32(VPU_37XX_HOST_SS_FW_SOC_IRQ_EN, 0x0);
1045 } else {
1046 REGV_WR64(VPU_40XX_HOST_SS_ICB_ENABLE_0, 0x0ull);
1047 REGV_WR32(VPU_40XX_HOST_SS_FW_SOC_IRQ_EN, 0x0ul);
1048 }
1049 }
1050
diagnose_failure_37xx(struct ivpu_device * vdev)1051 static void diagnose_failure_37xx(struct ivpu_device *vdev)
1052 {
1053 u32 reg = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;
1054
1055 if (ipc_rx_count_get_37xx(vdev))
1056 ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
1057
1058 if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
1059 ivpu_err(vdev, "WDT MSS timeout detected\n");
1060
1061 if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
1062 ivpu_err(vdev, "WDT NCE timeout detected\n");
1063
1064 if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
1065 ivpu_err(vdev, "NOC Firewall irq detected\n");
1066 }
1067
diagnose_failure_40xx(struct ivpu_device * vdev)1068 static void diagnose_failure_40xx(struct ivpu_device *vdev)
1069 {
1070 u32 reg = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;
1071
1072 if (ipc_rx_count_get_40xx(vdev))
1073 ivpu_err(vdev, "IPC FIFO queue not empty, missed IPC IRQ");
1074
1075 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, reg))
1076 ivpu_err(vdev, "WDT MSS timeout detected\n");
1077
1078 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, reg))
1079 ivpu_err(vdev, "WDT NCE timeout detected\n");
1080
1081 if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, reg))
1082 ivpu_err(vdev, "NOC Firewall irq detected\n");
1083 }
1084
ivpu_hw_ip_diagnose_failure(struct ivpu_device * vdev)1085 void ivpu_hw_ip_diagnose_failure(struct ivpu_device *vdev)
1086 {
1087 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1088 diagnose_failure_37xx(vdev);
1089 else
1090 diagnose_failure_40xx(vdev);
1091 }
1092
ivpu_hw_ip_irq_clear(struct ivpu_device * vdev)1093 void ivpu_hw_ip_irq_clear(struct ivpu_device *vdev)
1094 {
1095 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1096 REGV_WR64(VPU_37XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_37XX);
1097 else
1098 REGV_WR64(VPU_40XX_HOST_SS_ICB_CLEAR_0, ICB_0_1_IRQ_MASK_40XX);
1099 }
1100
/* WDT NCE timeout: request device recovery via the PM layer. */
static void irq_wdt_nce_handler(struct ivpu_device *vdev)
{
	ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
}
1105
/*
 * WDT MSS timeout: disable the watchdog first so it cannot re-fire while
 * recovery runs, then request device recovery via the PM layer.
 */
static void irq_wdt_mss_handler(struct ivpu_device *vdev)
{
	ivpu_hw_ip_wdt_disable(vdev);
	ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
}
1111
irq_noc_firewall_handler(struct ivpu_device * vdev)1112 static void irq_noc_firewall_handler(struct ivpu_device *vdev)
1113 {
1114 atomic_inc(&vdev->hw->firewall_irq_counter);
1115
1116 ivpu_dbg(vdev, IRQ, "NOC Firewall interrupt detected, counter %d\n",
1117 atomic_read(&vdev->hw->firewall_irq_counter));
1118 }
1119
1120 /* Handler for IRQs from NPU core */
/*
 * Handler for IRQs from the NPU core (37xx). Reads the masked ICB status,
 * acknowledges all pending bits up front (so new events latched while we
 * handle these are not lost), then dispatches each source.
 * Returns true if any masked interrupt was pending, false otherwise.
 * @irq is unused here; the signature matches the IRQ dispatch convention.
 */
bool ivpu_hw_ip_irq_handler_37xx(struct ivpu_device *vdev, int irq)
{
	u32 status = REGV_RD32(VPU_37XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_37XX;

	if (!status)
		return false;

	/* Ack before handling — see comment above */
	REGV_WR32(VPU_37XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_37XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}
1153
1154 /* Handler for IRQs from NPU core */
/*
 * Handler for IRQs from the NPU core (40xx). Mirrors the 37xx handler:
 * read the masked ICB status, acknowledge all pending bits up front (so
 * events latched while handling are not lost), then dispatch each source.
 * Returns true if any masked interrupt was pending, false otherwise.
 * @irq is unused here; the signature matches the IRQ dispatch convention.
 */
bool ivpu_hw_ip_irq_handler_40xx(struct ivpu_device *vdev, int irq)
{
	u32 status = REGV_RD32(VPU_40XX_HOST_SS_ICB_STATUS_0) & ICB_0_IRQ_MASK_40XX;

	if (!status)
		return false;

	/* Ack before handling — see comment above */
	REGV_WR32(VPU_40XX_HOST_SS_ICB_CLEAR_0, status);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_0_INT, status))
		ivpu_mmu_irq_evtq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, HOST_IPC_FIFO_INT, status))
		ivpu_ipc_irq_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_1_INT, status))
		ivpu_dbg(vdev, IRQ, "MMU sync complete\n");

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, MMU_IRQ_2_INT, status))
		ivpu_mmu_irq_gerr_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_0_INT, status))
		irq_wdt_mss_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, CPU_INT_REDIRECT_1_INT, status))
		irq_wdt_nce_handler(vdev);

	if (REG_TEST_FLD(VPU_40XX_HOST_SS_ICB_STATUS_0, NOC_FIREWALL_INT, status))
		irq_noc_firewall_handler(vdev);

	return true;
}
1187
/* Ring 37xx doorbell @db_id by writing the SET bit at the indexed register. */
static void db_set_37xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 stride = VPU_37XX_CPU_SS_DOORBELL_1 - VPU_37XX_CPU_SS_DOORBELL_0;

	REGV_WR32I(VPU_37XX_CPU_SS_DOORBELL_0, stride, db_id,
		   REG_FLD(VPU_37XX_CPU_SS_DOORBELL_0, SET));
}
1195
/* Ring 40xx doorbell @db_id by writing the SET bit at the indexed register. */
static void db_set_40xx(struct ivpu_device *vdev, u32 db_id)
{
	u32 stride = VPU_40XX_CPU_SS_DOORBELL_1 - VPU_40XX_CPU_SS_DOORBELL_0;

	REGV_WR32I(VPU_40XX_CPU_SS_DOORBELL_0, stride, db_id,
		   REG_FLD(VPU_40XX_CPU_SS_DOORBELL_0, SET));
}
1203
/* Ring doorbell @db_id via the generation-specific implementation. */
void ivpu_hw_ip_db_set(struct ivpu_device *vdev, u32 db_id)
{
	bool is_37xx = ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX;

	if (is_37xx)
		db_set_37xx(vdev, db_id);
	else
		db_set_40xx(vdev, db_id);
}
1211
ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device * vdev)1212 u32 ivpu_hw_ip_ipc_rx_addr_get(struct ivpu_device *vdev)
1213 {
1214 if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
1215 return REGV_RD32(VPU_37XX_HOST_SS_TIM_IPC_FIFO_ATM);
1216 else
1217 return REGV_RD32(VPU_40XX_HOST_SS_TIM_IPC_FIFO_ATM);
1218 }
1219
/* Push an IPC TX message address into the generation's CPU-side FIFO register. */
void ivpu_hw_ip_ipc_tx_set(struct ivpu_device *vdev, u32 vpu_addr)
{
	bool is_37xx = ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX;

	if (is_37xx)
		REGV_WR32(VPU_37XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
	else
		REGV_WR32(VPU_40XX_CPU_SS_TIM_IPC_FIFO, vpu_addr);
}
1227