// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - 2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"

#include <linux/dmi.h>
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(ivpu_hw_failure);

static char *ivpu_fail_hw;
module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
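/*
 * Example (standard fault-inject attribute syntax; the values are purely
 * illustrative): loading with fail_hw=1,100,0,-1 should make every
 * fault-injectable HW access fail: check every call (interval 1) with 100%
 * probability, an unlimited number of times (-1).
 */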
#endif

static char *platform_to_str(u32 platform)
{
	switch (platform) {
	case IVPU_PLATFORM_SILICON:
		return "SILICON";
	case IVPU_PLATFORM_SIMICS:
		return "SIMICS";
	case IVPU_PLATFORM_FPGA:
		return "FPGA";
	case IVPU_PLATFORM_HSLE:
		return "HSLE";
	default:
		return "Invalid platform";
	}
}

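/* Read the platform type from the buttress and cache it on the device. */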
static void platform_init(struct ivpu_device *vdev)
{
	int platform = ivpu_hw_btrs_platform_read(vdev);

	ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", platform_to_str(platform), platform);

	switch (platform) {
	case IVPU_PLATFORM_SILICON:
	case IVPU_PLATFORM_SIMICS:
	case IVPU_PLATFORM_FPGA:
	case IVPU_PLATFORM_HSLE:
		vdev->platform = platform;
		break;

	default:
		ivpu_err(vdev, "Invalid platform type: %d\n", platform);
		break;
	}
}

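/*
 * Enable hardware workarounds (WAs) based on the buttress and IP generations,
 * device ID and revision; a few of them can be overridden through the
 * ivpu_test_mode module parameter.
 */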
static void wa_init(struct ivpu_device *vdev)
{
	vdev->wa.punit_disabled = false;
	vdev->wa.clear_runtime_mem = false;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);

	if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
	    ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0)
		vdev->wa.disable_clock_relinquish = true;

	if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_ENABLE)
		vdev->wa.disable_clock_relinquish = false;

	if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_DISABLE)
		vdev->wa.disable_clock_relinquish = true;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		vdev->wa.wp0_during_power_up = true;

	if (ivpu_test_mode & IVPU_TEST_MODE_D0I2_DISABLE)
		vdev->wa.disable_d0i2 = true;

	IVPU_PRINT_WA(punit_disabled);
	IVPU_PRINT_WA(clear_runtime_mem);
	IVPU_PRINT_WA(interrupt_clear_with_0);
	IVPU_PRINT_WA(disable_clock_relinquish);
	IVPU_PRINT_WA(wp0_during_power_up);
	IVPU_PRINT_WA(disable_d0i2);
}

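/*
 * Select timeout budgets for the detected platform; -1 disables the given
 * timeout. The FPGA and Simics models get far more generous budgets than
 * silicon, and IVPU_TEST_MODE_DISABLE_TIMEOUTS disables them all.
 */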
static void timeouts_init(struct ivpu_device *vdev)
{
	if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) {
		vdev->timeout.boot = -1;
		vdev->timeout.jsm = -1;
		vdev->timeout.tdr = -1;
		vdev->timeout.autosuspend = -1;
		vdev->timeout.d0i3_entry_msg = -1;
		vdev->timeout.state_dump_msg = -1;
	} else if (ivpu_is_fpga(vdev)) {
		vdev->timeout.boot = 50;
		vdev->timeout.jsm = 15000;
		vdev->timeout.tdr = 30000;
		vdev->timeout.autosuspend = -1;
		vdev->timeout.d0i3_entry_msg = 500;
		vdev->timeout.state_dump_msg = 10000;
	} else if (ivpu_is_simics(vdev)) {
		vdev->timeout.boot = 50;
		vdev->timeout.jsm = 500;
		vdev->timeout.tdr = 10000;
		vdev->timeout.autosuspend = 100;
		vdev->timeout.d0i3_entry_msg = 100;
		vdev->timeout.state_dump_msg = 10;
	} else {
		vdev->timeout.boot = 1000;
		vdev->timeout.jsm = 500;
		vdev->timeout.tdr = 2000;
		if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
			vdev->timeout.autosuspend = 10;
		else
			vdev->timeout.autosuspend = 100;
		vdev->timeout.d0i3_entry_msg = 5;
		vdev->timeout.state_dump_msg = 100;
	}
}

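/*
 * Default grace period and process quantum values for each HW scheduler
 * priority band. The idle and realtime bands get a zero preemption grace
 * period; the process-level grace period is uniform across bands.
 */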
static void priority_bands_init(struct ivpu_device *vdev)
{
	/* Idle */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
	/* Normal */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
	/* Focus */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
	/* Realtime */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
}

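/*
 * Set up the NPU virtual address ranges. 37XX has a dedicated high DMA
 * range; on later generations the DMA range aliases the user range.
 */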
static void memory_ranges_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
		ivpu_hw_range_init(&vdev->hw->ranges.user, 0x88000000, 511 * SZ_1M);
		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
		ivpu_hw_range_init(&vdev->hw->ranges.dma, 0x200000000, SZ_128G);
	} else {
		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x80000000, SZ_2G);
		ivpu_hw_range_init(&vdev->hw->ranges.user, 0x100000000, SZ_256G);
		vdev->hw->ranges.dma = vdev->hw->ranges.user;
	}
}

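/*
 * Workpoint (WP) requests drive the NPU power state through the buttress;
 * WP 0 powers the IP down.
 */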
static int wp_enable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, true);
}

static int wp_disable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, false);
}

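/*
 * Power-up sequence: leave D0i3, drive the workpoint, configure the host
 * subsystem, then enable the power domain, AXI and TOP NOC. Early WP 0 and
 * D0i3 failures are only warned about; later failures abort the sequence.
 */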
int ivpu_hw_power_up(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(wp0_during_power_up)) {
		/* WP requests may fail when powering down, so issue WP 0 here */
		ret = wp_disable(vdev);
		if (ret)
			ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret);
	}

	ret = ivpu_hw_btrs_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = wp_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
		if (IVPU_WA(disable_clock_relinquish))
			ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
		ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
		ivpu_hw_btrs_ats_print_lnl(vdev);
	}

	ret = ivpu_hw_ip_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	ivpu_hw_ip_idle_gen_disable(vdev);

	ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
		return ret;
	}

	ret = ivpu_hw_ip_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
		ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);

	ret = ivpu_hw_ip_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}

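/* Capture matching host (boottime) and NPU (perf counter) timestamps at D0i3 entry. */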
static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
}

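/*
 * Reset the NPU IP and drive WP 0. Both steps are attempted even if the
 * first one fails; -EIO is returned if either step failed.
 */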
int ivpu_hw_reset(struct ivpu_device *vdev)
{
	int ret = 0;

	if (ivpu_hw_btrs_ip_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU IP\n");
		ret = -EIO;
	}

	if (wp_disable(vdev)) {
		ivpu_err(vdev, "Failed to disable workpoint\n");
		ret = -EIO;
	}

	return ret;
}

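/*
 * Power the NPU down: record the D0i3 entry timestamps, reset the IP and
 * enter D0i3. A non-idle NPU only triggers a warning here.
 */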
int ivpu_hw_power_down(struct ivpu_device *vdev)
{
	int ret = 0;

	save_d0i3_entry_timestamp(vdev);

	if (!ivpu_hw_is_idle(vdev))
		ivpu_warn(vdev, "NPU not idle during power down\n");

	if (ivpu_hw_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU\n");
		ret = -EIO;
	}

	if (ivpu_hw_btrs_d0i3_enable(vdev)) {
		ivpu_err(vdev, "Failed to enter D0I3\n");
		ret = -EIO;
	}

	return ret;
}

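/* One-time HW state initialization, done once at device probe time. */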
int ivpu_hw_init(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_info_init(vdev);
	ivpu_hw_btrs_freq_ratios_init(vdev);
	priority_bands_init(vdev);
	memory_ranges_init(vdev);
	platform_init(vdev);
	wa_init(vdev);
	timeouts_init(vdev);
	atomic_set(&vdev->hw->firewall_irq_counter, 0);

#ifdef CONFIG_FAULT_INJECTION
	if (ivpu_fail_hw)
		setup_fault_attr(&ivpu_hw_failure, ivpu_fail_hw);
#endif

	return 0;
}

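/* Prepare the IP (snooping off, TBU/MMU on) and start the SOC CPU running the FW. */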
int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
	int ret;

	ivpu_hw_ip_snoop_disable(vdev);
	ivpu_hw_ip_tbu_mmu_enable(vdev);
	ret = ivpu_hw_ip_soc_cpu_boot(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

	return ret;
}

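/*
 * Select the PLL profiling frequency. 37XX supports only the default
 * frequency, so the enable request is ignored there.
 */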
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
		return;
	}

	if (enable)
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
	else
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
}

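/* Pick the IP and buttress IRQ handlers matching the detected HW generations. */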
void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
	else
		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
	else
		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
}

void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
	ivpu_hw_ip_irq_enable(vdev);
	ivpu_hw_btrs_irq_enable(vdev);
}

void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_irq_disable(vdev);
	ivpu_hw_ip_irq_disable(vdev);
}

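/*
 * Top-level IRQ handler. Global interrupts are masked while the buttress and
 * IP sources are serviced and re-armed afterwards, so that any source still
 * pending re-triggers the MSI.
 */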
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
{
	struct ivpu_device *vdev = ptr;
	bool ip_handled, btrs_handled;

	ivpu_hw_btrs_global_int_disable(vdev);

	btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
	if (!ivpu_hw_is_idle(vdev) || !btrs_handled)
		ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
	else
		ip_handled = false;

	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
	ivpu_hw_btrs_global_int_enable(vdev);

	if (!ip_handled && !btrs_handled)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(vdev->drm.dev);
	return IRQ_HANDLED;
}