// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - 2024 Intel Corporation
 */

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_hw_btrs.h"
#include "ivpu_hw_ip.h"

#include <asm/msr-index.h>
#include <asm/msr.h>
#include <linux/dmi.h>
#include <linux/fault-inject.h>
#include <linux/pm_runtime.h>

#ifdef CONFIG_FAULT_INJECTION
DECLARE_FAULT_ATTR(ivpu_hw_failure);

static char *ivpu_fail_hw;
module_param_named_unsafe(fail_hw, ivpu_fail_hw, charp, 0444);
MODULE_PARM_DESC(fail_hw, "<interval>,<probability>,<space>,<times>");
#endif

#define FW_SHARED_MEM_ALIGNMENT SZ_512K /* VPU MTRR limitation */

#define ECC_MCA_SIGNAL_ENABLE_MASK 0xff

static char *platform_to_str(u32 platform)
{
	switch (platform) {
	case IVPU_PLATFORM_SILICON:
		return "SILICON";
	case IVPU_PLATFORM_SIMICS:
		return "SIMICS";
	case IVPU_PLATFORM_FPGA:
		return "FPGA";
	case IVPU_PLATFORM_HSLE:
		return "HSLE";
	default:
		return "Invalid platform";
	}
}

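/* Read the platform type from the buttress and cache it in the device structure. */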
static void platform_init(struct ivpu_device *vdev)
{
	int platform = ivpu_hw_btrs_platform_read(vdev);

	ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n", platform_to_str(platform), platform);

	switch (platform) {
	case IVPU_PLATFORM_SILICON:
	case IVPU_PLATFORM_SIMICS:
	case IVPU_PLATFORM_FPGA:
	case IVPU_PLATFORM_HSLE:
		vdev->platform = platform;
		break;

	default:
		ivpu_err(vdev, "Invalid platform type: %d\n", platform);
		break;
	}
}

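/*
 * Enable hardware workarounds (WAs) based on the buttress/IP generation,
 * device ID and stepping; test mode flags can override the clock relinquish
 * and D0i2 workarounds.
 */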
static void wa_init(struct ivpu_device *vdev)
{
	vdev->wa.punit_disabled = false;
	vdev->wa.clear_runtime_mem = false;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);

	if ((ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL &&
	     ivpu_revision(vdev) < IVPU_HW_IP_REV_LNL_B0) ||
	    (ivpu_device_id(vdev) == PCI_DEVICE_ID_NVL &&
	     ivpu_revision(vdev) == IVPU_HW_IP_REV_NVL_A0))
		vdev->wa.disable_clock_relinquish = true;

	if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_ENABLE)
		vdev->wa.disable_clock_relinquish = false;

	if (ivpu_test_mode & IVPU_TEST_MODE_CLK_RELINQ_DISABLE)
		vdev->wa.disable_clock_relinquish = true;

	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		vdev->wa.wp0_during_power_up = true;

	if (ivpu_test_mode & IVPU_TEST_MODE_D0I2_DISABLE)
		vdev->wa.disable_d0i2 = true;

	IVPU_PRINT_WA(punit_disabled);
	IVPU_PRINT_WA(clear_runtime_mem);
	IVPU_PRINT_WA(interrupt_clear_with_0);
	IVPU_PRINT_WA(disable_clock_relinquish);
	IVPU_PRINT_WA(wp0_during_power_up);
	IVPU_PRINT_WA(disable_d0i2);
}

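/*
 * Select timeout values based on the platform type (silicon, Simics, FPGA).
 * The disable-timeouts test mode sets the timeouts to -1 (disabled).
 */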
static void timeouts_init(struct ivpu_device *vdev)
{
	if (ivpu_test_mode & IVPU_TEST_MODE_DISABLE_TIMEOUTS) {
		vdev->timeout.boot = -1;
		vdev->timeout.jsm = -1;
		vdev->timeout.tdr = -1;
		vdev->timeout.inference = -1;
		vdev->timeout.autosuspend = -1;
		vdev->timeout.d0i3_entry_msg = -1;
	} else if (ivpu_is_fpga(vdev)) {
		vdev->timeout.boot = 50;
		vdev->timeout.jsm = 15000;
		vdev->timeout.tdr = 30000;
		vdev->timeout.inference = 900000;
		vdev->timeout.autosuspend = -1;
		vdev->timeout.d0i3_entry_msg = 500;
		vdev->timeout.state_dump_msg = 10000;
	} else if (ivpu_is_simics(vdev)) {
		vdev->timeout.boot = 50;
		vdev->timeout.jsm = 500;
		vdev->timeout.tdr = 10000;
		vdev->timeout.inference = 300000;
		vdev->timeout.autosuspend = 100;
		vdev->timeout.d0i3_entry_msg = 100;
		vdev->timeout.state_dump_msg = 10;
	} else {
		vdev->timeout.boot = 1000;
		vdev->timeout.jsm = 500;
		vdev->timeout.tdr = 2000;
		vdev->timeout.inference = 60000;
		if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
			vdev->timeout.autosuspend = 10;
		else
			vdev->timeout.autosuspend = 100;
		vdev->timeout.d0i3_entry_msg = 5;
		vdev->timeout.state_dump_msg = 100;
	}
}

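/*
 * Default grace period and process quantum for each HWS scheduling priority
 * band (idle, normal, focus, realtime).
 */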
static void priority_bands_init(struct ivpu_device *vdev)
{
	/* Idle */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 0;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_IDLE] = 160000;
	/* Normal */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_NORMAL] = 300000;
	/* Focus */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_FOCUS] = 200000;
	/* Realtime */
	vdev->hw->hws.grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 0;
	vdev->hw->hws.process_grace_period[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 50000;
	vdev->hw->hws.process_quantum[VPU_JOB_SCHEDULING_PRIORITY_BAND_REALTIME] = 200000;
}

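/*
 * Initialize an NPU address range. Fails with -EINVAL for a NULL range or
 * when start + size overflows.
 */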
int ivpu_hw_range_init(struct ivpu_device *vdev, struct ivpu_addr_range *range, u64 start, u64 size)
{
	u64 end;

	if (!range || check_add_overflow(start, size, &end)) {
		ivpu_err(vdev, "Invalid range: start 0x%llx size %llu\n", start, size);
		return -EINVAL;
	}

	range->start = start;
	range->end = end;

	return 0;
}

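/*
 * Set up the NPU virtual address ranges (runtime, global, user, shave, dma)
 * for the given IP generation. The global range must stay aligned to
 * FW_SHARED_MEM_ALIGNMENT because of the VPU MTRR limitation.
 */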
static void memory_ranges_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x84800000, SZ_64M);
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0xa0000000, 511 * SZ_1M);
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x180000000, SZ_2G);
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.dma, 0x200000000, SZ_128G);
	} else {
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.runtime, 0x80000000, SZ_64M);
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.global, 0x90000000, SZ_256M);
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.shave, 0x80000000, SZ_2G);
		ivpu_hw_range_init(vdev, &vdev->hw->ranges.user, 0x100000000, SZ_256G);
		vdev->hw->ranges.dma = vdev->hw->ranges.user;
	}

	drm_WARN_ON(&vdev->drm, !IS_ALIGNED(vdev->hw->ranges.global.start,
					    FW_SHARED_MEM_ALIGNMENT));
}

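/* Drive the NPU workpoint (WP) on/off through the buttress. */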
static int wp_enable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, true);
}

static int wp_disable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, false);
}

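/*
 * Power up the NPU: leave D0i3, drive the workpoint, configure the host
 * subsystem and enable the power domain, AXI and TOP NOC interconnects.
 */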
int ivpu_hw_power_up(struct ivpu_device *vdev)
{
	int ret;

	if (IVPU_WA(wp0_during_power_up)) {
		/* WP requests may fail when powering down, so issue WP 0 here */
		ret = wp_disable(vdev);
		if (ret)
			ivpu_warn(vdev, "Failed to disable workpoint: %d\n", ret);
	}

	ret = ivpu_hw_btrs_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = wp_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
		if (IVPU_WA(disable_clock_relinquish))
			ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
		ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
		ivpu_hw_btrs_ats_print_lnl(vdev);
	}

	ret = ivpu_hw_ip_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	ivpu_hw_ip_idle_gen_disable(vdev);

	ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
		return ret;
	}

	ret = ivpu_hw_ip_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
		ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);

	ret = ivpu_hw_ip_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}

static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
}

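/*
 * Reset the NPU IP and drive the workpoint to 0. Both steps are always
 * attempted; -EIO is returned if either fails.
 */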
int ivpu_hw_reset(struct ivpu_device *vdev)
{
	int ret = 0;

	if (ivpu_hw_btrs_ip_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU IP\n");
		ret = -EIO;
	}

	if (wp_disable(vdev)) {
		ivpu_err(vdev, "Failed to disable workpoint\n");
		ret = -EIO;
	}

	return ret;
}

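/*
 * Power down the NPU: record the D0i3 entry timestamps, reset the IP and
 * enter D0i3. Failures are reported but the sequence always runs to the end.
 */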
int ivpu_hw_power_down(struct ivpu_device *vdev)
{
	int ret = 0;

	save_d0i3_entry_timestamp(vdev);

	if (!ivpu_hw_is_idle(vdev))
		ivpu_warn(vdev, "NPU not idle during power down\n");

	if (ivpu_hw_reset(vdev)) {
		ivpu_err(vdev, "Failed to reset NPU\n");
		ret = -EIO;
	}

	if (ivpu_hw_btrs_d0i3_enable(vdev)) {
		ivpu_err(vdev, "Failed to enter D0I3\n");
		ret = -EIO;
	}

	return ret;
}

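/*
 * One-time HW state initialization: buttress info and frequency ratios,
 * scheduling priority bands, memory ranges, platform type, workarounds and
 * timeouts. Also arms HW fault injection when the fail_hw parameter is set.
 */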
int ivpu_hw_init(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_info_init(vdev);
	ivpu_hw_btrs_freq_ratios_init(vdev);
	priority_bands_init(vdev);
	memory_ranges_init(vdev);
	platform_init(vdev);
	wa_init(vdev);
	timeouts_init(vdev);
	atomic_set(&vdev->hw->firewall_irq_counter, 0);

#ifdef CONFIG_FAULT_INJECTION
	if (ivpu_fail_hw)
		setup_fault_attr(&ivpu_hw_failure, ivpu_fail_hw);
#endif

	return 0;
}

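/* Prepare the IP (disable snooping, enable the TBU MMU) and start the SoC CPU to boot the firmware. */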
int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
	int ret;

	ivpu_hw_ip_snoop_disable(vdev);
	ivpu_hw_ip_tbu_mmu_enable(vdev);
	ret = ivpu_hw_ip_soc_cpu_boot(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

	return ret;
}

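/* Select the PLL profiling frequency; 37XX only supports the default frequency. */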
void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
		return;
	}

	if (enable)
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
	else
		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
}

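/* Pick the IP and buttress IRQ handlers matching the HW generation. */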
void ivpu_irq_handlers_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
	else
		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
	else
		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
}

void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
	ivpu_hw_ip_irq_enable(vdev);
	ivpu_hw_btrs_irq_enable(vdev);
}

void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_irq_disable(vdev);
	ivpu_hw_ip_irq_disable(vdev);
}

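/*
 * Top-level IRQ handler: mask global interrupts, dispatch to the buttress
 * handler and, when the NPU is not idle or the buttress did not handle the
 * interrupt, to the IP handler. Global interrupts are re-enabled afterwards
 * so a pending interrupt re-triggers the MSI.
 */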
irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
{
	struct ivpu_device *vdev = ptr;
	bool ip_handled, btrs_handled;

	ivpu_hw_btrs_global_int_disable(vdev);

	btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
	if (!ivpu_hw_is_idle(vdev) || !btrs_handled)
		ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
	else
		ip_handled = false;

	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
	ivpu_hw_btrs_global_int_enable(vdev);

	if (!ip_handled && !btrs_handled)
		return IRQ_NONE;

	pm_runtime_mark_last_busy(vdev->drm.dev);
	return IRQ_HANDLED;
}

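/*
 * Check whether ECC errors are signaled through MCA: on IP generation 50XX
 * and newer, return true when any ECC/MCA signal enable bit is set in
 * MSR_INTEGRITY_CAPS.
 */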
bool ivpu_hw_uses_ecc_mca_signal(struct ivpu_device *vdev)
{
	unsigned long long msr_integrity_caps;
	int ret;

	if (ivpu_hw_ip_gen(vdev) < IVPU_HW_IP_50XX)
		return false;

	ret = rdmsrq_safe(MSR_INTEGRITY_CAPS, &msr_integrity_caps);
	if (ret) {
		ivpu_warn(vdev, "Error reading MSR_INTEGRITY_CAPS: %d\n", ret);
		return false;
	}

	ivpu_dbg(vdev, MISC, "MSR_INTEGRITY_CAPS: 0x%llx\n", msr_integrity_caps);

	return msr_integrity_caps & ECC_MCA_SIGNAL_ENABLE_MASK;
}