xref: /linux/drivers/accel/ivpu/ivpu_hw.c (revision f680df51ca5f4ab364c9bbfcdbd2737e32092454)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2020 - 2024 Intel Corporation
4  */
5 
6 #include "ivpu_drv.h"
7 #include "ivpu_hw.h"
8 #include "ivpu_hw_btrs.h"
9 #include "ivpu_hw_ip.h"
10 
11 #include <linux/dmi.h>
12 
13 static char *platform_to_str(u32 platform)
14 {
15 	switch (platform) {
16 	case IVPU_PLATFORM_SILICON:
17 		return "SILICON";
18 	case IVPU_PLATFORM_SIMICS:
19 		return "SIMICS";
20 	case IVPU_PLATFORM_FPGA:
21 		return "FPGA";
22 	default:
23 		return "Invalid platform";
24 	}
25 }
26 
/*
 * DMI board identifiers that indicate the driver is running under the
 * Simics simulator; consulted by platform_init() via dmi_check_system().
 * The first entry matches a specific LNL RVP board identity (name,
 * version, serial) that is presumably only reported by simulation —
 * TODO(review): confirm real lnlrvp boards never report this triple.
 */
static const struct dmi_system_id dmi_platform_simulation[] = {
	{
		.ident = "Intel Simics",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "lnlrvp"),
			DMI_MATCH(DMI_BOARD_VERSION, "1.0"),
			DMI_MATCH(DMI_BOARD_SERIAL, "123456789"),
		},
	},
	{
		.ident = "Intel Simics",
		.matches = {
			DMI_MATCH(DMI_BOARD_NAME, "Simics"),
		},
	},
	{ }	/* sentinel - required terminator for dmi_check_system() */
};
44 
45 static void platform_init(struct ivpu_device *vdev)
46 {
47 	if (dmi_check_system(dmi_platform_simulation))
48 		vdev->platform = IVPU_PLATFORM_SIMICS;
49 	else
50 		vdev->platform = IVPU_PLATFORM_SILICON;
51 
52 	ivpu_dbg(vdev, MISC, "Platform type: %s (%d)\n",
53 		 platform_to_str(vdev->platform), vdev->platform);
54 }
55 
/*
 * Populate the per-device workaround flags and print the ones in effect.
 * Note: interrupt_clear_with_0 and disable_clock_relinquish are only
 * written when their condition holds; otherwise the pre-existing value
 * (presumably zero from device allocation) is kept.
 */
static void wa_init(struct ivpu_device *vdev)
{
	/* PUNIT access is treated as disabled on FPGA platforms */
	vdev->wa.punit_disabled = ivpu_is_fpga(vdev);
	vdev->wa.clear_runtime_mem = false;

	/* Only MTL buttress can require clearing interrupts by writing 0 */
	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
		vdev->wa.interrupt_clear_with_0 = ivpu_hw_btrs_irqs_clear_with_0_mtl(vdev);

	if (ivpu_device_id(vdev) == PCI_DEVICE_ID_LNL)
		vdev->wa.disable_clock_relinquish = true;

	IVPU_PRINT_WA(punit_disabled);
	IVPU_PRINT_WA(clear_runtime_mem);
	IVPU_PRINT_WA(interrupt_clear_with_0);
	IVPU_PRINT_WA(disable_clock_relinquish);
}
72 
73 static void timeouts_init(struct ivpu_device *vdev)
74 {
75 	if (ivpu_is_fpga(vdev)) {
76 		vdev->timeout.boot = 100000;
77 		vdev->timeout.jsm = 50000;
78 		vdev->timeout.tdr = 2000000;
79 		vdev->timeout.reschedule_suspend = 1000;
80 		vdev->timeout.autosuspend = -1;
81 		vdev->timeout.d0i3_entry_msg = 500;
82 	} else if (ivpu_is_simics(vdev)) {
83 		vdev->timeout.boot = 50;
84 		vdev->timeout.jsm = 500;
85 		vdev->timeout.tdr = 10000;
86 		vdev->timeout.reschedule_suspend = 10;
87 		vdev->timeout.autosuspend = -1;
88 		vdev->timeout.d0i3_entry_msg = 100;
89 	} else {
90 		vdev->timeout.boot = 1000;
91 		vdev->timeout.jsm = 500;
92 		vdev->timeout.tdr = 2000;
93 		vdev->timeout.reschedule_suspend = 10;
94 		vdev->timeout.autosuspend = 10;
95 		vdev->timeout.d0i3_entry_msg = 5;
96 	}
97 }
98 
/*
 * Set up the NPU address ranges (global, user, shave, dma).
 * The global and dma ranges are identical on both layouts; 37XX places
 * the user and shave ranges at different base addresses than later IP
 * generations, where user/shave share a contiguous 2G region starting
 * at 0x80000000.
 */
static void memory_ranges_init(struct ivpu_device *vdev)
{
	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
		ivpu_hw_range_init(&vdev->hw->ranges.user,   0xc0000000, 255 * SZ_1M);
		ivpu_hw_range_init(&vdev->hw->ranges.shave, 0x180000000, SZ_2G);
		ivpu_hw_range_init(&vdev->hw->ranges.dma,   0x200000000, SZ_8G);
	} else {
		ivpu_hw_range_init(&vdev->hw->ranges.global, 0x80000000, SZ_512M);
		ivpu_hw_range_init(&vdev->hw->ranges.user,   0x80000000, SZ_256M);
		/* shave starts immediately after the 256M user range */
		ivpu_hw_range_init(&vdev->hw->ranges.shave,  0x80000000 + SZ_256M, SZ_2G - SZ_256M);
		ivpu_hw_range_init(&vdev->hw->ranges.dma,   0x200000000, SZ_8G);
	}
}
113 
/* Ask the buttress to drive the workpoint on. Returns 0 or negative errno. */
static int wp_enable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, true);
}
118 
/* Ask the buttress to drive the workpoint off. Returns 0 or negative errno. */
static int wp_disable(struct ivpu_device *vdev)
{
	return ivpu_hw_btrs_wp_drive(vdev, false);
}
123 
/*
 * Power up the NPU: exit D0i3, drive the workpoint, then bring up the host
 * subsystem, power domain, AXI and TOP NOC in that order. The sequence is
 * order-sensitive hardware bring-up; do not reorder the steps.
 * Returns 0 on success or the negative errno of the first fatal step.
 */
int ivpu_hw_power_up(struct ivpu_device *vdev)
{
	int ret;

	/* D0i3 exit failure is treated as non-fatal: warn and continue */
	ret = ivpu_hw_btrs_d0i3_disable(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to disable D0I3: %d\n", ret);

	ret = wp_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable workpoint: %d\n", ret);
		return ret;
	}

	/* LNL-and-later buttress: apply WA, set profiling freq, log ATS state */
	if (ivpu_hw_btrs_gen(vdev) >= IVPU_HW_BTRS_LNL) {
		if (IVPU_WA(disable_clock_relinquish))
			ivpu_hw_btrs_clock_relinquish_disable_lnl(vdev);
		ivpu_hw_btrs_profiling_freq_reg_set_lnl(vdev);
		ivpu_hw_btrs_ats_print_lnl(vdev);
	}

	ret = ivpu_hw_ip_host_ss_configure(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to configure host SS: %d\n", ret);
		return ret;
	}

	ivpu_hw_ip_idle_gen_disable(vdev);

	ret = ivpu_hw_btrs_wait_for_clock_res_own_ack(vdev);
	if (ret) {
		ivpu_err(vdev, "Timed out waiting for clock resource own ACK\n");
		return ret;
	}

	ret = ivpu_hw_ip_pwr_domain_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable power domain: %d\n", ret);
		return ret;
	}

	ret = ivpu_hw_ip_host_ss_axi_enable(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable AXI: %d\n", ret);
		return ret;
	}

	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_LNL)
		ivpu_hw_btrs_set_port_arbitration_weights_lnl(vdev);

	ret = ivpu_hw_ip_top_noc_enable(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to enable TOP NOC: %d\n", ret);

	return ret;
}
180 
/*
 * Record host (boottime) and NPU (perf timer counter) timestamps at D0i3
 * entry — presumably so the two clock domains can be correlated later.
 */
static void save_d0i3_entry_timestamp(struct ivpu_device *vdev)
{
	vdev->hw->d0i3_entry_host_ts = ktime_get_boottime();
	vdev->hw->d0i3_entry_vpu_ts = ivpu_hw_ip_read_perf_timer_counter(vdev);
}
186 
187 int ivpu_hw_reset(struct ivpu_device *vdev)
188 {
189 	int ret = 0;
190 
191 	if (ivpu_hw_btrs_ip_reset(vdev)) {
192 		ivpu_err(vdev, "Failed to reset NPU IP\n");
193 		ret = -EIO;
194 	}
195 
196 	if (wp_disable(vdev)) {
197 		ivpu_err(vdev, "Failed to disable workpoint\n");
198 		ret = -EIO;
199 	}
200 
201 	return ret;
202 }
203 
204 int ivpu_hw_power_down(struct ivpu_device *vdev)
205 {
206 	int ret = 0;
207 
208 	save_d0i3_entry_timestamp(vdev);
209 
210 	if (!ivpu_hw_is_idle(vdev))
211 		ivpu_warn(vdev, "NPU not idle during power down\n");
212 
213 	if (ivpu_hw_reset(vdev)) {
214 		ivpu_err(vdev, "Failed to reset NPU\n");
215 		ret = -EIO;
216 	}
217 
218 	if (ivpu_hw_btrs_d0i3_enable(vdev)) {
219 		ivpu_err(vdev, "Failed to enter D0I3\n");
220 		ret = -EIO;
221 	}
222 
223 	return ret;
224 }
225 
/*
 * One-time HW-state initialization: buttress info and frequency ratios,
 * address ranges, platform detection, workarounds and timeouts.
 * Keep this ordering — later steps (e.g. wa_init) query buttress state.
 * Always returns 0.
 */
int ivpu_hw_init(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_info_init(vdev);
	ivpu_hw_btrs_freq_ratios_init(vdev);
	memory_ranges_init(vdev);
	platform_init(vdev);
	wa_init(vdev);
	timeouts_init(vdev);

	return 0;
}
237 
/*
 * Prepare the IP (snoop off, TBU MMU on) and boot the SoC CPU that runs
 * the NPU firmware. Returns 0 on success or the boot step's negative errno.
 */
int ivpu_hw_boot_fw(struct ivpu_device *vdev)
{
	int ret;

	ivpu_hw_ip_snoop_disable(vdev);
	ivpu_hw_ip_tbu_mmu_enable(vdev);
	ret = ivpu_hw_ip_soc_cpu_boot(vdev);
	if (ret)
		ivpu_err(vdev, "Failed to boot SOC CPU: %d\n", ret);

	return ret;
}
250 
251 void ivpu_hw_profiling_freq_drive(struct ivpu_device *vdev, bool enable)
252 {
253 	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX) {
254 		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
255 		return;
256 	}
257 
258 	if (enable)
259 		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_HIGH;
260 	else
261 		vdev->hw->pll.profiling_freq = PLL_PROFILING_FREQ_DEFAULT;
262 }
263 
264 void ivpu_irq_handlers_init(struct ivpu_device *vdev)
265 {
266 	INIT_KFIFO(vdev->hw->irq.fifo);
267 
268 	if (ivpu_hw_ip_gen(vdev) == IVPU_HW_IP_37XX)
269 		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_37xx;
270 	else
271 		vdev->hw->irq.ip_irq_handler = ivpu_hw_ip_irq_handler_40xx;
272 
273 	if (ivpu_hw_btrs_gen(vdev) == IVPU_HW_BTRS_MTL)
274 		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_mtl;
275 	else
276 		vdev->hw->irq.btrs_irq_handler = ivpu_hw_btrs_irq_handler_lnl;
277 }
278 
/* Drop any stale deferred-IRQ events, then enable IP and buttress IRQs. */
void ivpu_hw_irq_enable(struct ivpu_device *vdev)
{
	kfifo_reset(&vdev->hw->irq.fifo);
	ivpu_hw_ip_irq_enable(vdev);
	ivpu_hw_btrs_irq_enable(vdev);
}
285 
/* Disable IRQ sources in reverse order of enable: buttress first, then IP. */
void ivpu_hw_irq_disable(struct ivpu_device *vdev)
{
	ivpu_hw_btrs_irq_disable(vdev);
	ivpu_hw_ip_irq_disable(vdev);
}
291 
292 irqreturn_t ivpu_hw_irq_handler(int irq, void *ptr)
293 {
294 	struct ivpu_device *vdev = ptr;
295 	bool ip_handled, btrs_handled;
296 
297 	ivpu_hw_btrs_global_int_disable(vdev);
298 
299 	btrs_handled = ivpu_hw_btrs_irq_handler(vdev, irq);
300 	if (!ivpu_hw_is_idle((vdev)) || !btrs_handled)
301 		ip_handled = ivpu_hw_ip_irq_handler(vdev, irq);
302 	else
303 		ip_handled = false;
304 
305 	/* Re-enable global interrupts to re-trigger MSI for pending interrupts */
306 	ivpu_hw_btrs_global_int_enable(vdev);
307 
308 	if (!kfifo_is_empty(&vdev->hw->irq.fifo))
309 		return IRQ_WAKE_THREAD;
310 	if (ip_handled || btrs_handled)
311 		return IRQ_HANDLED;
312 	return IRQ_NONE;
313 }
314