xref: /linux/drivers/accel/ivpu/ivpu_fw.c (revision 852be13f3bd32c1eab808840cfac41b1fea25991)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/highmem.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>

#include "vpu_boot_api.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_pm.h"

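/*
 * The firmware runtime region must live in the 2 GB - 3 GB window of the VPU
 * address space; the shared region is carved out of the same window right
 * after the runtime region (see ivpu_fw_update_global_range()).
 */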
#define FW_GLOBAL_MEM_START	(2ull * SZ_1G)
#define FW_GLOBAL_MEM_END	(3ull * SZ_1G)
#define FW_SHARED_MEM_SIZE	SZ_256M /* Must be aligned to FW_SHARED_MEM_ALIGNMENT */
#define FW_SHARED_MEM_ALIGNMENT	SZ_128K /* VPU MTRR limitation */
#define FW_RUNTIME_MAX_SIZE	SZ_512M
#define FW_SHAVE_NN_MAX_SIZE	SZ_2M
#define FW_RUNTIME_MIN_ADDR	(FW_GLOBAL_MEM_START)
#define FW_RUNTIME_MAX_ADDR	(FW_GLOBAL_MEM_END - FW_SHARED_MEM_SIZE)
#define FW_VERSION_HEADER_SIZE	SZ_4K
#define FW_FILE_IMAGE_OFFSET	(VPU_FW_HEADER_SIZE + FW_VERSION_HEADER_SIZE)

#define WATCHDOG_MSS_REDIRECT	32
#define WATCHDOG_NCE_REDIRECT	33

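/* Convert a VPU address into an L2 cache config value (2 GB granularity) */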
#define ADDR_TO_L2_CACHE_CFG(addr) ((addr) >> 31)

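/*
 * Check a FW API (e.g. BOOT, JSM) against the VPU_<name>_API_VER_* values
 * the driver was compiled with.
 */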
#define IVPU_FW_CHECK_API(vdev, fw_hdr, name) ivpu_fw_check_api(vdev, fw_hdr, #name, \
								  VPU_##name##_API_VER_INDEX, \
								  VPU_##name##_API_VER_MAJOR, \
								  VPU_##name##_API_VER_MINOR)

static char *ivpu_firmware;
module_param_named_unsafe(firmware, ivpu_firmware, charp, 0644);
MODULE_PARM_DESC(firmware, "VPU firmware binary in /lib/firmware/..");

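/*
 * Request the firmware file: honor the "firmware" module parameter if set,
 * otherwise try the known firmware names in order.
 */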
static int ivpu_fw_request(struct ivpu_device *vdev)
{
	static const char * const fw_names[] = {
		"mtl_vpu.bin",
		"intel/vpu/mtl_vpu_v0.0.bin"
	};
	int ret = -ENOENT;
	int i;

	if (ivpu_firmware)
		return request_firmware(&vdev->fw->file, ivpu_firmware, vdev->drm.dev);

	for (i = 0; i < ARRAY_SIZE(fw_names); i++) {
		ret = firmware_request_nowarn(&vdev->fw->file, fw_names[i], vdev->drm.dev);
		if (!ret)
			return 0;
	}

	ivpu_err(vdev, "Failed to request firmware: %d\n", ret);
	return ret;
}

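/*
 * Check one entry of the FW header API version table against the version the
 * driver was built with. Each 32-bit entry packs the major version in the
 * upper and the minor version in the lower 16 bits; only a major mismatch is
 * reported with a warning.
 */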
static void
ivpu_fw_check_api(struct ivpu_device *vdev, const struct vpu_firmware_header *fw_hdr,
		  const char *str, int index, u16 expected_major, u16 expected_minor)
{
	u16 major = (u16)(fw_hdr->api_version[index] >> 16);
	u16 minor = (u16)(fw_hdr->api_version[index]);

	if (major != expected_major) {
		ivpu_warn(vdev, "Incompatible FW %s API version: %d.%d (expected %d.%d)\n",
			  str, major, minor, expected_major, expected_minor);
	}
	ivpu_dbg(vdev, FW_BOOT, "FW %s API version: %d.%d (expected %d.%d)\n",
		 str, major, minor, expected_major, expected_minor);
}

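/*
 * Validate the firmware file header and record the runtime region, image
 * placement, SHAVE NN size and entry point in vdev->fw.
 */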
static int ivpu_fw_parse(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	const struct vpu_firmware_header *fw_hdr = (const void *)fw->file->data;
	u64 runtime_addr, image_load_addr, runtime_size, image_size;

	if (fw->file->size <= FW_FILE_IMAGE_OFFSET) {
		ivpu_err(vdev, "Firmware file is too small: %zu\n", fw->file->size);
		return -EINVAL;
	}

	if (fw_hdr->header_version != VPU_FW_HEADER_VERSION) {
		ivpu_err(vdev, "Invalid firmware header version: %u\n", fw_hdr->header_version);
		return -EINVAL;
	}

	runtime_addr = fw_hdr->boot_params_load_address;
	runtime_size = fw_hdr->runtime_size;
	image_load_addr = fw_hdr->image_load_address;
	image_size = fw_hdr->image_size;

	if (runtime_addr < FW_RUNTIME_MIN_ADDR || runtime_addr > FW_RUNTIME_MAX_ADDR) {
		ivpu_err(vdev, "Invalid firmware runtime address: 0x%llx\n", runtime_addr);
		return -EINVAL;
	}

	if (runtime_size < fw->file->size || runtime_size > FW_RUNTIME_MAX_SIZE) {
		ivpu_err(vdev, "Invalid firmware runtime size: %llu\n", runtime_size);
		return -EINVAL;
	}

	if (FW_FILE_IMAGE_OFFSET + image_size > fw->file->size) {
		ivpu_err(vdev, "Invalid image size: %llu\n", image_size);
		return -EINVAL;
	}

	if (image_load_addr < runtime_addr ||
	    image_load_addr + image_size > runtime_addr + runtime_size) {
		ivpu_err(vdev, "Invalid firmware load address: 0x%llx (image size %llu)\n",
			 image_load_addr, image_size);
		return -EINVAL;
	}

	if (fw_hdr->shave_nn_fw_size > FW_SHAVE_NN_MAX_SIZE) {
		ivpu_err(vdev, "SHAVE NN firmware is too big: %u\n", fw_hdr->shave_nn_fw_size);
		return -EINVAL;
	}

	if (fw_hdr->entry_point < image_load_addr ||
	    fw_hdr->entry_point >= image_load_addr + image_size) {
		ivpu_err(vdev, "Invalid entry point: 0x%llx\n", fw_hdr->entry_point);
		return -EINVAL;
	}

	fw->runtime_addr = runtime_addr;
	fw->runtime_size = runtime_size;
	fw->image_load_offset = image_load_addr - runtime_addr;
	fw->image_size = image_size;
	fw->shave_nn_size = PAGE_ALIGN(fw_hdr->shave_nn_fw_size);

	fw->cold_boot_entry_point = fw_hdr->entry_point;
	fw->entry_point = fw->cold_boot_entry_point;

	ivpu_dbg(vdev, FW_BOOT, "Header version: 0x%x, format 0x%x\n",
		 fw_hdr->header_version, fw_hdr->image_format);
	ivpu_dbg(vdev, FW_BOOT, "Size: file %lu image %u runtime %u shavenn %u\n",
		 fw->file->size, fw->image_size, fw->runtime_size, fw->shave_nn_size);
	ivpu_dbg(vdev, FW_BOOT, "Address: runtime 0x%llx, load 0x%llx, entry point 0x%llx\n",
		 fw->runtime_addr, image_load_addr, fw->entry_point);
	ivpu_dbg(vdev, FW_BOOT, "FW version: %s\n", (char *)fw_hdr + VPU_FW_HEADER_SIZE);

	IVPU_FW_CHECK_API(vdev, fw_hdr, BOOT);
	IVPU_FW_CHECK_API(vdev, fw_hdr, JSM);

	return 0;
}

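/* Drop the reference to the firmware file taken in ivpu_fw_request() */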
static void ivpu_fw_release(struct ivpu_device *vdev)
{
	release_firmware(vdev->fw->file);
}

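/*
 * Place the shared (uncached) region directly after the firmware runtime
 * region, aligned to the VPU MTRR granularity, and make sure it still fits
 * below FW_GLOBAL_MEM_END.
 */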
static int ivpu_fw_update_global_range(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	u64 start = ALIGN(fw->runtime_addr + fw->runtime_size, FW_SHARED_MEM_ALIGNMENT);
	u64 size = FW_SHARED_MEM_SIZE;

	if (start + size > FW_GLOBAL_MEM_END) {
		ivpu_err(vdev, "No space for shared region, start %llu, size %llu\n", start, size);
		return -EINVAL;
	}

	ivpu_hw_init_range(&vdev->hw->ranges.global_low, start, size);
	return 0;
}

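/*
 * Allocate the firmware runtime buffer at the VPU address requested by the
 * firmware header and, if the image contains SHAVE NN code, a separate
 * buffer for it in the high memory range.
 */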
static int ivpu_fw_mem_init(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	int ret;

	ret = ivpu_fw_update_global_range(vdev);
	if (ret)
		return ret;

	fw->mem = ivpu_bo_alloc_internal(vdev, fw->runtime_addr, fw->runtime_size, DRM_IVPU_BO_WC);
	if (!fw->mem) {
		ivpu_err(vdev, "Failed to allocate firmware runtime memory\n");
		return -ENOMEM;
	}

	if (fw->shave_nn_size) {
		fw->mem_shave_nn = ivpu_bo_alloc_internal(vdev, vdev->hw->ranges.global_high.start,
							  fw->shave_nn_size, DRM_IVPU_BO_UNCACHED);
		if (!fw->mem_shave_nn) {
			ivpu_err(vdev, "Failed to allocate shavenn buffer\n");
			ivpu_bo_free_internal(fw->mem);
			return -ENOMEM;
		}
	}

	return 0;
}

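/* Free the firmware runtime buffer and the optional SHAVE NN buffer */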
static void ivpu_fw_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;

	if (fw->mem_shave_nn) {
		ivpu_bo_free_internal(fw->mem_shave_nn);
		fw->mem_shave_nn = NULL;
	}

	ivpu_bo_free_internal(fw->mem);
	fw->mem = NULL;
}

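/* Request the firmware file, parse its header and allocate the FW buffers */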
int ivpu_fw_init(struct ivpu_device *vdev)
{
	int ret;

	ret = ivpu_fw_request(vdev);
	if (ret)
		return ret;

	ret = ivpu_fw_parse(vdev);
	if (ret)
		goto err_fw_release;

	ret = ivpu_fw_mem_init(vdev);
	if (ret)
		goto err_fw_release;

	return 0;

err_fw_release:
	ivpu_fw_release(vdev);
	return ret;
}

void ivpu_fw_fini(struct ivpu_device *vdev)
{
	ivpu_fw_mem_fini(vdev);
	ivpu_fw_release(vdev);
}

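/*
 * Copy the firmware image into the runtime buffer at its load offset and zero
 * the area in front of it. When the clear_runtime_mem workaround is enabled,
 * the rest of the runtime buffer is cleared as well.
 */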
int ivpu_fw_load(struct ivpu_device *vdev)
{
	struct ivpu_fw_info *fw = vdev->fw;
	u64 image_end_offset = fw->image_load_offset + fw->image_size;

	memset(fw->mem->kvaddr, 0, fw->image_load_offset);
	memcpy(fw->mem->kvaddr + fw->image_load_offset,
	       fw->file->data + FW_FILE_IMAGE_OFFSET, fw->image_size);

	if (IVPU_WA(clear_runtime_mem)) {
		u8 *start = fw->mem->kvaddr + image_end_offset;
		u64 size = fw->mem->base.size - image_end_offset;

		memset(start, 0, size);
	}

	wmb(); /* Flush WC buffers after writing fw->mem */

	return 0;
}

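/* Dump all boot parameters to the FW_BOOT debug log */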
static void ivpu_fw_boot_params_print(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
	ivpu_dbg(vdev, FW_BOOT, "boot_params.magic = 0x%x\n",
		 boot_params->magic);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_id = 0x%x\n",
		 boot_params->vpu_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_count = 0x%x\n",
		 boot_params->vpu_count);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.frequency = %u\n",
		 boot_params->frequency);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.perf_clk_frequency = %u\n",
		 boot_params->perf_clk_frequency);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_start = 0x%llx\n",
		 boot_params->ipc_header_area_start);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_header_area_size = 0x%x\n",
		 boot_params->ipc_header_area_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_base = 0x%llx\n",
		 boot_params->shared_region_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.shared_region_size = 0x%x\n",
		 boot_params->shared_region_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_start = 0x%llx\n",
		 boot_params->ipc_payload_area_start);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.ipc_payload_area_size = 0x%x\n",
		 boot_params->ipc_payload_area_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_base = 0x%llx\n",
		 boot_params->global_aliased_pio_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_aliased_pio_size = 0x%x\n",
		 boot_params->global_aliased_pio_size);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.autoconfig = 0x%x\n",
		 boot_params->autoconfig);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 0x%x\n",
		 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg = 0x%x\n",
		 boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_base = 0x%llx\n",
		 boot_params->global_memory_allocator_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.global_memory_allocator_size = 0x%x\n",
		 boot_params->global_memory_allocator_size);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.shave_nn_fw_base = 0x%llx\n",
		 boot_params->shave_nn_fw_base);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_mss = 0x%x\n",
		 boot_params->watchdog_irq_mss);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.watchdog_irq_nce = 0x%x\n",
		 boot_params->watchdog_irq_nce);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.host_to_vpu_irq = 0x%x\n",
		 boot_params->host_to_vpu_irq);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.job_done_irq = 0x%x\n",
		 boot_params->job_done_irq);

	ivpu_dbg(vdev, FW_BOOT, "boot_params.host_version_id = 0x%x\n",
		 boot_params->host_version_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.si_stepping = 0x%x\n",
		 boot_params->si_stepping);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.device_id = 0x%llx\n",
		 boot_params->device_id);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.feature_exclusion = 0x%llx\n",
		 boot_params->feature_exclusion);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.sku = 0x%llx\n",
		 boot_params->sku);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.min_freq_pll_ratio = 0x%x\n",
		 boot_params->min_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.pn_freq_pll_ratio = 0x%x\n",
		 boot_params->pn_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.max_freq_pll_ratio = 0x%x\n",
		 boot_params->max_freq_pll_ratio);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.default_trace_level = 0x%x\n",
		 boot_params->default_trace_level);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.tracing_buff_message_format_mask = 0x%llx\n",
		 boot_params->tracing_buff_message_format_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_destination_mask = 0x%x\n",
		 boot_params->trace_destination_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.trace_hw_component_mask = 0x%llx\n",
		 boot_params->trace_hw_component_mask);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.boot_type = 0x%x\n",
		 boot_params->boot_type);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_base = 0x%llx\n",
		 boot_params->punit_telemetry_sram_base);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.punit_telemetry_sram_size = 0x%llx\n",
		 boot_params->punit_telemetry_sram_size);
	ivpu_dbg(vdev, FW_BOOT, "boot_params.vpu_telemetry_enable = 0x%x\n",
		 boot_params->vpu_telemetry_enable);
}

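/*
 * Fill in the boot parameters consumed by the firmware. On warm boot only the
 * save/restore return address is cleared and the rest of the structure is
 * left untouched.
 */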
void ivpu_fw_boot_params_setup(struct ivpu_device *vdev, struct vpu_boot_params *boot_params)
{
	struct ivpu_bo *ipc_mem_rx = vdev->ipc->mem_rx;

	/* In case of warm boot we only have to reset the FW save/restore return address */
	if (!ivpu_fw_is_cold_boot(vdev)) {
		boot_params->save_restore_ret_address = 0;
		vdev->pm->is_warmboot = true;
		return;
	}

	vdev->pm->is_warmboot = false;

	boot_params->magic = VPU_BOOT_PARAMS_MAGIC;
	boot_params->vpu_id = to_pci_dev(vdev->drm.dev)->bus->number;
	boot_params->frequency = ivpu_hw_reg_pll_freq_get(vdev);

	/*
	 * Uncached region of VPU address space, covers IPC buffers, job queues
	 * and log buffers, programmable to L2$ Uncached by VPU MTRR
	 */
	boot_params->shared_region_base = vdev->hw->ranges.global_low.start;
	boot_params->shared_region_size = vdev->hw->ranges.global_low.end -
					  vdev->hw->ranges.global_low.start;

	boot_params->ipc_header_area_start = ipc_mem_rx->vpu_addr;
	boot_params->ipc_header_area_size = ipc_mem_rx->base.size / 2;

	boot_params->ipc_payload_area_start = ipc_mem_rx->vpu_addr + ipc_mem_rx->base.size / 2;
	boot_params->ipc_payload_area_size = ipc_mem_rx->base.size / 2;

	boot_params->global_aliased_pio_base =
		vdev->hw->ranges.global_aliased_pio.start;
	boot_params->global_aliased_pio_size =
		ivpu_hw_range_size(&vdev->hw->ranges.global_aliased_pio);

	/* Allow configuration for L2C_PAGE_TABLE with boot param value */
	boot_params->autoconfig = 1;

	/* Enable L2 cache for first 2GB of high memory */
	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].use = 1;
	boot_params->cache_defaults[VPU_BOOT_L2_CACHE_CFG_NN].cfg =
		ADDR_TO_L2_CACHE_CFG(vdev->hw->ranges.global_high.start);

	if (vdev->fw->mem_shave_nn)
		boot_params->shave_nn_fw_base = vdev->fw->mem_shave_nn->vpu_addr;

	boot_params->watchdog_irq_mss = WATCHDOG_MSS_REDIRECT;
	boot_params->watchdog_irq_nce = WATCHDOG_NCE_REDIRECT;
	boot_params->si_stepping = ivpu_revision(vdev);
	boot_params->device_id = ivpu_device_id(vdev);
	boot_params->feature_exclusion = vdev->hw->tile_fuse;
	boot_params->sku = vdev->hw->sku;

	boot_params->min_freq_pll_ratio = vdev->hw->pll.min_ratio;
	boot_params->pn_freq_pll_ratio = vdev->hw->pll.pn_ratio;
	boot_params->max_freq_pll_ratio = vdev->hw->pll.max_ratio;

	boot_params->punit_telemetry_sram_base = ivpu_hw_reg_telemetry_offset_get(vdev);
	boot_params->punit_telemetry_sram_size = ivpu_hw_reg_telemetry_size_get(vdev);
	boot_params->vpu_telemetry_enable = ivpu_hw_reg_telemetry_enable_get(vdev);

	wmb(); /* Flush WC buffers after writing bootparams */

	ivpu_fw_boot_params_print(vdev, boot_params);
}