xref: /linux/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c (revision 36f2ef2dd44e1c34b281336a41cf42a477d4b43f)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2016-2019 Intel Corporation
4  */
5 
6 #include <linux/bitfield.h>
7 #include <linux/firmware.h>
8 #include <linux/highmem.h>
9 
10 #include <drm/drm_cache.h>
11 #include <drm/drm_print.h>
12 
13 #include "gem/i915_gem_lmem.h"
14 #include "intel_uc_fw.h"
15 #include "intel_uc_fw_abi.h"
16 #include "i915_drv.h"
17 #include "i915_reg.h"
18 
19 static inline struct intel_gt *
20 ____uc_fw_to_gt(struct intel_uc_fw *uc_fw, enum intel_uc_fw_type type)
21 {
22 	if (type == INTEL_UC_FW_TYPE_GUC)
23 		return container_of(uc_fw, struct intel_gt, uc.guc.fw);
24 
25 	GEM_BUG_ON(type != INTEL_UC_FW_TYPE_HUC);
26 	return container_of(uc_fw, struct intel_gt, uc.huc.fw);
27 }
28 
29 static inline struct intel_gt *__uc_fw_to_gt(struct intel_uc_fw *uc_fw)
30 {
31 	GEM_BUG_ON(uc_fw->status == INTEL_UC_FIRMWARE_UNINITIALIZED);
32 	return ____uc_fw_to_gt(uc_fw, uc_fw->type);
33 }
34 
35 #ifdef CONFIG_DRM_I915_DEBUG_GUC
36 void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
37 			       enum intel_uc_fw_status status)
38 {
39 	uc_fw->__status = status;
40 	drm_dbg(&__uc_fw_to_gt(uc_fw)->i915->drm,
41 		"%s firmware -> %s\n",
42 		intel_uc_fw_type_repr(uc_fw->type),
43 		status == INTEL_UC_FIRMWARE_SELECTED ?
44 		uc_fw->path : intel_uc_fw_status_repr(status));
45 }
46 #endif
47 
48 /*
49  * List of required GuC and HuC binaries per-platform.
50  * Must be ordered based on platform + revid, from newer to older.
51  *
52  * Note that RKL and ADL-S have the same GuC/HuC device IDs and use the same
53  * firmware as TGL.
54  */
55 #define INTEL_GUC_FIRMWARE_DEFS(fw_def, guc_def) \
56 	fw_def(DG2,          0, guc_def(dg2,  70, 1, 2)) \
57 	fw_def(ALDERLAKE_P,  0, guc_def(adlp, 70, 1, 1)) \
58 	fw_def(ALDERLAKE_S,  0, guc_def(tgl,  70, 1, 1)) \
59 	fw_def(DG1,          0, guc_def(dg1,  70, 1, 1)) \
60 	fw_def(ROCKETLAKE,   0, guc_def(tgl,  70, 1, 1)) \
61 	fw_def(TIGERLAKE,    0, guc_def(tgl,  70, 1, 1)) \
62 	fw_def(JASPERLAKE,   0, guc_def(ehl,  70, 1, 1)) \
63 	fw_def(ELKHARTLAKE,  0, guc_def(ehl,  70, 1, 1)) \
64 	fw_def(ICELAKE,      0, guc_def(icl,  70, 1, 1)) \
65 	fw_def(COMETLAKE,    5, guc_def(cml,  70, 1, 1)) \
66 	fw_def(COMETLAKE,    0, guc_def(kbl,  70, 1, 1)) \
67 	fw_def(COFFEELAKE,   0, guc_def(kbl,  70, 1, 1)) \
68 	fw_def(GEMINILAKE,   0, guc_def(glk,  70, 1, 1)) \
69 	fw_def(KABYLAKE,     0, guc_def(kbl,  70, 1, 1)) \
70 	fw_def(BROXTON,      0, guc_def(bxt,  70, 1, 1)) \
71 	fw_def(SKYLAKE,      0, guc_def(skl,  70, 1, 1))
72 
73 #define INTEL_HUC_FIRMWARE_DEFS(fw_def, huc_def) \
74 	fw_def(ALDERLAKE_P,  0, huc_def(tgl,  7, 9, 3)) \
75 	fw_def(ALDERLAKE_S,  0, huc_def(tgl,  7, 9, 3)) \
76 	fw_def(DG1,          0, huc_def(dg1,  7, 9, 3)) \
77 	fw_def(ROCKETLAKE,   0, huc_def(tgl,  7, 9, 3)) \
78 	fw_def(TIGERLAKE,    0, huc_def(tgl,  7, 9, 3)) \
79 	fw_def(JASPERLAKE,   0, huc_def(ehl,  9, 0, 0)) \
80 	fw_def(ELKHARTLAKE,  0, huc_def(ehl,  9, 0, 0)) \
81 	fw_def(ICELAKE,      0, huc_def(icl,  9, 0, 0)) \
82 	fw_def(COMETLAKE,    5, huc_def(cml,  4, 0, 0)) \
83 	fw_def(COMETLAKE,    0, huc_def(kbl,  4, 0, 0)) \
84 	fw_def(COFFEELAKE,   0, huc_def(kbl,  4, 0, 0)) \
85 	fw_def(GEMINILAKE,   0, huc_def(glk,  4, 0, 0)) \
86 	fw_def(KABYLAKE,     0, huc_def(kbl,  4, 0, 0)) \
87 	fw_def(BROXTON,      0, huc_def(bxt,  2, 0, 0)) \
88 	fw_def(SKYLAKE,      0, huc_def(skl,  2, 0, 0))
89 
90 #define __MAKE_UC_FW_PATH(prefix_, name_, major_, minor_, patch_) \
91 	"i915/" \
92 	__stringify(prefix_) name_ \
93 	__stringify(major_) "." \
94 	__stringify(minor_) "." \
95 	__stringify(patch_) ".bin"
96 
97 #define MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_) \
98 	__MAKE_UC_FW_PATH(prefix_, "_guc_", major_, minor_, patch_)
99 
100 #define MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_) \
101 	__MAKE_UC_FW_PATH(prefix_, "_huc_", major_, minor_, bld_num_)
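/*
 * For example, MAKE_GUC_FW_PATH(tgl, 70, 1, 1) expands to
 * "i915/tgl_guc_70.1.1.bin" and MAKE_HUC_FW_PATH(tgl, 7, 9, 3) to
 * "i915/tgl_huc_7.9.3.bin"; request_firmware() then resolves these names
 * under the usual firmware search paths.
 */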
102 
103 /* All blobs need to be declared via MODULE_FIRMWARE() */
104 #define INTEL_UC_MODULE_FW(platform_, revid_, uc_) \
105 	MODULE_FIRMWARE(uc_);
106 
107 INTEL_GUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_GUC_FW_PATH)
108 INTEL_HUC_FIRMWARE_DEFS(INTEL_UC_MODULE_FW, MAKE_HUC_FW_PATH)
109 
110 /* The structs and macros below are used to iterate across the list of blobs */
111 struct __packed uc_fw_blob {
112 	u8 major;
113 	u8 minor;
114 	const char *path;
115 };
116 
117 #define UC_FW_BLOB(major_, minor_, path_) \
118 	{ .major = major_, .minor = minor_, .path = path_ }
119 
120 #define GUC_FW_BLOB(prefix_, major_, minor_, patch_) \
121 	UC_FW_BLOB(major_, minor_, \
122 		   MAKE_GUC_FW_PATH(prefix_, major_, minor_, patch_))
123 
124 #define HUC_FW_BLOB(prefix_, major_, minor_, bld_num_) \
125 	UC_FW_BLOB(major_, minor_, \
126 		   MAKE_HUC_FW_PATH(prefix_, major_, minor_, bld_num_))
127 
128 struct __packed uc_fw_platform_requirement {
129 	enum intel_platform p;
130 	u8 rev; /* first platform rev using this FW */
131 	const struct uc_fw_blob blob;
132 };
133 
134 #define MAKE_FW_LIST(platform_, revid_, uc_) \
135 { \
136 	.p = INTEL_##platform_, \
137 	.rev = revid_, \
138 	.blob = uc_, \
139 },
140 
141 struct fw_blobs_by_type {
142 	const struct uc_fw_platform_requirement *blobs;
143 	u32 count;
144 };
145 
146 static void
147 __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
148 {
149 	static const struct uc_fw_platform_requirement blobs_guc[] = {
150 		INTEL_GUC_FIRMWARE_DEFS(MAKE_FW_LIST, GUC_FW_BLOB)
151 	};
152 	static const struct uc_fw_platform_requirement blobs_huc[] = {
153 		INTEL_HUC_FIRMWARE_DEFS(MAKE_FW_LIST, HUC_FW_BLOB)
154 	};
155 	static const struct fw_blobs_by_type blobs_all[INTEL_UC_FW_NUM_TYPES] = {
156 		[INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) },
157 		[INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) },
158 	};
159 	const struct uc_fw_platform_requirement *fw_blobs;
160 	enum intel_platform p = INTEL_INFO(i915)->platform;
161 	u32 fw_count;
162 	u8 rev = INTEL_REVID(i915);
163 	int i;
164 
165 	/*
166 	 * The only difference between the ADL GuC FWs is the HWConfig support.
167 	 * ADL-N does not support HWConfig, so we should use the same binary as
168 	 * ADL-S, otherwise the GuC might attempt to fetch a config table that
169 	 * does not exist.
170 	 */
171 	if (IS_ADLP_N(i915))
172 		p = INTEL_ALDERLAKE_S;
173 
174 	GEM_BUG_ON(uc_fw->type >= ARRAY_SIZE(blobs_all));
175 	fw_blobs = blobs_all[uc_fw->type].blobs;
176 	fw_count = blobs_all[uc_fw->type].count;
177 
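	/*
	 * The table is sorted from newest to oldest platform (and, within a
	 * platform, from newest to oldest revid), so the first entry matching
	 * our platform with a starting revid <= our revid is the one to use.
	 * The p <= fw_blobs[i].p bound stops the walk once we have passed our
	 * platform in the sorted list.
	 */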
178 	for (i = 0; i < fw_count && p <= fw_blobs[i].p; i++) {
179 		if (p == fw_blobs[i].p && rev >= fw_blobs[i].rev) {
180 			const struct uc_fw_blob *blob = &fw_blobs[i].blob;
181 			uc_fw->path = blob->path;
182 			uc_fw->major_ver_wanted = blob->major;
183 			uc_fw->minor_ver_wanted = blob->minor;
184 			break;
185 		}
186 	}
187 
188 	/* make sure the list is ordered as expected */
189 	if (IS_ENABLED(CONFIG_DRM_I915_SELFTEST)) {
190 		for (i = 1; i < fw_count; i++) {
191 			if (fw_blobs[i].p < fw_blobs[i - 1].p)
192 				continue;
193 
194 			if (fw_blobs[i].p == fw_blobs[i - 1].p &&
195 			    fw_blobs[i].rev < fw_blobs[i - 1].rev)
196 				continue;
197 
198 			pr_err("invalid FW blob order: %s r%u comes before %s r%u\n",
199 			       intel_platform_name(fw_blobs[i - 1].p),
200 			       fw_blobs[i - 1].rev,
201 			       intel_platform_name(fw_blobs[i].p),
202 			       fw_blobs[i].rev);
203 
204 			uc_fw->path = NULL;
205 		}
206 	}
207 }
208 
209 static const char *__override_guc_firmware_path(struct drm_i915_private *i915)
210 {
211 	if (i915->params.enable_guc & ENABLE_GUC_MASK)
212 		return i915->params.guc_firmware_path;
213 	return "";
214 }
215 
216 static const char *__override_huc_firmware_path(struct drm_i915_private *i915)
217 {
218 	if (i915->params.enable_guc & ENABLE_GUC_LOAD_HUC)
219 		return i915->params.huc_firmware_path;
220 	return "";
221 }
222 
223 static void __uc_fw_user_override(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw)
224 {
225 	const char *path = NULL;
226 
227 	switch (uc_fw->type) {
228 	case INTEL_UC_FW_TYPE_GUC:
229 		path = __override_guc_firmware_path(i915);
230 		break;
231 	case INTEL_UC_FW_TYPE_HUC:
232 		path = __override_huc_firmware_path(i915);
233 		break;
234 	}
235 
236 	if (unlikely(path)) {
237 		uc_fw->path = path;
238 		uc_fw->user_overridden = true;
239 	}
240 }
241 
242 /**
243  * intel_uc_fw_init_early - initialize the uC object and select the firmware
244  * @uc_fw: uC firmware
245  * @type: type of uC
246  *
247  * Initialize the state of our uC object and relevant tracking and select the
248  * firmware to fetch and load.
249  */
250 void intel_uc_fw_init_early(struct intel_uc_fw *uc_fw,
251 			    enum intel_uc_fw_type type)
252 {
253 	struct drm_i915_private *i915 = ____uc_fw_to_gt(uc_fw, type)->i915;
254 
255 	/*
256 	 * we use FIRMWARE_UNINITIALIZED to detect checks against uc_fw->status
257 	 * before we've looked at the HW caps to see if we have uC support
258 	 */
259 	BUILD_BUG_ON(INTEL_UC_FIRMWARE_UNINITIALIZED);
260 	GEM_BUG_ON(uc_fw->status);
261 	GEM_BUG_ON(uc_fw->path);
262 
263 	uc_fw->type = type;
264 
265 	if (HAS_GT_UC(i915)) {
266 		__uc_fw_auto_select(i915, uc_fw);
267 		__uc_fw_user_override(i915, uc_fw);
268 	}
269 
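	/*
	 * A NULL path means uC is not supported (no uC hardware, or no blob
	 * defined for this platform); an empty string means support exists
	 * but was disabled via the user override; anything else is a
	 * selected blob.
	 */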
270 	intel_uc_fw_change_status(uc_fw, uc_fw->path ? *uc_fw->path ?
271 				  INTEL_UC_FIRMWARE_SELECTED :
272 				  INTEL_UC_FIRMWARE_DISABLED :
273 				  INTEL_UC_FIRMWARE_NOT_SUPPORTED);
274 }
275 
276 static void __force_fw_fetch_failures(struct intel_uc_fw *uc_fw, int e)
277 {
278 	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
279 	bool user = e == -EINVAL;
280 
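	/*
	 * Each i915_inject_probe_error() call below is a distinct fault
	 * injection point, so only one of these branches fires for a given
	 * value of the fault-injection modparam; the repeated conditions are
	 * intentional, not a copy/paste mistake.
	 */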
281 	if (i915_inject_probe_error(i915, e)) {
282 		/* non-existing blob */
283 		uc_fw->path = "<invalid>";
284 		uc_fw->user_overridden = user;
285 	} else if (i915_inject_probe_error(i915, e)) {
286 		/* require next major version */
287 		uc_fw->major_ver_wanted += 1;
288 		uc_fw->minor_ver_wanted = 0;
289 		uc_fw->user_overridden = user;
290 	} else if (i915_inject_probe_error(i915, e)) {
291 		/* require next minor version */
292 		uc_fw->minor_ver_wanted += 1;
293 		uc_fw->user_overridden = user;
294 	} else if (uc_fw->major_ver_wanted &&
295 		   i915_inject_probe_error(i915, e)) {
296 		/* require prev major version */
297 		uc_fw->major_ver_wanted -= 1;
298 		uc_fw->minor_ver_wanted = 0;
299 		uc_fw->user_overridden = user;
300 	} else if (uc_fw->minor_ver_wanted &&
301 		   i915_inject_probe_error(i915, e)) {
302 		/* require prev minor version - hey, this should work! */
303 		uc_fw->minor_ver_wanted -= 1;
304 		uc_fw->user_overridden = user;
305 	} else if (user && i915_inject_probe_error(i915, e)) {
306 		/* officially unsupported platform */
307 		uc_fw->major_ver_wanted = 0;
308 		uc_fw->minor_ver_wanted = 0;
309 		uc_fw->user_overridden = true;
310 	}
311 }
312 
313 /**
314  * intel_uc_fw_fetch - fetch uC firmware
315  * @uc_fw: uC firmware
316  *
317  * Fetch uC firmware into GEM obj.
318  *
319  * Return: 0 on success, a negative errno code on failure.
320  */
321 int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
322 {
323 	struct drm_i915_private *i915 = __uc_fw_to_gt(uc_fw)->i915;
324 	struct device *dev = i915->drm.dev;
325 	struct drm_i915_gem_object *obj;
326 	const struct firmware *fw = NULL;
327 	struct uc_css_header *css;
328 	size_t size;
329 	int err;
330 
331 	GEM_BUG_ON(!i915->wopcm.size);
332 	GEM_BUG_ON(!intel_uc_fw_is_enabled(uc_fw));
333 
334 	err = i915_inject_probe_error(i915, -ENXIO);
335 	if (err)
336 		goto fail;
337 
338 	__force_fw_fetch_failures(uc_fw, -EINVAL);
339 	__force_fw_fetch_failures(uc_fw, -ESTALE);
340 
341 	err = request_firmware(&fw, uc_fw->path, dev);
342 	if (err)
343 		goto fail;
344 
345 	/* Check the size of the blob before examining buffer contents */
346 	if (unlikely(fw->size < sizeof(struct uc_css_header))) {
347 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
348 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
349 			 fw->size, sizeof(struct uc_css_header));
350 		err = -ENODATA;
351 		goto fail;
352 	}
353 
354 	css = (struct uc_css_header *)fw->data;
355 
356 	/* Check integrity of size values inside CSS header */
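	/*
	 * header_size_dw is expected to account for the fixed struct
	 * uc_css_header plus the variable key, modulus and exponent data, so
	 * subtracting those three sizes must leave exactly
	 * sizeof(struct uc_css_header).
	 */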
357 	size = (css->header_size_dw - css->key_size_dw - css->modulus_size_dw -
358 		css->exponent_size_dw) * sizeof(u32);
359 	if (unlikely(size != sizeof(struct uc_css_header))) {
360 		drm_warn(&i915->drm,
361 			 "%s firmware %s: unexpected header size: %zu != %zu\n",
362 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
363 			 size, sizeof(struct uc_css_header));
364 		err = -EPROTO;
365 		goto fail;
366 	}
367 
368 	/* The uCode size must be calculated from the other size fields */
369 	uc_fw->ucode_size = (css->size_dw - css->header_size_dw) * sizeof(u32);
370 
371 	/* now RSA */
372 	uc_fw->rsa_size = css->key_size_dw * sizeof(u32);
373 
374 	/* The blob must contain at least the header, uCode and RSA key; check against the combined size of all three. */
375 	size = sizeof(struct uc_css_header) + uc_fw->ucode_size + uc_fw->rsa_size;
376 	if (unlikely(fw->size < size)) {
377 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu < %zu\n",
378 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
379 			 fw->size, size);
380 		err = -ENOEXEC;
381 		goto fail;
382 	}
383 
384 	/* Sanity check that the fw is not larger than the whole WOPCM memory */
385 	size = __intel_uc_fw_get_upload_size(uc_fw);
386 	if (unlikely(size >= i915->wopcm.size)) {
387 		drm_warn(&i915->drm, "%s firmware %s: invalid size: %zu > %zu\n",
388 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
389 			 size, (size_t)i915->wopcm.size);
390 		err = -E2BIG;
391 		goto fail;
392 	}
393 
394 	/* Get version numbers from the CSS header */
395 	uc_fw->major_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MAJOR,
396 					   css->sw_version);
397 	uc_fw->minor_ver_found = FIELD_GET(CSS_SW_VERSION_UC_MINOR,
398 					   css->sw_version);
399 
400 	if (uc_fw->major_ver_found != uc_fw->major_ver_wanted ||
401 	    uc_fw->minor_ver_found < uc_fw->minor_ver_wanted) {
402 		drm_notice(&i915->drm, "%s firmware %s: unexpected version: %u.%u != %u.%u\n",
403 			   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
404 			   uc_fw->major_ver_found, uc_fw->minor_ver_found,
405 			   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted);
406 		if (!intel_uc_fw_is_overridden(uc_fw)) {
407 			err = -ENOEXEC;
408 			goto fail;
409 		}
410 	}
411 
412 	if (uc_fw->type == INTEL_UC_FW_TYPE_GUC)
413 		uc_fw->private_data_size = css->private_data_size;
414 
415 	if (HAS_LMEM(i915)) {
416 		obj = i915_gem_object_create_lmem_from_data(i915, fw->data, fw->size);
417 		if (!IS_ERR(obj))
418 			obj->flags |= I915_BO_ALLOC_PM_EARLY;
419 	} else {
420 		obj = i915_gem_object_create_shmem_from_data(i915, fw->data, fw->size);
421 	}
422 
423 	if (IS_ERR(obj)) {
424 		err = PTR_ERR(obj);
425 		goto fail;
426 	}
427 
428 	uc_fw->obj = obj;
429 	uc_fw->size = fw->size;
430 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
431 
432 	release_firmware(fw);
433 	return 0;
434 
435 fail:
436 	intel_uc_fw_change_status(uc_fw, err == -ENOENT ?
437 				  INTEL_UC_FIRMWARE_MISSING :
438 				  INTEL_UC_FIRMWARE_ERROR);
439 
440 	drm_notice(&i915->drm, "%s firmware %s: fetch failed with error %d\n",
441 		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path, err);
442 	drm_info(&i915->drm, "%s firmware(s) can be downloaded from %s\n",
443 		 intel_uc_fw_type_repr(uc_fw->type), INTEL_UC_FIRMWARE_URL);
444 
445 	release_firmware(fw);		/* OK even if fw is NULL */
446 	return err;
447 }
448 
449 static u32 uc_fw_ggtt_offset(struct intel_uc_fw *uc_fw)
450 {
451 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
452 	struct drm_mm_node *node = &ggtt->uc_fw;
453 
454 	GEM_BUG_ON(!drm_mm_node_allocated(node));
455 	GEM_BUG_ON(upper_32_bits(node->start));
456 	GEM_BUG_ON(upper_32_bits(node->start + node->size - 1));
457 
458 	return lower_32_bits(node->start);
459 }
460 
461 static void uc_fw_bind_ggtt(struct intel_uc_fw *uc_fw)
462 {
463 	struct drm_i915_gem_object *obj = uc_fw->obj;
464 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
465 	struct i915_vma_resource *dummy = &uc_fw->dummy;
466 	u32 pte_flags = 0;
467 
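	/*
	 * Fill in a throwaway vma_resource describing the fw object at the
	 * GGTT node reserved for uC firmware, so the pages can be bound
	 * without creating a real i915_vma.
	 */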
468 	dummy->start = uc_fw_ggtt_offset(uc_fw);
469 	dummy->node_size = obj->base.size;
470 	dummy->bi.pages = obj->mm.pages;
471 
472 	GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
473 	GEM_BUG_ON(dummy->node_size > ggtt->uc_fw.size);
474 
475 	/* uc_fw->obj cache domains were not controlled across suspend */
476 	if (i915_gem_object_has_struct_page(obj))
477 		drm_clflush_sg(dummy->bi.pages);
478 
479 	if (i915_gem_object_is_lmem(obj))
480 		pte_flags |= PTE_LM;
481 
482 	ggtt->vm.insert_entries(&ggtt->vm, dummy, I915_CACHE_NONE, pte_flags);
483 }
484 
485 static void uc_fw_unbind_ggtt(struct intel_uc_fw *uc_fw)
486 {
487 	struct drm_i915_gem_object *obj = uc_fw->obj;
488 	struct i915_ggtt *ggtt = __uc_fw_to_gt(uc_fw)->ggtt;
489 	u64 start = uc_fw_ggtt_offset(uc_fw);
490 
491 	ggtt->vm.clear_range(&ggtt->vm, start, obj->base.size);
492 }
493 
494 static int uc_fw_xfer(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
495 {
496 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
497 	struct intel_uncore *uncore = gt->uncore;
498 	u64 offset;
499 	int ret;
500 
501 	ret = i915_inject_probe_error(gt->i915, -ETIMEDOUT);
502 	if (ret)
503 		return ret;
504 
505 	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
506 
507 	/* Set the source address for the uCode */
508 	offset = uc_fw_ggtt_offset(uc_fw);
509 	GEM_BUG_ON(upper_32_bits(offset) & 0xFFFF0000);
510 	intel_uncore_write_fw(uncore, DMA_ADDR_0_LOW, lower_32_bits(offset));
511 	intel_uncore_write_fw(uncore, DMA_ADDR_0_HIGH, upper_32_bits(offset));
512 
513 	/* Set the DMA destination */
514 	intel_uncore_write_fw(uncore, DMA_ADDR_1_LOW, dst_offset);
515 	intel_uncore_write_fw(uncore, DMA_ADDR_1_HIGH, DMA_ADDRESS_SPACE_WOPCM);
516 
517 	/*
518 	 * Set the transfer size. The header plus uCode will be copied to WOPCM
519 	 * via DMA, excluding any other components
520 	 */
521 	intel_uncore_write_fw(uncore, DMA_COPY_SIZE,
522 			      sizeof(struct uc_css_header) + uc_fw->ucode_size);
523 
524 	/* Start the DMA */
525 	intel_uncore_write_fw(uncore, DMA_CTRL,
526 			      _MASKED_BIT_ENABLE(dma_flags | START_DMA));
527 
528 	/* Wait for DMA to finish */
529 	ret = intel_wait_for_register_fw(uncore, DMA_CTRL, START_DMA, 0, 100);
530 	if (ret)
531 		drm_err(&gt->i915->drm, "DMA for %s fw failed, DMA_CTRL=%u\n",
532 			intel_uc_fw_type_repr(uc_fw->type),
533 			intel_uncore_read_fw(uncore, DMA_CTRL));
534 
535 	/* Disable the bits once DMA is over */
536 	intel_uncore_write_fw(uncore, DMA_CTRL, _MASKED_BIT_DISABLE(dma_flags));
537 
538 	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
539 
540 	return ret;
541 }
542 
543 /**
544  * intel_uc_fw_upload - load uC firmware using custom loader
545  * @uc_fw: uC firmware
546  * @dst_offset: destination offset
547  * @dma_flags: flags for DMA ctrl
548  *
549  * Loads uC firmware and updates internal flags.
550  *
551  * Return: 0 on success, non-zero on failure.
552  */
553 int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
554 {
555 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
556 	int err;
557 
558 	/* make sure the status was cleared the last time we reset the uc */
559 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
560 
561 	err = i915_inject_probe_error(gt->i915, -ENOEXEC);
562 	if (err)
563 		return err;
564 
565 	if (!intel_uc_fw_is_loadable(uc_fw))
566 		return -ENOEXEC;
567 
568 	/* Call custom loader */
569 	uc_fw_bind_ggtt(uc_fw);
570 	err = uc_fw_xfer(uc_fw, dst_offset, dma_flags);
571 	uc_fw_unbind_ggtt(uc_fw);
572 	if (err)
573 		goto fail;
574 
575 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_TRANSFERRED);
576 	return 0;
577 
578 fail:
579 	i915_probe_error(gt->i915, "Failed to load %s firmware %s (%d)\n",
580 			 intel_uc_fw_type_repr(uc_fw->type), uc_fw->path,
581 			 err);
582 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOAD_FAIL);
583 	return err;
584 }
585 
586 static inline bool uc_fw_need_rsa_in_memory(struct intel_uc_fw *uc_fw)
587 {
588 	/*
589 	 * The HW reads the GuC RSA from memory if the key size is > 256 bytes,
590 	 * while it reads it from the 64 RSA registers if it is smaller.
591 	 * The HuC RSA is always read from memory.
592 	 */
593 	return uc_fw->type == INTEL_UC_FW_TYPE_HUC || uc_fw->rsa_size > 256;
594 }
595 
596 static int uc_fw_rsa_data_create(struct intel_uc_fw *uc_fw)
597 {
598 	struct intel_gt *gt = __uc_fw_to_gt(uc_fw);
599 	struct i915_vma *vma;
600 	size_t copied;
601 	void *vaddr;
602 	int err;
603 
604 	err = i915_inject_probe_error(gt->i915, -ENXIO);
605 	if (err)
606 		return err;
607 
608 	if (!uc_fw_need_rsa_in_memory(uc_fw))
609 		return 0;
610 
611 	/*
612 	 * uC firmwares will sit above GUC_GGTT_TOP and will not map through
613 	 * GGTT. Unfortunately, this means that the GuC HW cannot perform the uC
614 	 * authentication from memory, as the RSA offset now falls within the
615 	 * GuC inaccessible range. We resort to perma-pinning an additional vma
616 	 * within the accessible range that only contains the RSA signature.
617 	 * The GuC HW can use this extra pinning to perform the authentication
618 	 * since its GGTT offset will be GuC accessible.
619 	 */
620 	GEM_BUG_ON(uc_fw->rsa_size > PAGE_SIZE);
621 	vma = intel_guc_allocate_vma(&gt->uc.guc, PAGE_SIZE);
622 	if (IS_ERR(vma))
623 		return PTR_ERR(vma);
624 
625 	vaddr = i915_gem_object_pin_map_unlocked(vma->obj,
626 						 i915_coherent_map_type(gt->i915, vma->obj, true));
627 	if (IS_ERR(vaddr)) {
628 		i915_vma_unpin_and_release(&vma, 0);
629 		err = PTR_ERR(vaddr);
630 		goto unpin_out;
631 	}
632 
633 	copied = intel_uc_fw_copy_rsa(uc_fw, vaddr, vma->size);
634 	i915_gem_object_unpin_map(vma->obj);
635 
636 	if (copied < uc_fw->rsa_size) {
637 		err = -ENOMEM;
638 		goto unpin_out;
639 	}
640 
641 	uc_fw->rsa_data = vma;
642 
643 	return 0;
644 
645 unpin_out:
646 	i915_vma_unpin_and_release(&vma, 0);
647 	return err;
648 }
649 
650 static void uc_fw_rsa_data_destroy(struct intel_uc_fw *uc_fw)
651 {
652 	i915_vma_unpin_and_release(&uc_fw->rsa_data, 0);
653 }
654 
655 int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
656 {
657 	int err;
658 
659 	/* this should happen before the load! */
660 	GEM_BUG_ON(intel_uc_fw_is_loaded(uc_fw));
661 
662 	if (!intel_uc_fw_is_available(uc_fw))
663 		return -ENOEXEC;
664 
665 	err = i915_gem_object_pin_pages_unlocked(uc_fw->obj);
666 	if (err) {
667 		DRM_DEBUG_DRIVER("%s fw pin-pages err=%d\n",
668 				 intel_uc_fw_type_repr(uc_fw->type), err);
669 		goto out;
670 	}
671 
672 	err = uc_fw_rsa_data_create(uc_fw);
673 	if (err) {
674 		DRM_DEBUG_DRIVER("%s fw rsa data creation failed, err=%d\n",
675 				 intel_uc_fw_type_repr(uc_fw->type), err);
676 		goto out_unpin;
677 	}
678 
679 	return 0;
680 
681 out_unpin:
682 	i915_gem_object_unpin_pages(uc_fw->obj);
683 out:
684 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_INIT_FAIL);
685 	return err;
686 }
687 
688 void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
689 {
690 	uc_fw_rsa_data_destroy(uc_fw);
691 
692 	if (i915_gem_object_has_pinned_pages(uc_fw->obj))
693 		i915_gem_object_unpin_pages(uc_fw->obj);
694 
695 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
696 }
697 
698 /**
699  * intel_uc_fw_cleanup_fetch - cleanup uC firmware
700  * @uc_fw: uC firmware
701  *
702  * Cleans up uC firmware by releasing the firmware GEM obj.
703  */
704 void intel_uc_fw_cleanup_fetch(struct intel_uc_fw *uc_fw)
705 {
706 	if (!intel_uc_fw_is_available(uc_fw))
707 		return;
708 
709 	i915_gem_object_put(fetch_and_zero(&uc_fw->obj));
710 
711 	intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_SELECTED);
712 }
713 
714 /**
715  * intel_uc_fw_copy_rsa - copy fw RSA to buffer
716  *
717  * @uc_fw: uC firmware
718  * @dst: dst buffer
719  * @max_len: max number of bytes to copy
720  *
721  * Return: number of copied bytes.
722  */
723 size_t intel_uc_fw_copy_rsa(struct intel_uc_fw *uc_fw, void *dst, u32 max_len)
724 {
725 	struct intel_memory_region *mr = uc_fw->obj->mm.region;
726 	u32 size = min_t(u32, uc_fw->rsa_size, max_len);
727 	u32 offset = sizeof(struct uc_css_header) + uc_fw->ucode_size;
728 	struct sgt_iter iter;
729 	size_t count = 0;
730 	int idx;
731 
732 	/* Called during reset handling, must be atomic [no fs_reclaim] */
733 	GEM_BUG_ON(!intel_uc_fw_is_available(uc_fw));
734 
735 	idx = offset >> PAGE_SHIFT;
736 	offset = offset_in_page(offset);
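	/* Skip idx whole pages to reach the RSA key, then copy it page by page. */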
737 	if (i915_gem_object_has_struct_page(uc_fw->obj)) {
738 		struct page *page;
739 
740 		for_each_sgt_page(page, iter, uc_fw->obj->mm.pages) {
741 			u32 len = min_t(u32, size, PAGE_SIZE - offset);
742 			void *vaddr;
743 
744 			if (idx > 0) {
745 				idx--;
746 				continue;
747 			}
748 
749 			vaddr = kmap_atomic(page);
750 			memcpy(dst, vaddr + offset, len);
751 			kunmap_atomic(vaddr);
752 
753 			offset = 0;
754 			dst += len;
755 			size -= len;
756 			count += len;
757 			if (!size)
758 				break;
759 		}
760 	} else {
761 		dma_addr_t addr;
762 
763 		for_each_sgt_daddr(addr, iter, uc_fw->obj->mm.pages) {
764 			u32 len = min_t(u32, size, PAGE_SIZE - offset);
765 			void __iomem *vaddr;
766 
767 			if (idx > 0) {
768 				idx--;
769 				continue;
770 			}
771 
772 			vaddr = io_mapping_map_atomic_wc(&mr->iomap,
773 							 addr - mr->region.start);
774 			memcpy_fromio(dst, vaddr + offset, len);
775 			io_mapping_unmap_atomic(vaddr);
776 
777 			offset = 0;
778 			dst += len;
779 			size -= len;
780 			count += len;
781 			if (!size)
782 				break;
783 		}
784 	}
785 
786 	return count;
787 }
788 
789 /**
790  * intel_uc_fw_dump - dump information about uC firmware
791  * @uc_fw: uC firmware
792  * @p: the &drm_printer
793  *
794  * Pretty printer for uC firmware.
795  */
796 void intel_uc_fw_dump(const struct intel_uc_fw *uc_fw, struct drm_printer *p)
797 {
798 	drm_printf(p, "%s firmware: %s\n",
799 		   intel_uc_fw_type_repr(uc_fw->type), uc_fw->path);
800 	drm_printf(p, "\tstatus: %s\n",
801 		   intel_uc_fw_status_repr(uc_fw->status));
802 	drm_printf(p, "\tversion: wanted %u.%u, found %u.%u\n",
803 		   uc_fw->major_ver_wanted, uc_fw->minor_ver_wanted,
804 		   uc_fw->major_ver_found, uc_fw->minor_ver_found);
805 	drm_printf(p, "\tuCode: %u bytes\n", uc_fw->ucode_size);
806 	drm_printf(p, "\tRSA: %u bytes\n", uc_fw->rsa_size);
807 }
808