xref: /linux/drivers/gpu/drm/xe/xe_huc.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_huc.h"
7 
8 #include <linux/delay.h>
9 
10 #include <drm/drm_managed.h>
11 
12 #include "abi/gsc_pxp_commands_abi.h"
13 #include "regs/xe_gsc_regs.h"
14 #include "regs/xe_guc_regs.h"
15 #include "xe_bo.h"
16 #include "xe_device.h"
17 #include "xe_force_wake.h"
18 #include "xe_gsc_submit.h"
19 #include "xe_gt.h"
20 #include "xe_gt_printk.h"
21 #include "xe_guc.h"
22 #include "xe_map.h"
23 #include "xe_mmio.h"
24 #include "xe_sriov.h"
25 #include "xe_uc_fw.h"
26 
27 static struct xe_gt *
28 huc_to_gt(struct xe_huc *huc)
29 {
30 	return container_of(huc, struct xe_gt, uc.huc);
31 }
32 
/* Resolve the xe device owning this HuC, via its parent GT. */
static struct xe_device *huc_to_xe(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);

	return gt_to_xe(gt);
}
38 
39 static struct xe_guc *
40 huc_to_guc(struct xe_huc *huc)
41 {
42 	return &container_of(huc, struct xe_uc, huc)->guc;
43 }
44 
45 #define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
46 static int huc_alloc_gsc_pkt(struct xe_huc *huc)
47 {
48 	struct xe_gt *gt = huc_to_gt(huc);
49 	struct xe_device *xe = gt_to_xe(gt);
50 	struct xe_bo *bo;
51 
52 	/* we use a single object for both input and output */
53 	bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt),
54 					  PXP43_HUC_AUTH_INOUT_SIZE * 2,
55 					  XE_BO_FLAG_SYSTEM |
56 					  XE_BO_FLAG_GGTT);
57 	if (IS_ERR(bo))
58 		return PTR_ERR(bo);
59 
60 	huc->gsc_pkt = bo;
61 
62 	return 0;
63 }
64 
/**
 * xe_huc_init() - Software initialization of the HuC.
 * @huc: the &xe_huc to initialize
 *
 * Sets the firmware type, checks whether this GT is expected to carry a
 * HuC at all, initializes the firmware (fetch/parse) and, for blobs with
 * GSC headers, allocates the packet used to talk to the GSC. On the
 * successful full path the firmware status is moved to LOADABLE.
 *
 * Return: 0 on success or when there is nothing to do (HuC not supported,
 * firmware not enabled, or running as a SR-IOV VF), negative error code
 * on failure.
 */
int xe_huc_init(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	int ret;

	huc->fw.type = XE_UC_FW_TYPE_HUC;

	/*
	 * The HuC is only available on the media GT on most platforms.  The
	 * exception to that rule are the old Xe1 platforms where there was
	 * no separate GT for media IP, so the HuC was part of the primary
	 * GT.  Such platforms have graphics versions 12.55 and earlier.
	 */
	if (!xe_gt_is_media_type(gt) && GRAPHICS_VERx100(xe) > 1255) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
		return 0;
	}

	ret = xe_uc_fw_init(&huc->fw);
	if (ret)
		goto out;

	/* Not an error: the firmware can legitimately be unavailable/disabled */
	if (!xe_uc_fw_is_enabled(&huc->fw))
		return 0;

	/* NOTE(review): VFs appear to skip load preparation — presumably the PF owns it; confirm */
	if (IS_SRIOV_VF(xe))
		return 0;

	if (huc->fw.has_gsc_headers) {
		/* GSC-authenticated blobs need a buffer to message the GSC */
		ret = huc_alloc_gsc_pkt(huc);
		if (ret)
			goto out;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	xe_gt_err(gt, "HuC: initialization failed: %pe\n", ERR_PTR(ret));
	return ret;
}
108 
109 int xe_huc_init_post_hwconfig(struct xe_huc *huc)
110 {
111 	struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
112 	struct xe_device *xe = huc_to_xe(huc);
113 	int ret;
114 
115 	if (!IS_DGFX(huc_to_xe(huc)))
116 		return 0;
117 
118 	if (!xe_uc_fw_is_loadable(&huc->fw))
119 		return 0;
120 
121 	ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
122 	if (ret)
123 		return ret;
124 
125 	return 0;
126 }
127 
128 int xe_huc_upload(struct xe_huc *huc)
129 {
130 	if (!xe_uc_fw_is_loadable(&huc->fw))
131 		return 0;
132 	return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
133 }
134 
/*
 * Field accessors for the PXP 4.3 HuC auth messages inside the
 * iosys_map-backed packet: writes target the input message
 * (struct pxp43_new_huc_auth_in), reads fetch from the output message
 * (struct pxp43_huc_auth_out).
 */
#define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
#define huc_auth_msg_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)
139 
/*
 * Emit a PXP43_CMDID_NEW_HUC_AUTH request into @map at @wr_offset,
 * pointing the GSC at the HuC image located at GGTT address @huc_offset
 * with size @huc_size.
 *
 * Return: the offset just past the emitted message.
 */
static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
				 u32 wr_offset, u32 huc_offset, u32 huc_size)
{
	/* start from a zeroed message, then fill the individual fields */
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));

	huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
	huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
	huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
	/* buffer_len covers only the payload following the common header */
	huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
			sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
	huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
	huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);

	return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
}
155 
/*
 * Authenticate the HuC by submitting an auth request to the GSC FW via
 * the GSCCS. The request and reply share huc->gsc_pkt: input in the
 * first PXP43_HUC_AUTH_INOUT_SIZE bytes, output in the second. The
 * submission is retried a few times while the GSC reports the request
 * as still pending.
 */
static int huc_auth_via_gsccs(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *pkt = huc->gsc_pkt;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	u32 out_status;
	int retry = 5;
	int err = 0;

	/* no packet means init never allocated one (or this path isn't in use) */
	if (!pkt)
		return -ENODEV;

	ggtt_offset = xe_bo_ggtt_addr(pkt);

	/* build the GSC header followed by the PXP auth message in the input half */
	wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
				       sizeof(struct pxp43_new_huc_auth_in));
	wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
					  xe_bo_ggtt_addr(huc->fw.bo),
					  xe_bo_size(huc->fw.bo));
	do {
		err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
					       ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
					       PXP43_HUC_AUTH_INOUT_SIZE);
		if (err)
			break;

		/* if the GSC flags the request as pending, back off and resubmit */
		if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
						    PXP43_HUC_AUTH_INOUT_SIZE)) {
			err = -EBUSY;
			msleep(50);
		}
	} while (--retry && err == -EBUSY);

	if (err) {
		xe_gt_err(gt, "HuC: failed to submit GSC request to auth: %pe\n", ERR_PTR(err));
		return err;
	}

	/* validate the reply header in the output half and locate the payload */
	err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
				     sizeof(struct pxp43_huc_auth_out), &rd_offset);
	if (err) {
		xe_gt_err(gt, "HuC: invalid GSC reply for auth: %pe\n", ERR_PTR(err));
		return err;
	}

	/*
	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
	 * authenticated. If the same error is ever returned with HuC not loaded
	 * we'll still catch it when we check the authentication bit later.
	 */
	out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
	if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
		xe_gt_err(gt, "HuC: authentication failed with GSC error = %#x\n", out_status);
		return -EIO;
	}

	return 0;
}
217 
/*
 * Per-auth-path descriptor: a human-readable name plus the register and
 * bit that report successful HuC authentication for that path. Used both
 * for the pre-check in xe_huc_is_authenticated() and the post-trigger
 * wait in xe_huc_auth().
 */
static const struct {
	const char *name;
	struct xe_reg reg;
	u32 val;
} huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
	[XE_HUC_AUTH_VIA_GUC] = { "GuC",
				  HUC_KERNEL_LOAD_INFO,
				  HUC_LOAD_SUCCESSFUL },
	[XE_HUC_AUTH_VIA_GSC] = { "GSC",
				  HECI_FWSTS5(MTL_GSC_HECI1_BASE),
				  HECI1_FWSTS5_HUC_AUTH_DONE },
};
230 
231 bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
232 {
233 	struct xe_gt *gt = huc_to_gt(huc);
234 
235 	return xe_mmio_read32(&gt->mmio, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
236 }
237 
/**
 * xe_huc_auth() - Authenticate the loaded HuC via the requested path.
 * @huc: the &xe_huc
 * @type: authentication flow to use (GuC- or GSC-based)
 *
 * Triggers authentication and polls the matching status register until
 * the HW reports the HuC as authenticated. On success the firmware
 * status moves to RUNNING; on failure it is marked LOAD_FAIL.
 *
 * Return: 0 on success or when there is nothing to do, negative error
 * code on failure.
 */
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_guc *guc = huc_to_guc(huc);
	int ret;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	/* On newer platforms the HuC survives reset, so no need to re-auth */
	if (xe_huc_is_authenticated(huc, type)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
		return 0;
	}

	/* can't authenticate an image that was never uploaded */
	if (!xe_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	switch (type) {
	case XE_HUC_AUTH_VIA_GUC:
		/* the GuC needs the GGTT address of the RSA signature blob */
		ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
				      xe_uc_fw_rsa_offset(&huc->fw));
		break;
	case XE_HUC_AUTH_VIA_GSC:
		ret = huc_auth_via_gsccs(huc);
		break;
	default:
		XE_WARN_ON(type);
		return -EINVAL;
	}
	if (ret) {
		xe_gt_err(gt, "HuC: failed to trigger auth via %s: %pe\n",
			  huc_auth_modes[type].name, ERR_PTR(ret));
		goto fail;
	}

	/* poll (up to 100ms) for the HW to confirm the authentication */
	ret = xe_mmio_wait32(&gt->mmio, huc_auth_modes[type].reg, huc_auth_modes[type].val,
			     huc_auth_modes[type].val, 100000, NULL, false);
	if (ret) {
		xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret));
		goto fail;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
	xe_gt_dbg(gt, "HuC: authenticated via %s\n", huc_auth_modes[type].name);

	return 0;

fail:
	xe_gt_err(gt, "HuC: authentication via %s failed: %pe\n",
		  huc_auth_modes[type].name, ERR_PTR(ret));
	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);

	return ret;
}
293 
/**
 * xe_huc_sanitize() - Reset the HuC firmware status tracking.
 * @huc: the &xe_huc
 *
 * Delegates to xe_uc_fw_sanitize() to reset the SW firmware state.
 */
void xe_huc_sanitize(struct xe_huc *huc)
{
	xe_uc_fw_sanitize(&huc->fw);
}
298 
/**
 * xe_huc_print_info() - Print HuC firmware info and HW load status.
 * @huc: the &xe_huc
 * @p: the &drm_printer to print into
 *
 * The HW status register is only read if firmware is enabled and GT
 * forcewake can be taken.
 */
void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
{
	struct xe_gt *gt = huc_to_gt(huc);

	xe_uc_fw_print(&huc->fw, p);

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return;

	/* scope-based forcewake: released automatically at function exit */
	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return;

	drm_printf(p, "\nHuC status: 0x%08x\n",
		   xe_mmio_read32(&gt->mmio, HUC_KERNEL_LOAD_INFO));
}
315