xref: /linux/drivers/gpu/drm/xe/xe_huc.c (revision 569d7db70e5dcf13fbf072f10e9096577ac1e565)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2022 Intel Corporation
4  */
5 
6 #include "xe_huc.h"
7 
8 #include <linux/delay.h>
9 
10 #include <drm/drm_managed.h>
11 
12 #include "abi/gsc_pxp_commands_abi.h"
13 #include "regs/xe_gsc_regs.h"
14 #include "regs/xe_guc_regs.h"
15 #include "xe_assert.h"
16 #include "xe_bo.h"
17 #include "xe_device.h"
18 #include "xe_force_wake.h"
19 #include "xe_gsc_submit.h"
20 #include "xe_gt.h"
21 #include "xe_guc.h"
22 #include "xe_map.h"
23 #include "xe_mmio.h"
24 #include "xe_uc_fw.h"
25 
26 static struct xe_gt *
27 huc_to_gt(struct xe_huc *huc)
28 {
29 	return container_of(huc, struct xe_gt, uc.huc);
30 }
31 
/* Resolve the HuC back-pointer to the owning xe device. */
static struct xe_device *
huc_to_xe(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);

	return gt_to_xe(gt);
}
37 
38 static struct xe_guc *
39 huc_to_guc(struct xe_huc *huc)
40 {
41 	return &container_of(huc, struct xe_uc, huc)->guc;
42 }
43 
44 static void free_gsc_pkt(struct drm_device *drm, void *arg)
45 {
46 	struct xe_huc *huc = arg;
47 
48 	xe_bo_unpin_map_no_vm(huc->gsc_pkt);
49 	huc->gsc_pkt = NULL;
50 }
51 
52 #define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
53 static int huc_alloc_gsc_pkt(struct xe_huc *huc)
54 {
55 	struct xe_gt *gt = huc_to_gt(huc);
56 	struct xe_device *xe = gt_to_xe(gt);
57 	struct xe_bo *bo;
58 
59 	/* we use a single object for both input and output */
60 	bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
61 				  PXP43_HUC_AUTH_INOUT_SIZE * 2,
62 				  ttm_bo_type_kernel,
63 				  XE_BO_FLAG_SYSTEM |
64 				  XE_BO_FLAG_GGTT);
65 	if (IS_ERR(bo))
66 		return PTR_ERR(bo);
67 
68 	huc->gsc_pkt = bo;
69 
70 	return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
71 }
72 
73 int xe_huc_init(struct xe_huc *huc)
74 {
75 	struct xe_gt *gt = huc_to_gt(huc);
76 	struct xe_tile *tile = gt_to_tile(gt);
77 	struct xe_device *xe = gt_to_xe(gt);
78 	int ret;
79 
80 	huc->fw.type = XE_UC_FW_TYPE_HUC;
81 
82 	/* On platforms with a media GT the HuC is only available there */
83 	if (tile->media_gt && (gt != tile->media_gt)) {
84 		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
85 		return 0;
86 	}
87 
88 	ret = xe_uc_fw_init(&huc->fw);
89 	if (ret)
90 		goto out;
91 
92 	if (!xe_uc_fw_is_enabled(&huc->fw))
93 		return 0;
94 
95 	if (huc->fw.has_gsc_headers) {
96 		ret = huc_alloc_gsc_pkt(huc);
97 		if (ret)
98 			goto out;
99 	}
100 
101 	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
102 
103 	return 0;
104 
105 out:
106 	drm_err(&xe->drm, "HuC init failed with %d", ret);
107 	return ret;
108 }
109 
110 int xe_huc_init_post_hwconfig(struct xe_huc *huc)
111 {
112 	struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
113 	struct xe_device *xe = huc_to_xe(huc);
114 	int ret;
115 
116 	if (!IS_DGFX(huc_to_xe(huc)))
117 		return 0;
118 
119 	if (!xe_uc_fw_is_loadable(&huc->fw))
120 		return 0;
121 
122 	ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
123 	if (ret)
124 		return ret;
125 
126 	return 0;
127 }
128 
129 int xe_huc_upload(struct xe_huc *huc)
130 {
131 	if (!xe_uc_fw_is_loadable(&huc->fw))
132 		return 0;
133 	return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
134 }
135 
/* Field accessors for the PXP 4.3 auth request/reply inside the packet BO */
#define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
#define huc_auth_msg_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)

/*
 * Write a PXP43_CMDID_NEW_HUC_AUTH message at wr_offset in the packet BO,
 * pointing the GSC at the HuC image location (huc_offset, GGTT address) and
 * size. Returns the offset just past the message, i.e. the total size of
 * the input written so far.
 */
static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
				 u32 wr_offset, u32 huc_offset, u32 huc_size)
{
	/* Clear the full message before filling individual fields */
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));

	huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
	huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
	huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
	/* buffer_len counts only the payload, not the command header */
	huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
			sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
	huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
	huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);

	return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
}
156 
/*
 * Authenticate the HuC by submitting a PXP auth request to the GSC FW via
 * the GSCCS. The gsc_pkt BO is split in two halves: input message at offset
 * 0, GSC reply at offset PXP43_HUC_AUTH_INOUT_SIZE. The request is retried
 * a bounded number of times if the GSC reports it as still pending.
 */
static int huc_auth_via_gsccs(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *pkt = huc->gsc_pkt;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	u32 out_status;
	int retry = 5;
	int err = 0;

	/* Packet BO is only allocated for blobs with GSC headers */
	if (!pkt)
		return -ENODEV;

	ggtt_offset = xe_bo_ggtt_addr(pkt);

	/* Input half: GSC HECI header followed by the PXP auth message */
	wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
				       sizeof(struct pxp43_new_huc_auth_in));
	wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
					  xe_bo_ggtt_addr(huc->fw.bo),
					  huc->fw.bo->size);
	do {
		err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
					       ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
					       PXP43_HUC_AUTH_INOUT_SIZE);
		if (err)
			break;

		/* GSC still busy with the request: back off and resubmit */
		if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
						    PXP43_HUC_AUTH_INOUT_SIZE)) {
			err = -EBUSY;
			msleep(50);
		}
	} while (--retry && err == -EBUSY);

	if (err) {
		drm_err(&xe->drm, "failed to submit GSC request to auth: %d\n", err);
		return err;
	}

	/* Validate the reply header in the output half of the BO */
	err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
				     sizeof(struct pxp43_huc_auth_out), &rd_offset);
	if (err) {
		drm_err(&xe->drm, "HuC: invalid GSC reply for auth (err=%d)\n", err);
		return err;
	}

	/*
	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
	 * authenticated. If the same error is ever returned with HuC not loaded
	 * we'll still catch it when we check the authentication bit later.
	 */
	out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
	if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
		drm_err(&xe->drm, "auth failed with GSC error = 0x%x\n", out_status);
		return -EIO;
	}

	return 0;
}
218 
/*
 * Per-mechanism register and bit to poll for HW confirmation that the HuC
 * has been authenticated, plus a human-readable name for log messages.
 */
static const struct {
	const char *name;
	struct xe_reg reg;
	u32 val;
} huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
	[XE_HUC_AUTH_VIA_GUC] = { "GuC",
				  HUC_KERNEL_LOAD_INFO,
				  HUC_LOAD_SUCCESSFUL },
	[XE_HUC_AUTH_VIA_GSC] = { "GSC",
				  HECI_FWSTS5(MTL_GSC_HECI1_BASE),
				  HECI1_FWSTS5_HUC_AUTH_DONE },
};
231 
232 bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
233 {
234 	struct xe_gt *gt = huc_to_gt(huc);
235 
236 	return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
237 }
238 
/**
 * xe_huc_auth() - authenticate the loaded HuC firmware.
 * @huc: the HuC structure
 * @type: authentication mechanism to use (GuC- or GSC-based)
 *
 * Triggers authentication via the selected mechanism, then polls the
 * mechanism's status register for HW confirmation. On success the firmware
 * is marked RUNNING; on failure it is marked LOAD_FAIL.
 *
 * Return: 0 on success or nothing-to-do, negative errno on failure.
 */
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_device *xe = huc_to_xe(huc);
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_guc *guc = huc_to_guc(huc);
	int ret;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	/* On newer platforms the HuC survives reset, so no need to re-auth */
	if (xe_huc_is_authenticated(huc, type)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
		return 0;
	}

	/* Can't authenticate an image that hasn't been uploaded */
	if (!xe_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	switch (type) {
	case XE_HUC_AUTH_VIA_GUC:
		/* GuC auth takes the GGTT address of the RSA signature */
		ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
				      xe_uc_fw_rsa_offset(&huc->fw));
		break;
	case XE_HUC_AUTH_VIA_GSC:
		ret = huc_auth_via_gsccs(huc);
		break;
	default:
		XE_WARN_ON(type);
		return -EINVAL;
	}
	if (ret) {
		drm_err(&xe->drm, "Failed to trigger HuC auth via %s: %d\n",
			huc_auth_modes[type].name, ret);
		goto fail;
	}

	/* Wait for HW confirmation that authentication completed */
	ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
			     huc_auth_modes[type].val, 100000, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
		goto fail;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
	drm_dbg(&xe->drm, "HuC authenticated via %s\n", huc_auth_modes[type].name);

	return 0;

fail:
	drm_err(&xe->drm, "HuC: Auth via %s failed: %d\n",
		huc_auth_modes[type].name, ret);
	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);

	return ret;
}
295 
296 void xe_huc_sanitize(struct xe_huc *huc)
297 {
298 	if (!xe_uc_fw_is_loadable(&huc->fw))
299 		return;
300 	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
301 }
302 
303 void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
304 {
305 	struct xe_gt *gt = huc_to_gt(huc);
306 	int err;
307 
308 	xe_uc_fw_print(&huc->fw, p);
309 
310 	if (!xe_uc_fw_is_enabled(&huc->fw))
311 		return;
312 
313 	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
314 	if (err)
315 		return;
316 
317 	drm_printf(p, "\nHuC status: 0x%08x\n",
318 		   xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));
319 
320 	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
321 }
322