xref: /linux/drivers/gpu/drm/xe/xe_huc.c (revision 955abe0a1b41de5ba61fe4cd614ebc123084d499)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_huc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "abi/gsc_pxp_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"

static struct xe_gt *
huc_to_gt(struct xe_huc *huc)
{
	return container_of(huc, struct xe_gt, uc.huc);
}

static struct xe_device *
huc_to_xe(struct xe_huc *huc)
{
	return gt_to_xe(huc_to_gt(huc));
}

static struct xe_guc *
huc_to_guc(struct xe_huc *huc)
{
	return &container_of(huc, struct xe_uc, huc)->guc;
}

static void free_gsc_pkt(struct drm_device *drm, void *arg)
{
	struct xe_huc *huc = arg;

	xe_bo_unpin_map_no_vm(huc->gsc_pkt);
	huc->gsc_pkt = NULL;
}

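/*
 * Buffer used to exchange the HuC auth message with the GSC: the request is
 * written in the first half and the GSC reply lands in the second half, each
 * half being PXP43_HUC_AUTH_INOUT_SIZE bytes.
 */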
#define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
static int huc_alloc_gsc_pkt(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;

	/* we use a single object for both input and output */
	bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
				  PXP43_HUC_AUTH_INOUT_SIZE * 2,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	huc->gsc_pkt = bo;

	return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}

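/**
 * xe_huc_init() - HuC initialization
 * @huc: The HuC object.
 *
 * Initializes the HuC firmware descriptor and, for blobs with GSC headers,
 * allocates the packet buffer used for GSC-based authentication. On platforms
 * with a media GT the HuC is marked as not supported on the primary GT.
 *
 * Return: 0 on success, negative error code on failure.
 */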
int xe_huc_init(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	int ret;

	huc->fw.type = XE_UC_FW_TYPE_HUC;

	/* On platforms with a media GT the HuC is only available there */
	if (tile->media_gt && (gt != tile->media_gt)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
		return 0;
	}

	ret = xe_uc_fw_init(&huc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return 0;

	if (IS_SRIOV_VF(xe))
		return 0;

	if (huc->fw.has_gsc_headers) {
		ret = huc_alloc_gsc_pkt(huc);
		if (ret)
			goto out;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	xe_gt_err(gt, "HuC: initialization failed: %pe\n", ERR_PTR(ret));
	return ret;
}

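/**
 * xe_huc_init_post_hwconfig() - HuC initialization after the hwconfig load
 * @huc: The HuC object.
 *
 * On discrete devices, relocates the HuC firmware image into VRAM; a no-op
 * everywhere else.
 *
 * Return: 0 on success, negative error code on failure.
 */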
int xe_huc_init_post_hwconfig(struct xe_huc *huc)
{
	struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
	struct xe_device *xe = huc_to_xe(huc);
	int ret;

	if (!IS_DGFX(xe))
		return 0;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
	if (ret)
		return ret;

	return 0;
}

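/**
 * xe_huc_upload() - Load the HuC image onto the hardware
 * @huc: The HuC object.
 *
 * Transfers the firmware image via xe_uc_fw_upload() if the HuC is in a
 * loadable state, otherwise returns success without doing anything.
 *
 * Return: 0 on success, negative error code on failure.
 */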
int xe_huc_upload(struct xe_huc *huc)
{
	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;
	return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}

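/*
 * Accessors for the fields of the PXP43 HuC auth input/output messages within
 * the GSC packet buffer.
 */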
#define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
#define huc_auth_msg_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)

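/*
 * Write a PXP43_CMDID_NEW_HUC_AUTH command at @wr_offset within @map, pointing
 * the GSC at the HuC image located at GGTT offset @huc_offset with size
 * @huc_size. Returns the offset right after the emitted message.
 */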
static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
				 u32 wr_offset, u32 huc_offset, u32 huc_size)
{
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));

	huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
	huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
	huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
	huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
			sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
	huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
	huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);

	return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
}

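/*
 * Authenticate the HuC via the GSCCS: emit a GSC HECI packet carrying the
 * PXP43 auth command, submit it to the GSC and check the status returned in
 * the reply. The submission is retried a few times if the GSC reports the
 * request as still pending.
 */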
static int huc_auth_via_gsccs(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *pkt = huc->gsc_pkt;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	u32 out_status;
	int retry = 5;
	int err = 0;

	if (!pkt)
		return -ENODEV;

	ggtt_offset = xe_bo_ggtt_addr(pkt);

	wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
				       sizeof(struct pxp43_new_huc_auth_in));
	wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
					  xe_bo_ggtt_addr(huc->fw.bo),
					  huc->fw.bo->size);
	do {
		err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
					       ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
					       PXP43_HUC_AUTH_INOUT_SIZE);
		if (err)
			break;

		if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
						    PXP43_HUC_AUTH_INOUT_SIZE)) {
			err = -EBUSY;
			msleep(50);
		}
	} while (--retry && err == -EBUSY);

	if (err) {
		xe_gt_err(gt, "HuC: failed to submit GSC request to auth: %pe\n", ERR_PTR(err));
		return err;
	}

	err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
				     sizeof(struct pxp43_huc_auth_out), &rd_offset);
	if (err) {
		xe_gt_err(gt, "HuC: invalid GSC reply for auth: %pe\n", ERR_PTR(err));
		return err;
	}

	/*
	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
	 * authenticated. If the same error is ever returned with HuC not loaded
	 * we'll still catch it when we check the authentication bit later.
	 */
	out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
	if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
		xe_gt_err(gt, "HuC: authentication failed with GSC error = %#x\n", out_status);
		return -EIO;
	}

	return 0;
}

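/* Register and bit to poll to confirm authentication for each auth path */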
static const struct {
	const char *name;
	struct xe_reg reg;
	u32 val;
} huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
	[XE_HUC_AUTH_VIA_GUC] = { "GuC",
				  HUC_KERNEL_LOAD_INFO,
				  HUC_LOAD_SUCCESSFUL },
	[XE_HUC_AUTH_VIA_GSC] = { "GSC",
				  HECI_FWSTS5(MTL_GSC_HECI1_BASE),
				  HECI1_FWSTS5_HUC_AUTH_DONE },
};

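/**
 * xe_huc_is_authenticated() - Check the HuC authentication status
 * @huc: The HuC object.
 * @type: Authentication path to check (GuC or GSC).
 *
 * Return: true if the hardware reports the HuC as authenticated via the given
 * path, false otherwise.
 */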
bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_gt *gt = huc_to_gt(huc);

	return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
}

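/**
 * xe_huc_auth() - Authenticate the HuC
 * @huc: The HuC object.
 * @type: Authentication path to use (GuC or GSC).
 *
 * Triggers authentication of a loaded HuC image via the selected path and
 * waits for the hardware to confirm it. If the HuC is already authenticated
 * (e.g. it survived a reset), only the firmware status is updated.
 *
 * Return: 0 on success, negative error code on failure.
 */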
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_guc *guc = huc_to_guc(huc);
	int ret;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	/* On newer platforms the HuC survives reset, so no need to re-auth */
	if (xe_huc_is_authenticated(huc, type)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
		return 0;
	}

	if (!xe_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	switch (type) {
	case XE_HUC_AUTH_VIA_GUC:
		ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
				      xe_uc_fw_rsa_offset(&huc->fw));
		break;
	case XE_HUC_AUTH_VIA_GSC:
		ret = huc_auth_via_gsccs(huc);
		break;
	default:
		XE_WARN_ON(type);
		return -EINVAL;
	}
	if (ret) {
		xe_gt_err(gt, "HuC: failed to trigger auth via %s: %pe\n",
			  huc_auth_modes[type].name, ERR_PTR(ret));
		goto fail;
	}

	ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
			     huc_auth_modes[type].val, 100000, NULL, false);
	if (ret) {
		xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret));
		goto fail;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
	xe_gt_dbg(gt, "HuC: authenticated via %s\n", huc_auth_modes[type].name);

	return 0;

fail:
	xe_gt_err(gt, "HuC: authentication via %s failed: %pe\n",
		  huc_auth_modes[type].name, ERR_PTR(ret));
	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);

	return ret;
}

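/**
 * xe_huc_sanitize() - Reset the HuC firmware load status
 * @huc: The HuC object.
 *
 * Marks the firmware as needing to be re-loaded, e.g. as part of a GT reset.
 */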
void xe_huc_sanitize(struct xe_huc *huc)
{
	xe_uc_fw_sanitize(&huc->fw);
}

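/**
 * xe_huc_print_info() - Print HuC firmware information
 * @huc: The HuC object.
 * @p: The &drm_printer to print to.
 *
 * Dumps the firmware descriptor and, if the HuC is enabled, the raw value of
 * the HUC_KERNEL_LOAD_INFO status register.
 */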
void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
{
	struct xe_gt *gt = huc_to_gt(huc);
	int err;

	xe_uc_fw_print(&huc->fw, p);

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return;

	drm_printf(p, "\nHuC status: 0x%08x\n",
		   xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}