// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_huc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "abi/gsc_pxp_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"

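/* Helpers to go from the HuC back to its parent GT, device and GuC. */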
static struct xe_gt *
huc_to_gt(struct xe_huc *huc)
{
	return container_of(huc, struct xe_gt, uc.huc);
}

static struct xe_device *
huc_to_xe(struct xe_huc *huc)
{
	return gt_to_xe(huc_to_gt(huc));
}

static struct xe_guc *
huc_to_guc(struct xe_huc *huc)
{
	return &container_of(huc, struct xe_uc, huc)->guc;
}

#define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
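/*
 * Allocate a single BO that holds both the auth request we send to the GSC
 * and the reply we read back; each half is PXP43_HUC_AUTH_INOUT_SIZE bytes.
 */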
static int huc_alloc_gsc_pkt(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;

	/* we use a single object for both input and output */
	bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt),
					  PXP43_HUC_AUTH_INOUT_SIZE * 2,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	huc->gsc_pkt = bo;

	return 0;
}

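/**
 * xe_huc_init - initialize the HuC
 * @huc: the HuC structure, embedded in its parent GT
 *
 * Marks the HuC as not supported on GTs that don't own it (when a media GT is
 * present), runs the common uC firmware init and, for firmware blobs with GSC
 * headers, allocates the packet buffer used for GSC-based authentication.
 *
 * Return: 0 on success or when there is nothing to do, negative error code on
 * failure.
 */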
int xe_huc_init(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	int ret;

	huc->fw.type = XE_UC_FW_TYPE_HUC;

	/* On platforms with a media GT the HuC is only available there */
	if (tile->media_gt && (gt != tile->media_gt)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
		return 0;
	}

	ret = xe_uc_fw_init(&huc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return 0;

	if (IS_SRIOV_VF(xe))
		return 0;

	if (huc->fw.has_gsc_headers) {
		ret = huc_alloc_gsc_pkt(huc);
		if (ret)
			goto out;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	xe_gt_err(gt, "HuC: initialization failed: %pe\n", ERR_PTR(ret));
	return ret;
}

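/**
 * xe_huc_init_post_hwconfig - complete HuC init after the hwconfig is loaded
 * @huc: the HuC structure
 *
 * On discrete GPUs, re-creates the firmware BO in VRAM; a no-op on integrated
 * parts or when the firmware is not loadable.
 *
 * Return: 0 on success, negative error code on failure.
 */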
int xe_huc_init_post_hwconfig(struct xe_huc *huc)
{
	struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
	struct xe_device *xe = huc_to_xe(huc);
	int ret;

	if (!IS_DGFX(huc_to_xe(huc)))
		return 0;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
	if (ret)
		return ret;

	return 0;
}

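/**
 * xe_huc_upload - load the HuC image into its target memory
 * @huc: the HuC structure
 *
 * Thin wrapper around the common uC firmware upload path; does nothing if the
 * firmware is not loadable.
 *
 * Return: 0 on success or when there is nothing to do, negative error code on
 * failure.
 */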
int xe_huc_upload(struct xe_huc *huc)
{
	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;
	return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}

#define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
#define huc_auth_msg_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)

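/*
 * Emit a PXP43_CMDID_NEW_HUC_AUTH request at wr_offset in the packet buffer,
 * pointing the GSC at the HuC image address and size. Returns the offset just
 * past the emitted message.
 */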
static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
				 u32 wr_offset, u32 huc_offset, u32 huc_size)
{
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));

	huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
	huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
	huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
	huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
			sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
	huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
	huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);

	return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
}

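/*
 * Ask the GSC to authenticate the HuC by submitting the pre-built PXP auth
 * message through the GSCCS. The submission is retried (up to 5 times, with a
 * 50ms sleep in between) while the GSC reports the request as still pending,
 * and an "already authenticated" reply is treated as success.
 */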
static int huc_auth_via_gsccs(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *pkt = huc->gsc_pkt;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	u32 out_status;
	int retry = 5;
	int err = 0;

	if (!pkt)
		return -ENODEV;

	ggtt_offset = xe_bo_ggtt_addr(pkt);

	wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
				       sizeof(struct pxp43_new_huc_auth_in));
	wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
					  xe_bo_ggtt_addr(huc->fw.bo),
					  huc->fw.bo->size);
	do {
		err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
					       ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
					       PXP43_HUC_AUTH_INOUT_SIZE);
		if (err)
			break;

		if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
						    PXP43_HUC_AUTH_INOUT_SIZE)) {
			err = -EBUSY;
			msleep(50);
		}
	} while (--retry && err == -EBUSY);

	if (err) {
		xe_gt_err(gt, "HuC: failed to submit GSC request to auth: %pe\n", ERR_PTR(err));
		return err;
	}

	err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
				     sizeof(struct pxp43_huc_auth_out), &rd_offset);
	if (err) {
		xe_gt_err(gt, "HuC: invalid GSC reply for auth: %pe\n", ERR_PTR(err));
		return err;
	}

	/*
	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
	 * authenticated. If the same error is ever returned with HuC not loaded
	 * we'll still catch it when we check the authentication bit later.
	 */
	out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
	if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
		xe_gt_err(gt, "HuC: authentication failed with GSC error = %#x\n", out_status);
		return -EIO;
	}

	return 0;
}

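/* Per-auth-path name, status register and "auth done" bit to poll for. */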
static const struct {
	const char *name;
	struct xe_reg reg;
	u32 val;
} huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
	[XE_HUC_AUTH_VIA_GUC] = { "GuC",
				  HUC_KERNEL_LOAD_INFO,
				  HUC_LOAD_SUCCESSFUL },
	[XE_HUC_AUTH_VIA_GSC] = { "GSC",
				  HECI_FWSTS5(MTL_GSC_HECI1_BASE),
				  HECI1_FWSTS5_HUC_AUTH_DONE },
};

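/**
 * xe_huc_is_authenticated - check if the HuC is authenticated
 * @huc: the HuC structure
 * @type: authentication path to check (via GuC or via GSC)
 *
 * Return: true if the status register for @type has the "auth done" bit set,
 * false otherwise.
 */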
bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_gt *gt = huc_to_gt(huc);

	return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
}

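/**
 * xe_huc_auth - authenticate the HuC via the requested path
 * @huc: the HuC structure
 * @type: authentication path to use (via GuC or via GSC)
 *
 * Triggers authentication through the GuC or the GSC and then waits for the
 * matching status register to report the firmware as verified. If the HuC is
 * already authenticated (e.g. it survived a reset), it is just marked as
 * running.
 *
 * Return: 0 on success or when there is nothing to do, negative error code on
 * failure.
 */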
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_guc *guc = huc_to_guc(huc);
	int ret;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	/* On newer platforms the HuC survives reset, so no need to re-auth */
	if (xe_huc_is_authenticated(huc, type)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
		return 0;
	}

	if (!xe_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	switch (type) {
	case XE_HUC_AUTH_VIA_GUC:
		ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
				      xe_uc_fw_rsa_offset(&huc->fw));
		break;
	case XE_HUC_AUTH_VIA_GSC:
		ret = huc_auth_via_gsccs(huc);
		break;
	default:
		XE_WARN_ON(type);
		return -EINVAL;
	}
	if (ret) {
		xe_gt_err(gt, "HuC: failed to trigger auth via %s: %pe\n",
			  huc_auth_modes[type].name, ERR_PTR(ret));
		goto fail;
	}

	ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
			     huc_auth_modes[type].val, 100000, NULL, false);
	if (ret) {
		xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret));
		goto fail;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
	xe_gt_dbg(gt, "HuC: authenticated via %s\n", huc_auth_modes[type].name);

	return 0;

fail:
	xe_gt_err(gt, "HuC: authentication via %s failed: %pe\n",
		  huc_auth_modes[type].name, ERR_PTR(ret));
	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);

	return ret;
}

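/**
 * xe_huc_sanitize - reset the HuC firmware load/auth tracking
 * @huc: the HuC structure
 *
 * Resets the firmware state so that a subsequent load cycle starts from a
 * clean, loadable state.
 */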
void xe_huc_sanitize(struct xe_huc *huc)
{
	xe_uc_fw_sanitize(&huc->fw);
}

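/**
 * xe_huc_print_info - print HuC firmware and status information
 * @huc: the HuC structure
 * @p: the &drm_printer to print into
 *
 * Prints the firmware state and, when the firmware is enabled and GT
 * forcewake can be taken, the raw HUC_KERNEL_LOAD_INFO status register.
 */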
void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
{
	struct xe_gt *gt = huc_to_gt(huc);
	int err;

	xe_uc_fw_print(&huc->fw, p);

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return;

	drm_printf(p, "\nHuC status: 0x%08x\n",
		   xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}