// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_huc.h"

#include <drm/drm_managed.h>

#include "abi/gsc_pxp_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_uc_fw.h"

static struct xe_gt *
huc_to_gt(struct xe_huc *huc)
{
	return container_of(huc, struct xe_gt, uc.huc);
}

static struct xe_device *
huc_to_xe(struct xe_huc *huc)
{
	return gt_to_xe(huc_to_gt(huc));
}

static struct xe_guc *
huc_to_guc(struct xe_huc *huc)
{
	return &container_of(huc, struct xe_uc, huc)->guc;
}

static void free_gsc_pkt(struct drm_device *drm, void *arg)
{
	struct xe_huc *huc = arg;

	xe_bo_unpin_map_no_vm(huc->gsc_pkt);
	huc->gsc_pkt = NULL;
}

#define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
static int huc_alloc_gsc_pkt(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;

	/* we use a single object for both input and output */
	bo = xe_bo_create_pin_map(xe, gt_to_tile(gt), NULL,
				  PXP43_HUC_AUTH_INOUT_SIZE * 2,
				  ttm_bo_type_kernel,
				  XE_BO_CREATE_SYSTEM_BIT |
				  XE_BO_CREATE_GGTT_BIT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	huc->gsc_pkt = bo;

	/* drmm_add_action_or_reset() already calls free_gsc_pkt() on failure */
	return drmm_add_action_or_reset(&xe->drm, free_gsc_pkt, huc);
}

int xe_huc_init(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	int ret;

	huc->fw.type = XE_UC_FW_TYPE_HUC;

	/* On platforms with a media GT the HuC is only available there */
	if (tile->media_gt && (gt != tile->media_gt)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
		return 0;
	}

	ret = xe_uc_fw_init(&huc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return 0;

	if (huc->fw.has_gsc_headers) {
		ret = huc_alloc_gsc_pkt(huc);
		if (ret)
			goto out;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	drm_err(&xe->drm, "HuC init failed with %d\n", ret);
	return ret;
}

int xe_huc_upload(struct xe_huc *huc)
{
	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;
	return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}

#define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
#define huc_auth_msg_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)

static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
				 u32 wr_offset, u32 huc_offset, u32 huc_size)
{
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));

	huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
	huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
	huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
	huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
			sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
	huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
	huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);

	return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
}

static int huc_auth_via_gsccs(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *pkt = huc->gsc_pkt;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	u32 out_status;
	int retry = 5;
	int err = 0;

	if (!pkt)
		return -ENODEV;

	ggtt_offset = xe_bo_ggtt_addr(pkt);

	wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
				       sizeof(struct pxp43_new_huc_auth_in));
	wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
					  xe_bo_ggtt_addr(huc->fw.bo),
					  huc->fw.bo->size);
	do {
		err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
					       ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
					       PXP43_HUC_AUTH_INOUT_SIZE);
		if (err)
			break;

		if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
						    PXP43_HUC_AUTH_INOUT_SIZE)) {
			err = -EBUSY;
			msleep(50);
		}
	} while (--retry && err == -EBUSY);

	if (err) {
		drm_err(&xe->drm, "failed to submit GSC request to auth: %d\n", err);
		return err;
	}

	err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
				     sizeof(struct pxp43_huc_auth_out), &rd_offset);
	if (err) {
		drm_err(&xe->drm, "HuC: invalid GSC reply for auth (err=%d)\n", err);
		return err;
	}

	/*
	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
	 * authenticated. If the same error is ever returned with HuC not loaded
	 * we'll still catch it when we check the authentication bit later.
	 */
	out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
	if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
		drm_err(&xe->drm, "auth failed with GSC error = 0x%x\n", out_status);
		return -EIO;
	}

	return 0;
}

static const struct {
	const char *name;
	struct xe_reg reg;
	u32 val;
} huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
	[XE_HUC_AUTH_VIA_GUC] = { "GuC",
				  HUC_KERNEL_LOAD_INFO,
				  HUC_LOAD_SUCCESSFUL },
	[XE_HUC_AUTH_VIA_GSC] = { "GSC",
				  HECI_FWSTS5(MTL_GSC_HECI1_BASE),
				  HECI1_FWSTS5_HUC_AUTH_DONE },
};

bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_gt *gt = huc_to_gt(huc);

	return xe_mmio_read32(gt, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
}

int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_device *xe = huc_to_xe(huc);
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_guc *guc = huc_to_guc(huc);
	int ret;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	/* On newer platforms the HuC survives reset, so no need to re-auth */
	if (xe_huc_is_authenticated(huc, type)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
		return 0;
	}

	if (!xe_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	switch (type) {
	case XE_HUC_AUTH_VIA_GUC:
		ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
				      xe_uc_fw_rsa_offset(&huc->fw));
		break;
	case XE_HUC_AUTH_VIA_GSC:
		ret = huc_auth_via_gsccs(huc);
		break;
	default:
		XE_WARN_ON(type);
		return -EINVAL;
	}
	if (ret) {
		drm_err(&xe->drm, "Failed to trigger HuC auth via %s: %d\n",
			huc_auth_modes[type].name, ret);
		goto fail;
	}

	ret = xe_mmio_wait32(gt, huc_auth_modes[type].reg, huc_auth_modes[type].val,
			     huc_auth_modes[type].val, 100000, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "HuC: Firmware not verified %d\n", ret);
		goto fail;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
	drm_dbg(&xe->drm, "HuC authenticated via %s\n", huc_auth_modes[type].name);

	return 0;

fail:
	drm_err(&xe->drm, "HuC: Auth via %s failed: %d\n",
		huc_auth_modes[type].name, ret);
	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);

	return ret;
}

void xe_huc_sanitize(struct xe_huc *huc)
{
	if (!xe_uc_fw_is_loadable(&huc->fw))
		return;
	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);
}

void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
{
	struct xe_gt *gt = huc_to_gt(huc);
	int err;

	xe_uc_fw_print(&huc->fw, p);

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return;

	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (err)
		return;

	drm_printf(p, "\nHuC status: 0x%08x\n",
		   xe_mmio_read32(gt, HUC_KERNEL_LOAD_INFO));

	xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
}