// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_huc.h"

#include <linux/delay.h>

#include <drm/drm_managed.h>

#include "abi/gsc_pxp_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "regs/xe_guc_regs.h"
#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_force_wake.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_sriov.h"
#include "xe_uc_fw.h"

static struct xe_gt *
huc_to_gt(struct xe_huc *huc)
{
	return container_of(huc, struct xe_gt, uc.huc);
}

static struct xe_device *
huc_to_xe(struct xe_huc *huc)
{
	return gt_to_xe(huc_to_gt(huc));
}

static struct xe_guc *
huc_to_guc(struct xe_huc *huc)
{
	return &container_of(huc, struct xe_uc, huc)->guc;
}

#define PXP43_HUC_AUTH_INOUT_SIZE SZ_4K
static int huc_alloc_gsc_pkt(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;

	/* we use a single object for both input and output */
	bo = xe_managed_bo_create_pin_map(xe, gt_to_tile(gt),
					  PXP43_HUC_AUTH_INOUT_SIZE * 2,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	huc->gsc_pkt = bo;

	return 0;
}

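/**
 * xe_huc_init() - HuC initialization
 * @huc: The HuC object
 *
 * Initializes the HuC firmware descriptor and, when the firmware blob
 * carries GSC headers, allocates the packet buffer used for GSC-based
 * authentication. On GTs where the HuC is not available the firmware is
 * marked as not supported and the function returns success.
 *
 * Return: 0 on success or if the HuC is not supported/enabled on this GT,
 * negative error code on failure.
 */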
int xe_huc_init(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	int ret;

	huc->fw.type = XE_UC_FW_TYPE_HUC;

	/*
	 * The HuC is only available on the media GT on most platforms. The
	 * exceptions to that rule are the old Xe1 platforms, where there was
	 * no separate GT for the media IP, so the HuC was part of the primary
	 * GT. Such platforms have graphics versions 12.55 and earlier.
	 */
	if (!xe_gt_is_media_type(gt) && GRAPHICS_VERx100(xe) > 1255) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_NOT_SUPPORTED);
		return 0;
	}

	ret = xe_uc_fw_init(&huc->fw);
	if (ret)
		goto out;

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return 0;

	if (IS_SRIOV_VF(xe))
		return 0;

	if (huc->fw.has_gsc_headers) {
		ret = huc_alloc_gsc_pkt(huc);
		if (ret)
			goto out;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOADABLE);

	return 0;

out:
	xe_gt_err(gt, "HuC: initialization failed: %pe\n", ERR_PTR(ret));
	return ret;
}

int xe_huc_init_post_hwconfig(struct xe_huc *huc)
{
	struct xe_tile *tile = gt_to_tile(huc_to_gt(huc));
	struct xe_device *xe = huc_to_xe(huc);
	int ret;

	if (!IS_DGFX(huc_to_xe(huc)))
		return 0;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	ret = xe_managed_bo_reinit_in_vram(xe, tile, &huc->fw.bo);
	if (ret)
		return ret;

	return 0;
}

int xe_huc_upload(struct xe_huc *huc)
{
	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;
	return xe_uc_fw_upload(&huc->fw, 0, HUC_UKERNEL);
}

#define huc_auth_msg_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct pxp43_new_huc_auth_in, field_, val_)
#define huc_auth_msg_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct pxp43_huc_auth_out, field_)

static u32 huc_emit_pxp_auth_msg(struct xe_device *xe, struct iosys_map *map,
				 u32 wr_offset, u32 huc_offset, u32 huc_size)
{
	xe_map_memset(xe, map, wr_offset, 0, sizeof(struct pxp43_new_huc_auth_in));

	huc_auth_msg_wr(xe, map, wr_offset, header.api_version, PXP_APIVER(4, 3));
	huc_auth_msg_wr(xe, map, wr_offset, header.command_id, PXP43_CMDID_NEW_HUC_AUTH);
	huc_auth_msg_wr(xe, map, wr_offset, header.status, 0);
	huc_auth_msg_wr(xe, map, wr_offset, header.buffer_len,
			sizeof(struct pxp43_new_huc_auth_in) - sizeof(struct pxp_cmd_header));
	huc_auth_msg_wr(xe, map, wr_offset, huc_base_address, huc_offset);
	huc_auth_msg_wr(xe, map, wr_offset, huc_size, huc_size);

	return wr_offset + sizeof(struct pxp43_new_huc_auth_in);
}

static int huc_auth_via_gsccs(struct xe_huc *huc)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *pkt = huc->gsc_pkt;
	u32 wr_offset;
	u32 rd_offset;
	u64 ggtt_offset;
	u32 out_status;
	int retry = 5;
	int err = 0;

	if (!pkt)
		return -ENODEV;

	ggtt_offset = xe_bo_ggtt_addr(pkt);

	wr_offset = xe_gsc_emit_header(xe, &pkt->vmap, 0, HECI_MEADDRESS_PXP, 0,
				       sizeof(struct pxp43_new_huc_auth_in));
	wr_offset = huc_emit_pxp_auth_msg(xe, &pkt->vmap, wr_offset,
					  xe_bo_ggtt_addr(huc->fw.bo),
					  xe_bo_size(huc->fw.bo));
	do {
		err = xe_gsc_pkt_submit_kernel(&gt->uc.gsc, ggtt_offset, wr_offset,
					       ggtt_offset + PXP43_HUC_AUTH_INOUT_SIZE,
					       PXP43_HUC_AUTH_INOUT_SIZE);
		if (err)
			break;

		if (xe_gsc_check_and_update_pending(xe, &pkt->vmap, 0, &pkt->vmap,
						    PXP43_HUC_AUTH_INOUT_SIZE)) {
			err = -EBUSY;
			msleep(50);
		}
	} while (--retry && err == -EBUSY);

	if (err) {
		xe_gt_err(gt, "HuC: failed to submit GSC request to auth: %pe\n", ERR_PTR(err));
		return err;
	}

	err = xe_gsc_read_out_header(xe, &pkt->vmap, PXP43_HUC_AUTH_INOUT_SIZE,
				     sizeof(struct pxp43_huc_auth_out), &rd_offset);
	if (err) {
		xe_gt_err(gt, "HuC: invalid GSC reply for auth: %pe\n", ERR_PTR(err));
		return err;
	}

	/*
	 * The GSC will return PXP_STATUS_OP_NOT_PERMITTED if the HuC is already
	 * authenticated. If the same error is ever returned with HuC not loaded
	 * we'll still catch it when we check the authentication bit later.
	 */
	out_status = huc_auth_msg_rd(xe, &pkt->vmap, rd_offset, header.status);
	if (out_status != PXP_STATUS_SUCCESS && out_status != PXP_STATUS_OP_NOT_PERMITTED) {
		xe_gt_err(gt, "HuC: authentication failed with GSC error = %#x\n", out_status);
		return -EIO;
	}

	return 0;
}

static const struct {
	const char *name;
	struct xe_reg reg;
	u32 val;
} huc_auth_modes[XE_HUC_AUTH_TYPES_COUNT] = {
	[XE_HUC_AUTH_VIA_GUC] = { "GuC",
				  HUC_KERNEL_LOAD_INFO,
				  HUC_LOAD_SUCCESSFUL },
	[XE_HUC_AUTH_VIA_GSC] = { "GSC",
				  HECI_FWSTS5(MTL_GSC_HECI1_BASE),
				  HECI1_FWSTS5_HUC_AUTH_DONE },
};

bool xe_huc_is_authenticated(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_gt *gt = huc_to_gt(huc);

	return xe_mmio_read32(&gt->mmio, huc_auth_modes[type].reg) & huc_auth_modes[type].val;
}

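/**
 * xe_huc_auth() - Authenticate HuC
 * @huc: The HuC object
 * @type: authentication type (via GuC or via GSC)
 *
 * Triggers HuC authentication through the selected agent and waits for the
 * matching status register to report that authentication completed. If the
 * HuC is already authenticated (e.g. it survived a reset), the firmware is
 * simply marked as running again.
 *
 * Return: 0 on success or if there is nothing to do, negative error code on
 * failure.
 */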
int xe_huc_auth(struct xe_huc *huc, enum xe_huc_auth_types type)
{
	struct xe_gt *gt = huc_to_gt(huc);
	struct xe_guc *guc = huc_to_guc(huc);
	int ret;

	if (!xe_uc_fw_is_loadable(&huc->fw))
		return 0;

	/* On newer platforms the HuC survives reset, so no need to re-auth */
	if (xe_huc_is_authenticated(huc, type)) {
		xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
		return 0;
	}

	if (!xe_uc_fw_is_loaded(&huc->fw))
		return -ENOEXEC;

	switch (type) {
	case XE_HUC_AUTH_VIA_GUC:
		ret = xe_guc_auth_huc(guc, xe_bo_ggtt_addr(huc->fw.bo) +
				      xe_uc_fw_rsa_offset(&huc->fw));
		break;
	case XE_HUC_AUTH_VIA_GSC:
		ret = huc_auth_via_gsccs(huc);
		break;
	default:
		XE_WARN_ON(type);
		return -EINVAL;
	}
	if (ret) {
		xe_gt_err(gt, "HuC: failed to trigger auth via %s: %pe\n",
			  huc_auth_modes[type].name, ERR_PTR(ret));
		goto fail;
	}

	ret = xe_mmio_wait32(&gt->mmio, huc_auth_modes[type].reg, huc_auth_modes[type].val,
			     huc_auth_modes[type].val, 100000, NULL, false);
	if (ret) {
		xe_gt_err(gt, "HuC: firmware not verified: %pe\n", ERR_PTR(ret));
		goto fail;
	}

	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_RUNNING);
	xe_gt_dbg(gt, "HuC: authenticated via %s\n", huc_auth_modes[type].name);

	return 0;

fail:
	xe_gt_err(gt, "HuC: authentication via %s failed: %pe\n",
		  huc_auth_modes[type].name, ERR_PTR(ret));
	xe_uc_fw_change_status(&huc->fw, XE_UC_FIRMWARE_LOAD_FAIL);

	return ret;
}

void xe_huc_sanitize(struct xe_huc *huc)
{
	xe_uc_fw_sanitize(&huc->fw);
}

void xe_huc_print_info(struct xe_huc *huc, struct drm_printer *p)
{
	struct xe_gt *gt = huc_to_gt(huc);
	unsigned int fw_ref;

	xe_uc_fw_print(&huc->fw, p);

	if (!xe_uc_fw_is_enabled(&huc->fw))
		return;

	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref)
		return;

	drm_printf(p, "\nHuC status: 0x%08x\n",
		   xe_mmio_read32(&gt->mmio, HUC_KERNEL_LOAD_INFO));

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
}