// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc_proxy.h"

#include <linux/component.h>
#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <drm/intel/i915_component.h>
#include <drm/intel/i915_gsc_proxy_mei_interface.h>

#include "abi/gsc_proxy_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "xe_bo.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"
#include "xe_tile.h"

/*
 * GSC proxy:
 * The GSC uC needs to communicate with the CSME to perform certain operations.
 * Since the GSC can't perform this communication directly on platforms where it
 * is integrated in GT, the graphics driver needs to transfer the messages from
 * GSC to CSME and back. The proxy flow must be manually started after the GSC
 * is loaded to signal to GSC that we're ready to handle its messages and allow
 * it to query its init data from CSME; GSC will then trigger an HECI2 interrupt
 * if it needs to send messages to CSME again.
 * The proxy flow is as follows:
 * 1 - Xe submits a request to GSC asking for the message to CSME
 * 2 - GSC replies with the proxy header + payload for CSME
 * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
 * 4 - CSME replies with the proxy header + payload for GSC
 * 5 - Xe submits a request to GSC with the reply from CSME
 * 6 - GSC replies either with a new header + payload (same as step 2, so we
 *     restart from there) or with an end message.
 */

/*
 * The component should load quite quickly in most cases, but it could take
 * a bit. Using a very big timeout just to cover the worst case scenario
 */
#define GSC_PROXY_INIT_TIMEOUT_MS 20000

/* shorthand define for code compactness */
#define PROXY_HDR_SIZE (sizeof(struct xe_gsc_proxy_header))

/* the protocol supports up to 32K in each direction */
#define GSC_PROXY_BUFFER_SIZE SZ_32K
#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u32 fwsts1 = xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE));

	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
}

int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	/* Proxy init can take up to 500ms, so wait double that for safety */
	return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
			      HECI1_FWSTS1_CURRENT_STATE,
			      HECI1_FWSTS1_PROXY_STATE_NORMAL,
			      USEC_PER_SEC, NULL, false);
}

static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	/* make sure we never accidentally write the RST bit */
	clr |= HECI_H_CSR_RST;

	xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
}

static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
{
	/* The status bit is cleared by writing to it */
	__gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
}

static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled)
{
	u32 set = enabled ? HECI_H_CSR_IE : 0;
	u32 clr = enabled ? 0 : HECI_H_CSR_IE;

	__gsc_proxy_irq_rmw(gsc, clr, set);
}
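/*
 * Relay a message to the CSME via the mei proxy component (step 3 of the flow
 * above) and read its reply back into the from_csme buffer (step 4). Returns
 * the number of bytes received from the CSME on success or a negative errno.
 */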
static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct i915_gsc_proxy_component *comp = gsc->proxy.component;
	int ret;

	ret = comp->ops->send(comp->mei_dev, gsc->proxy.to_csme, size);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to send CSME proxy message\n");
		return ret;
	}

	ret = comp->ops->recv(comp->mei_dev, gsc->proxy.from_csme, GSC_PROXY_BUFFER_SIZE);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to receive CSME proxy message\n");
		return ret;
	}

	return ret;
}

static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u64 addr_in = xe_bo_ggtt_addr(gsc->proxy.bo);
	u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
	int err;

	/* the message must fit in the send buffer */
	if (size > GSC_PROXY_BUFFER_SIZE) {
		xe_gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
		return -EINVAL;
	}

	err = xe_gsc_pkt_submit_kernel(gsc, addr_in, size,
				       addr_out, GSC_PROXY_BUFFER_SIZE);
	if (err) {
		xe_gt_err(gt, "Failed to submit gsc proxy rq (%pe)\n", ERR_PTR(err));
		return err;
	}

	return 0;
}

static int validate_proxy_header(struct xe_gt *gt,
				 struct xe_gsc_proxy_header *header,
				 u32 source, u32 dest, u32 max_size)
{
	u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
	u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
	int ret = 0;

	if (header->destination != dest || header->source != source) {
		ret = -ENOEXEC;
		goto out;
	}

	if (length + PROXY_HDR_SIZE > max_size) {
		ret = -E2BIG;
		goto out;
	}

	/* We only care about the status if this is a message for the driver */
	if (dest == GSC_PROXY_ADDRESSING_KMD && header->status != 0) {
		ret = -EIO;
		goto out;
	}

	switch (type) {
	case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
		if (length > 0)
			break;
		fallthrough;
	case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
		ret = -EIO;
		break;
	default:
		break;
	}

out:
	if (ret)
		xe_gt_err(gt,
			  "GSC proxy error: s=0x%x[0x%x], d=0x%x[0x%x], t=%u, l=0x%x, st=0x%x\n",
			  header->source, source, header->destination, dest,
			  type, length, header->status);

	return ret;
}

#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_, val_)

#define proxy_header_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_)

static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
{
	xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE);

	proxy_header_wr(xe, map, offset, hdr,
			FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
			FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0));

	proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD);
	proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC);
	proxy_header_wr(xe, map, offset, status, 0);

	return offset + PROXY_HDR_SIZE;
}
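/*
 * Run one full proxy session, i.e. steps 1-6 of the flow described at the top
 * of this file: keep bouncing messages between the GSC and the CSME,
 * validating each proxy header along the way, until the GSC replies with an
 * end message or an error occurs.
 */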
static int proxy_query(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gsc_proxy_header *to_csme_hdr = gsc->proxy.to_csme;
	void *to_csme_payload = gsc->proxy.to_csme + PROXY_HDR_SIZE;
	u32 wr_offset;
	u32 reply_offset;
	u32 size;
	int ret;

	wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
				       HECI_MEADDRESS_PROXY, 0, PROXY_HDR_SIZE);
	wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset);

	size = wr_offset;

	while (1) {
		/*
		 * Poison the GSC response header space to make sure we don't
		 * read a stale reply.
		 */
		xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0);

		/* send proxy message to GSC */
		ret = proxy_send_to_gsc(gsc, size);
		if (ret)
			goto proxy_error;

		/* check the reply from GSC */
		ret = xe_gsc_read_out_header(xe, &gsc->proxy.from_gsc, 0,
					     PROXY_HDR_SIZE, &reply_offset);
		if (ret) {
			xe_gt_err(gt, "Invalid gsc header in proxy reply (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the proxy header reply from GSC */
		xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
				   reply_offset, PROXY_HDR_SIZE);

		/* stop if the GSC sent the end message, after validating its header */
		if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END) {
			ret = validate_proxy_header(gt, to_csme_hdr,
						    GSC_PROXY_ADDRESSING_GSC,
						    GSC_PROXY_ADDRESSING_KMD,
						    GSC_PROXY_BUFFER_SIZE - reply_offset);
			break;
		}

		/* make sure the GSC-to-CSME proxy header is sane */
		ret = validate_proxy_header(gt, to_csme_hdr,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid GSC to CSME proxy header! (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the rest of the message */
		size = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_csme_hdr->hdr);
		xe_map_memcpy_from(xe, to_csme_payload, &gsc->proxy.from_gsc,
				   reply_offset + PROXY_HDR_SIZE, size);

		/* send the GSC message to the CSME */
		ret = proxy_send_to_csme(gsc, size + PROXY_HDR_SIZE);
		if (ret < 0)
			goto proxy_error;

		/* reply size from CSME, including the proxy header */
		size = ret;
		if (size < PROXY_HDR_SIZE) {
			xe_gt_err(gt, "CSME to GSC proxy msg too small: 0x%x\n", size);
			ret = -EPROTO;
			goto proxy_error;
		}

		/* make sure the CSME-to-GSC proxy header is sane */
		ret = validate_proxy_header(gt, gsc->proxy.from_csme,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
			goto proxy_error;
		}

		/* Emit a new header for sending the reply to the GSC */
		wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
					       HECI_MEADDRESS_PROXY, 0, size);

		/* copy the CSME reply and update the total msg size to include the GSC header */
		xe_map_memcpy_to(xe, &gsc->proxy.to_gsc, wr_offset, gsc->proxy.from_csme, size);

		size += wr_offset;
	}

proxy_error:
	return ret < 0 ? ret : 0;
}
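/**
 * xe_gsc_proxy_request_handler() - handle a pending proxy request from the GSC
 * @gsc: the GSC uC
 *
 * Waits for the mei proxy component to be bound, if it isn't yet, and then
 * runs a full proxy session to service the request.
 *
 * Return: 0 if the proxy session completed successfully, a negative errno
 * otherwise.
 */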
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	int slept;
	int err;

	if (!gsc->proxy.component_added)
		return -ENODEV;

	/* when GSC is loaded, we can queue this before the component is bound */
	for (slept = 0; slept < GSC_PROXY_INIT_TIMEOUT_MS; slept += 100) {
		if (gsc->proxy.component)
			break;

		msleep(100);
	}

	mutex_lock(&gsc->proxy.mutex);
	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy component not bound!\n");
		err = -EIO;
	} else {
		/*
		 * clear the pending interrupt and allow new proxy requests to
		 * be generated while we handle the current one
		 */
		gsc_proxy_irq_clear(gsc);
		err = proxy_query(gsc);
	}
	mutex_unlock(&gsc->proxy.mutex);
	return err;
}

void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	if (unlikely(!iir))
		return;

	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n");
		return;
	}

	spin_lock(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_SW_PROXY;
	spin_unlock(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}

static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
				       struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe_device(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = data;
	gsc->proxy.component->mei_dev = mei_kdev;
	mutex_unlock(&gsc->proxy.mutex);

	return 0;
}

static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
					  struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe_device(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	xe_gsc_wait_for_worker_completion(gsc);

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = NULL;
	mutex_unlock(&gsc->proxy.mutex);
}

static const struct component_ops xe_gsc_proxy_component_ops = {
	.bind = xe_gsc_proxy_component_bind,
	.unbind = xe_gsc_proxy_component_unbind,
};

static int proxy_channel_alloc(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	void *csme;

	csme = drmm_kzalloc(&xe->drm, GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
	if (!csme)
		return -ENOMEM;

	bo = xe_managed_bo_create_pin_map(xe, tile, GSC_PROXY_CHANNEL_SIZE,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	gsc->proxy.bo = bo;
	gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
	gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE);
	gsc->proxy.to_csme = csme;
	gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;

	return 0;
}
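/*
 * Tear down the proxy in reverse setup order: disable the HECI2 interrupt
 * (taking a runtime PM and forcewake reference so the write reaches the
 * hardware), flush any proxy work still in flight and finally remove the mei
 * component.
 */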
static void xe_gsc_proxy_remove(void *arg)
{
	struct xe_gsc *gsc = arg;
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref = 0;

	if (!gsc->proxy.component_added)
		return;

	/* disable HECI2 IRQs */
	xe_pm_runtime_get(xe);
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (!fw_ref)
		xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");

	/* try to disable the irq even if forcewake failed */
	gsc_proxy_irq_toggle(gsc, false);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_pm_runtime_put(xe);

	xe_gsc_wait_for_worker_completion(gsc);

	component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
	gsc->proxy.component_added = false;
}

/**
 * xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
 * @gsc: the GSC uC
 *
 * Return: 0 if the initialization was successful, a negative errno otherwise.
 */
int xe_gsc_proxy_init(struct xe_gsc *gsc)
{
	int err;
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	mutex_init(&gsc->proxy.mutex);

	if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
		xe_gt_info(gt, "can't init GSC proxy due to missing mei component\n");
		return -ENODEV;
	}

	/* no multi-tile devices with this feature yet */
	if (!xe_tile_is_root(tile)) {
		xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
		return -EINVAL;
	}

	err = proxy_channel_alloc(gsc);
	if (err)
		return err;

	err = component_add_typed(xe->drm.dev, &xe_gsc_proxy_component_ops,
				  I915_COMPONENT_GSC_PROXY);
	if (err < 0) {
		xe_gt_err(gt, "Failed to add GSC_PROXY component (%pe)\n", ERR_PTR(err));
		return err;
	}

	gsc->proxy.component_added = true;

	return devm_add_action_or_reset(xe->drm.dev, xe_gsc_proxy_remove, gsc);
}

/**
 * xe_gsc_proxy_start() - start the proxy by submitting the first request
 * @gsc: the GSC uC
 *
 * Return: 0 if the proxy is now enabled, a negative errno otherwise.
 */
int xe_gsc_proxy_start(struct xe_gsc *gsc)
{
	int err;

	/* enable the proxy interrupt in the GSC shim layer */
	gsc_proxy_irq_toggle(gsc, true);

	/*
	 * The handling of the first proxy request must be manually triggered
	 * to notify the GSC that we're ready to support the proxy flow.
	 */
	err = xe_gsc_proxy_request_handler(gsc);
	if (err)
		return err;

	if (!xe_gsc_proxy_init_done(gsc)) {
		xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
		return -EIO;
	}

	return 0;
}