// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc_proxy.h"

#include <linux/component.h>
#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <drm/intel/i915_component.h>
#include <drm/intel/i915_gsc_proxy_mei_interface.h>

#include "abi/gsc_proxy_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "xe_bo.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"

/*
 * GSC proxy:
 * The GSC uC needs to communicate with the CSME to perform certain operations.
 * Since the GSC can't perform this communication directly on platforms where it
 * is integrated in GT, the graphics driver needs to transfer the messages from
 * GSC to CSME and back. The proxy flow must be manually started after the GSC
 * is loaded to signal to GSC that we're ready to handle its messages and allow
 * it to query its init data from CSME; GSC will then trigger an HECI2 interrupt
 * if it needs to send messages to CSME again.
 * The proxy flow is as follows:
 * 1 - Xe submits a request to GSC asking for the message to CSME
 * 2 - GSC replies with the proxy header + payload for CSME
 * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
 * 4 - CSME replies with the proxy header + payload for GSC
 * 5 - Xe submits a request to GSC with the reply from CSME
 * 6 - GSC replies either with a new header + payload (same as step 2, so we
 *     restart from there) or with an end message.
 */

/*
 * The component should load quite quickly in most cases, but it could take
 * a while. Use a very long timeout just to cover the worst-case scenario.
 */
#define GSC_PROXY_INIT_TIMEOUT_MS 20000

/* shorthand define for code compactness */
#define PROXY_HDR_SIZE (sizeof(struct xe_gsc_proxy_header))

/* the protocol supports up to 32K in each direction */
#define GSC_PROXY_BUFFER_SIZE SZ_32K
#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)
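
/*
 * Each side of the channel uses a GSC_PROXY_BUFFER_SIZE buffer per direction,
 * for a total of GSC_PROXY_CHANNEL_SIZE bytes per side; see
 * proxy_channel_alloc() for how the buffers are allocated and split.
 */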

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

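/**
 * xe_gsc_proxy_init_done() - check if GSC proxy initialization is complete
 * @gsc: the GSC uC
 *
 * Check the HECI1 FWSTS1 register to see if the GSC FW reports that proxy
 * handling has reached its normal (i.e. completed) state.
 *
 * Return: true if proxy init is done, false otherwise.
 */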
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u32 fwsts1 = xe_mmio_read32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE));

	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
}

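/**
 * xe_gsc_wait_for_proxy_init_done() - wait for GSC proxy init to complete
 * @gsc: the GSC uC
 *
 * Poll the HECI1 FWSTS1 register until the GSC FW reports the proxy state as
 * normal, for up to 1 second.
 *
 * Return: 0 on success, a negative errno if the state is not reached in time.
 */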
int xe_gsc_wait_for_proxy_init_done(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	/* Proxy init can take up to 500ms, so wait double that for safety */
	return xe_mmio_wait32(&gt->mmio, HECI_FWSTS1(MTL_GSC_HECI1_BASE),
			      HECI1_FWSTS1_CURRENT_STATE,
			      HECI1_FWSTS1_PROXY_STATE_NORMAL,
			      USEC_PER_SEC, NULL, false);
}

static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	/* make sure we never accidentally write the RST bit */
	clr |= HECI_H_CSR_RST;

	xe_mmio_rmw32(&gt->mmio, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
}

static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
{
	/* The status bit is cleared by writing to it */
	__gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
}

static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled)
{
	u32 set = enabled ? HECI_H_CSR_IE : 0;
	u32 clr = enabled ? 0 : HECI_H_CSR_IE;

	__gsc_proxy_irq_rmw(gsc, clr, set);
}

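/*
 * Relay a message to the CSME via the mei proxy component: send the contents
 * of the to_csme buffer and then wait for the CSME reply, which is written
 * into the from_csme buffer.
 *
 * Returns the number of bytes received from the CSME on success (as reported
 * by the component's recv op), or a negative errno on failure.
 */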
static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct i915_gsc_proxy_component *comp = gsc->proxy.component;
	int ret;

	ret = comp->ops->send(comp->mei_dev, gsc->proxy.to_csme, size);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to send CSME proxy message\n");
		return ret;
	}

	ret = comp->ops->recv(comp->mei_dev, gsc->proxy.from_csme, GSC_PROXY_BUFFER_SIZE);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to receive CSME proxy message\n");
		return ret;
	}

	return ret;
}

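/*
 * Submit the contents of the to_gsc buffer to the GSC via a kernel GSC
 * submission (xe_gsc_pkt_submit_kernel()). The input and output locations
 * are the GGTT offsets of the two halves of the proxy BO (to_gsc and
 * from_gsc respectively).
 */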
static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u64 addr_in = xe_bo_ggtt_addr(gsc->proxy.bo);
	u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
	int err;

	/* the message, including the GSC and proxy headers, must fit in the buffer */
	if (size > GSC_PROXY_BUFFER_SIZE) {
		xe_gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
		return -EINVAL;
	}

	err = xe_gsc_pkt_submit_kernel(gsc, addr_in, size,
				       addr_out, GSC_PROXY_BUFFER_SIZE);
	if (err) {
		xe_gt_err(gt, "Failed to submit gsc proxy rq (%pe)\n", ERR_PTR(err));
		return err;
	}

	return 0;
}

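/*
 * Sanity-check a proxy header: the source and destination must match the
 * expected endpoints, the payload must fit in the remaining buffer space,
 * messages addressed to the driver must carry a zero status, and the message
 * type must be valid (a PAYLOAD message must have a non-empty payload).
 */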
static int validate_proxy_header(struct xe_gt *gt,
				 struct xe_gsc_proxy_header *header,
				 u32 source, u32 dest, u32 max_size)
{
	u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
	u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);
	int ret = 0;

	if (header->destination != dest || header->source != source) {
		ret = -ENOEXEC;
		goto out;
	}

	if (length + PROXY_HDR_SIZE > max_size) {
		ret = -E2BIG;
		goto out;
	}

	/* We only care about the status if this is a message for the driver */
	if (dest == GSC_PROXY_ADDRESSING_KMD && header->status != 0) {
		ret = -EIO;
		goto out;
	}

	switch (type) {
	case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
		if (length > 0)
			break;
		fallthrough;
	case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
		ret = -EIO;
		break;
	default:
		break;
	}

out:
	if (ret)
		xe_gt_err(gt,
			  "GSC proxy error: s=0x%x[0x%x], d=0x%x[0x%x], t=%u, l=0x%x, st=0x%x\n",
			  header->source, source, header->destination, dest,
			  type, length, header->status);

	return ret;
}

#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_, val_)

#define proxy_header_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_)

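/*
 * Write a proxy header at the given offset in the map: a PROXY_QUERY message
 * with no payload, addressed from the KMD to the GSC. Returns the offset of
 * the first byte after the header.
 */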
static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
{
	xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE);

	proxy_header_wr(xe, map, offset, hdr,
			FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
			FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0));

	proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD);
	proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC);
	proxy_header_wr(xe, map, offset, status, 0);

	return offset + PROXY_HDR_SIZE;
}

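/*
 * Run one full proxy session, implementing steps 1-6 of the flow described at
 * the top of this file: send a proxy query to the GSC, relay each GSC message
 * to the CSME and each CSME reply back to the GSC, validating the proxy
 * headers along the way, until the GSC sends a PROXY_END message.
 */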
static int proxy_query(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gsc_proxy_header *to_csme_hdr = gsc->proxy.to_csme;
	void *to_csme_payload = gsc->proxy.to_csme + PROXY_HDR_SIZE;
	u32 wr_offset;
	u32 reply_offset;
	u32 size;
	int ret;

	wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
				       HECI_MEADDRESS_PROXY, 0, PROXY_HDR_SIZE);
	wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset);

	size = wr_offset;

	while (1) {
		/*
		 * Poison the GSC response header space to make sure we don't
		 * read a stale reply.
		 */
		xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0);

		/* send proxy message to GSC */
		ret = proxy_send_to_gsc(gsc, size);
		if (ret)
			goto proxy_error;

		/* check the reply from GSC */
		ret = xe_gsc_read_out_header(xe, &gsc->proxy.from_gsc, 0,
					     PROXY_HDR_SIZE, &reply_offset);
		if (ret) {
			xe_gt_err(gt, "Invalid gsc header in proxy reply (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the proxy header reply from GSC */
		xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
				   reply_offset, PROXY_HDR_SIZE);

		/* Check the status and stop if this was the last message */
		if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END) {
			ret = validate_proxy_header(gt, to_csme_hdr,
						    GSC_PROXY_ADDRESSING_GSC,
						    GSC_PROXY_ADDRESSING_KMD,
						    GSC_PROXY_BUFFER_SIZE - reply_offset);
			break;
		}

		/* make sure the GSC-to-CSME proxy header is sane */
		ret = validate_proxy_header(gt, to_csme_hdr,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid GSC to CSME proxy header! (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the rest of the message */
		size = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_csme_hdr->hdr);
		xe_map_memcpy_from(xe, to_csme_payload, &gsc->proxy.from_gsc,
				   reply_offset + PROXY_HDR_SIZE, size);

		/* send the GSC message to the CSME */
		ret = proxy_send_to_csme(gsc, size + PROXY_HDR_SIZE);
		if (ret < 0)
			goto proxy_error;

		/* reply size from CSME, including the proxy header */
		size = ret;
		if (size < PROXY_HDR_SIZE) {
			xe_gt_err(gt, "CSME to GSC proxy msg too small: 0x%x\n", size);
			ret = -EPROTO;
			goto proxy_error;
		}

		/* make sure the CSME-to-GSC proxy header is sane */
		ret = validate_proxy_header(gt, gsc->proxy.from_csme,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
			goto proxy_error;
		}

		/* Emit a new header for sending the reply to the GSC */
		wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
					       HECI_MEADDRESS_PROXY, 0, size);

		/* copy the CSME reply and update the total msg size to include the GSC header */
		xe_map_memcpy_to(xe, &gsc->proxy.to_gsc, wr_offset, gsc->proxy.from_csme, size);

		size += wr_offset;
	}

proxy_error:
	return ret < 0 ? ret : 0;
}

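/**
 * xe_gsc_proxy_request_handler() - handle a pending GSC proxy request
 * @gsc: the GSC uC
 *
 * Wait (up to GSC_PROXY_INIT_TIMEOUT_MS) for the mei proxy component to be
 * bound, then clear the pending HECI2 interrupt and run a proxy query under
 * the proxy mutex.
 *
 * Return: 0 on success, a negative errno otherwise.
 */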
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	int slept;
	int err;

	if (!gsc->proxy.component_added)
		return -ENODEV;

	/* when GSC is loaded, we can queue this before the component is bound */
	for (slept = 0; slept < GSC_PROXY_INIT_TIMEOUT_MS; slept += 100) {
		if (gsc->proxy.component)
			break;

		msleep(100);
	}

	mutex_lock(&gsc->proxy.mutex);
	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy component not bound!\n");
		err = -EIO;
	} else {
		/*
		 * clear the pending interrupt and allow new proxy requests to
		 * be generated while we handle the current one
		 */
		gsc_proxy_irq_clear(gsc);
		err = proxy_query(gsc);
	}
	mutex_unlock(&gsc->proxy.mutex);
	return err;
}

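/**
 * xe_gsc_proxy_irq_handler() - handle an incoming GSC proxy interrupt
 * @gsc: the GSC uC
 * @iir: the HECI2 interrupt bits
 *
 * Mark a software proxy action as pending and queue the GSC worker, which
 * will call xe_gsc_proxy_request_handler() to service the request.
 */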
void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	if (unlikely(!iir))
		return;

	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n");
		return;
	}

	spin_lock(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_SW_PROXY;
	spin_unlock(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}

static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
				       struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe_device(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = data;
	gsc->proxy.component->mei_dev = mei_kdev;
	mutex_unlock(&gsc->proxy.mutex);

	return 0;
}

static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
					  struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe_device(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	xe_gsc_wait_for_worker_completion(gsc);

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = NULL;
	mutex_unlock(&gsc->proxy.mutex);
}

static const struct component_ops xe_gsc_proxy_component_ops = {
	.bind = xe_gsc_proxy_component_bind,
	.unbind = xe_gsc_proxy_component_unbind,
};

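/*
 * Allocate the proxy channel: a pinned, GGTT-mapped BO holding the two
 * GSC-facing buffers and a driver-managed system memory allocation holding
 * the two CSME-facing ones. Both allocations are GSC_PROXY_CHANNEL_SIZE bytes
 * and are split in half between the send and receive directions.
 */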
static int proxy_channel_alloc(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	void *csme;

	csme = drmm_kzalloc(&xe->drm, GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
	if (!csme)
		return -ENOMEM;

	bo = xe_managed_bo_create_pin_map(xe, tile, GSC_PROXY_CHANNEL_SIZE,
					  XE_BO_FLAG_SYSTEM |
					  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo))
		return PTR_ERR(bo);

	gsc->proxy.bo = bo;
	gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
	gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE);
	gsc->proxy.to_csme = csme;
	gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;

	return 0;
}

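/*
 * Teardown action registered in xe_gsc_proxy_init(): disable the HECI2
 * interrupts, wait for the GSC worker to complete and remove the mei
 * component.
 */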
static void xe_gsc_proxy_remove(void *arg)
{
	struct xe_gsc *gsc = arg;
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	unsigned int fw_ref = 0;

	if (!gsc->proxy.component_added)
		return;

	/* disable HECI2 IRQs */
	xe_pm_runtime_get(xe);
	fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (!fw_ref)
		xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");

	/* try to disable the IRQ even if forcewake failed */
	gsc_proxy_irq_toggle(gsc, false);

	xe_force_wake_put(gt_to_fw(gt), fw_ref);
	xe_pm_runtime_put(xe);

	xe_gsc_wait_for_worker_completion(gsc);

	component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
	gsc->proxy.component_added = false;
}


/**
 * xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
 * @gsc: the GSC uC
 *
 * Return: 0 if the initialization was successful, a negative errno otherwise.
 */
int xe_gsc_proxy_init(struct xe_gsc *gsc)
{
	int err;
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	mutex_init(&gsc->proxy.mutex);

	if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
		xe_gt_info(gt, "can't init GSC proxy due to missing mei component\n");
		return -ENODEV;
	}

	/* no multi-tile devices with this feature yet */
	if (tile->id > 0) {
		xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
		return -EINVAL;
	}

	err = proxy_channel_alloc(gsc);
	if (err)
		return err;

	err = component_add_typed(xe->drm.dev, &xe_gsc_proxy_component_ops,
				  I915_COMPONENT_GSC_PROXY);
	if (err < 0) {
		xe_gt_err(gt, "Failed to add GSC_PROXY component (%pe)\n", ERR_PTR(err));
		return err;
	}

	gsc->proxy.component_added = true;

	return devm_add_action_or_reset(xe->drm.dev, xe_gsc_proxy_remove, gsc);
}

/**
 * xe_gsc_proxy_start() - start the proxy by submitting the first request
 * @gsc: the GSC uC
 *
 * Return: 0 if the proxy is now enabled, a negative errno otherwise.
 */
int xe_gsc_proxy_start(struct xe_gsc *gsc)
{
	int err;

	/* enable the proxy interrupt in the GSC shim layer */
	gsc_proxy_irq_toggle(gsc, true);

	/*
	 * The handling of the first proxy request must be manually triggered to
	 * notify the GSC that we're ready to support the proxy flow.
	 */
	err = xe_gsc_proxy_request_handler(gsc);
	if (err)
		return err;

	if (!xe_gsc_proxy_init_done(gsc)) {
		xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
		return -EIO;
	}

	return 0;
}
