// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_gsc_proxy.h"

#include <linux/component.h>
#include <linux/delay.h>

#include <drm/drm_managed.h>
#include <drm/intel/i915_component.h>
#include <drm/intel/i915_gsc_proxy_mei_interface.h>

#include "abi/gsc_proxy_commands_abi.h"
#include "regs/xe_gsc_regs.h"
#include "xe_bo.h"
#include "xe_force_wake.h"
#include "xe_gsc.h"
#include "xe_gsc_submit.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_map.h"
#include "xe_mmio.h"
#include "xe_pm.h"

/*
 * GSC proxy:
 * The GSC uC needs to communicate with the CSME to perform certain operations.
 * Since the GSC can't perform this communication directly on platforms where
 * it is integrated in the GT, the graphics driver needs to transfer the
 * messages from GSC to CSME and back. The proxy flow must be manually started
 * after the GSC is loaded to signal to GSC that we're ready to handle its
 * messages and allow it to query its init data from CSME; GSC will then
 * trigger an HECI2 interrupt if it needs to send messages to CSME again.
 * The proxy flow is as follows:
 * 1 - Xe submits a request to GSC asking for the message to CSME
 * 2 - GSC replies with the proxy header + payload for CSME
 * 3 - Xe sends the reply from GSC as-is to CSME via the mei proxy component
 * 4 - CSME replies with the proxy header + payload for GSC
 * 5 - Xe submits a request to GSC with the reply from CSME
 * 6 - GSC replies either with a new header + payload (same as step 2, so we
 *     restart from there) or with an end message.
 */

/*
 * The component should load quite quickly in most cases, but it could take
 * a bit. Using a very big timeout just to cover the worst case scenario
 */
#define GSC_PROXY_INIT_TIMEOUT_MS 20000

/* shorthand define for code compactness */
#define PROXY_HDR_SIZE (sizeof(struct xe_gsc_proxy_header))

/* the protocol supports up to 32K in each direction */
#define GSC_PROXY_BUFFER_SIZE SZ_32K
#define GSC_PROXY_CHANNEL_SIZE (GSC_PROXY_BUFFER_SIZE * 2)
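
/*
 * The proxy channel is split in two halves: the first GSC_PROXY_BUFFER_SIZE
 * bytes of the GGTT-mapped BO hold the messages sent to the GSC and the
 * second half holds its replies. A kernel buffer with the same split
 * (to_csme/from_csme) is used for the CSME side of the transfers; see
 * proxy_channel_alloc() below.
 */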

static struct xe_gt *
gsc_to_gt(struct xe_gsc *gsc)
{
	return container_of(gsc, struct xe_gt, uc.gsc);
}

static inline struct xe_device *kdev_to_xe(struct device *kdev)
{
	return dev_get_drvdata(kdev);
}

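/**
 * xe_gsc_proxy_init_done() - check if GSC proxy initialization is complete
 * @gsc: the GSC uC
 *
 * The GSC firmware reports its current state in the HECI1 FWSTS1 register;
 * proxy initialization is complete once the firmware has reached the proxy
 * normal state.
 *
 * Return: true if proxy init is done, false otherwise.
 */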
bool xe_gsc_proxy_init_done(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u32 fwsts1 = xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE));

	return REG_FIELD_GET(HECI1_FWSTS1_CURRENT_STATE, fwsts1) ==
	       HECI1_FWSTS1_PROXY_STATE_NORMAL;
}

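/*
 * The GSC raises its proxy request interrupt via the HECI2 host CSR:
 * HECI_H_CSR_IS is the (write-1-to-clear) interrupt status bit and
 * HECI_H_CSR_IE the interrupt enable bit. All updates to the CSR go through
 * a read-modify-write that masks out the reset bit.
 */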
static void __gsc_proxy_irq_rmw(struct xe_gsc *gsc, u32 clr, u32 set)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	/* make sure we never accidentally write the RST bit */
	clr |= HECI_H_CSR_RST;

	xe_mmio_rmw32(gt, HECI_H_CSR(MTL_GSC_HECI2_BASE), clr, set);
}

static void gsc_proxy_irq_clear(struct xe_gsc *gsc)
{
	/* The status bit is cleared by writing to it */
	__gsc_proxy_irq_rmw(gsc, 0, HECI_H_CSR_IS);
}

static void gsc_proxy_irq_toggle(struct xe_gsc *gsc, bool enabled)
{
	u32 set = enabled ? HECI_H_CSR_IE : 0;
	u32 clr = enabled ? 0 : HECI_H_CSR_IE;

	__gsc_proxy_irq_rmw(gsc, clr, set);
}

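/*
 * Forward a proxy message to the CSME through the mei component and read the
 * CSME reply back into the from_csme buffer. On success this returns the
 * size in bytes of the received reply.
 */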
static int proxy_send_to_csme(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct i915_gsc_proxy_component *comp = gsc->proxy.component;
	int ret;

	ret = comp->ops->send(comp->mei_dev, gsc->proxy.to_csme, size);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to send CSME proxy message\n");
		return ret;
	}

	ret = comp->ops->recv(comp->mei_dev, gsc->proxy.from_csme, GSC_PROXY_BUFFER_SIZE);
	if (ret < 0) {
		xe_gt_err(gt, "Failed to receive CSME proxy message\n");
		return ret;
	}

	return ret;
}

static int proxy_send_to_gsc(struct xe_gsc *gsc, u32 size)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	u64 addr_in = xe_bo_ggtt_addr(gsc->proxy.bo);
	u64 addr_out = addr_in + GSC_PROXY_BUFFER_SIZE;
	int err;

	/* the message must fit within the send buffer */
	if (size > GSC_PROXY_BUFFER_SIZE) {
		xe_gt_err(gt, "Invalid GSC proxy message size: %u\n", size);
		return -EINVAL;
	}

	err = xe_gsc_pkt_submit_kernel(gsc, addr_in, size,
				       addr_out, GSC_PROXY_BUFFER_SIZE);
	if (err) {
		xe_gt_err(gt, "Failed to submit gsc proxy rq (%pe)\n", ERR_PTR(err));
		return err;
	}

	return 0;
}

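/*
 * A proxy header is considered valid if it is addressed from @source to
 * @dest, its payload fits within @max_size and it is neither an invalid
 * message nor a payload message with an empty payload.
 */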
static int validate_proxy_header(struct xe_gsc_proxy_header *header,
				 u32 source, u32 dest, u32 max_size)
{
	u32 type = FIELD_GET(GSC_PROXY_TYPE, header->hdr);
	u32 length = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, header->hdr);

	if (header->destination != dest || header->source != source)
		return -ENOEXEC;

	if (length + PROXY_HDR_SIZE > max_size)
		return -E2BIG;

	switch (type) {
	case GSC_PROXY_MSG_TYPE_PROXY_PAYLOAD:
		if (length > 0)
			break;
		fallthrough;
	case GSC_PROXY_MSG_TYPE_PROXY_INVALID:
		return -EIO;
	default:
		break;
	}

	return 0;
}

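/*
 * For reference, the fields of struct xe_gsc_proxy_header accessed below
 * (see abi/gsc_proxy_commands_abi.h for the authoritative definition): hdr,
 * which packs the message type and payload length, followed by the source,
 * destination and status fields.
 */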
#define proxy_header_wr(xe_, map_, offset_, field_, val_) \
	xe_map_wr_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_, val_)

#define proxy_header_rd(xe_, map_, offset_, field_) \
	xe_map_rd_field(xe_, map_, offset_, struct xe_gsc_proxy_header, field_)

static u32 emit_proxy_header(struct xe_device *xe, struct iosys_map *map, u32 offset)
{
	xe_map_memset(xe, map, offset, 0, PROXY_HDR_SIZE);

	proxy_header_wr(xe, map, offset, hdr,
			FIELD_PREP(GSC_PROXY_TYPE, GSC_PROXY_MSG_TYPE_PROXY_QUERY) |
			FIELD_PREP(GSC_PROXY_PAYLOAD_LENGTH, 0));

	proxy_header_wr(xe, map, offset, source, GSC_PROXY_ADDRESSING_KMD);
	proxy_header_wr(xe, map, offset, destination, GSC_PROXY_ADDRESSING_GSC);
	proxy_header_wr(xe, map, offset, status, 0);

	return offset + PROXY_HDR_SIZE;
}

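/*
 * Implementation of the proxy flow described at the top of this file: send a
 * query to the GSC, relay each GSC reply to the CSME and each CSME reply
 * back to the GSC, until the GSC answers with an end message.
 */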
static int proxy_query(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_gsc_proxy_header *to_csme_hdr = gsc->proxy.to_csme;
	void *to_csme_payload = gsc->proxy.to_csme + PROXY_HDR_SIZE;
	u32 wr_offset;
	u32 reply_offset;
	u32 size;
	int ret;

	wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
				       HECI_MEADDRESS_PROXY, 0, PROXY_HDR_SIZE);
	wr_offset = emit_proxy_header(xe, &gsc->proxy.to_gsc, wr_offset);

	size = wr_offset;

	while (1) {
		/*
		 * Poison the GSC response header space to make sure we don't
		 * read a stale reply.
		 */
		xe_gsc_poison_header(xe, &gsc->proxy.from_gsc, 0);

		/* send proxy message to GSC */
		ret = proxy_send_to_gsc(gsc, size);
		if (ret)
			goto proxy_error;

		/* check the reply from GSC */
		ret = xe_gsc_read_out_header(xe, &gsc->proxy.from_gsc, 0,
					     PROXY_HDR_SIZE, &reply_offset);
		if (ret) {
			xe_gt_err(gt, "Invalid gsc header in proxy reply (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the proxy header reply from GSC */
		xe_map_memcpy_from(xe, to_csme_hdr, &gsc->proxy.from_gsc,
				   reply_offset, PROXY_HDR_SIZE);

		/* stop if this was the last message */
		if (FIELD_GET(GSC_PROXY_TYPE, to_csme_hdr->hdr) == GSC_PROXY_MSG_TYPE_PROXY_END)
			break;

		/* make sure the GSC-to-CSME proxy header is sane */
		ret = validate_proxy_header(to_csme_hdr,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid GSC to CSME proxy header! (%pe)\n",
				  ERR_PTR(ret));
			goto proxy_error;
		}

		/* copy the rest of the message */
		size = FIELD_GET(GSC_PROXY_PAYLOAD_LENGTH, to_csme_hdr->hdr);
		xe_map_memcpy_from(xe, to_csme_payload, &gsc->proxy.from_gsc,
				   reply_offset + PROXY_HDR_SIZE, size);

		/* send the GSC message to the CSME */
		ret = proxy_send_to_csme(gsc, size + PROXY_HDR_SIZE);
		if (ret < 0)
			goto proxy_error;

		/* reply size from CSME, including the proxy header */
		size = ret;
		if (size < PROXY_HDR_SIZE) {
			xe_gt_err(gt, "CSME to GSC proxy msg too small: 0x%x\n", size);
			ret = -EPROTO;
			goto proxy_error;
		}

		/* make sure the CSME-to-GSC proxy header is sane */
		ret = validate_proxy_header(gsc->proxy.from_csme,
					    GSC_PROXY_ADDRESSING_CSME,
					    GSC_PROXY_ADDRESSING_GSC,
					    GSC_PROXY_BUFFER_SIZE - reply_offset);
		if (ret) {
			xe_gt_err(gt, "invalid CSME to GSC proxy header! %d\n", ret);
			goto proxy_error;
		}

		/* Emit a new header for sending the reply to the GSC */
		wr_offset = xe_gsc_emit_header(xe, &gsc->proxy.to_gsc, 0,
					       HECI_MEADDRESS_PROXY, 0, size);

		/* copy the CSME reply and update the total msg size to include the GSC header */
		xe_map_memcpy_to(xe, &gsc->proxy.to_gsc, wr_offset, gsc->proxy.from_csme, size);

		size += wr_offset;
	}

proxy_error:
	return ret < 0 ? ret : 0;
}

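/**
 * xe_gsc_proxy_request_handler() - handle an incoming proxy request
 * @gsc: the GSC uC
 *
 * Waits for the mei proxy component to be bound, up to
 * GSC_PROXY_INIT_TIMEOUT_MS, and then runs the proxy flow under the proxy
 * mutex.
 *
 * Return: 0 on success, a negative errno otherwise.
 */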
int xe_gsc_proxy_request_handler(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	int slept;
	int err;

	if (!gsc->proxy.component_added)
		return -ENODEV;

	/* when GSC is loaded, we can queue this before the component is bound */
	for (slept = 0; slept < GSC_PROXY_INIT_TIMEOUT_MS; slept += 100) {
		if (gsc->proxy.component)
			break;

		msleep(100);
	}

	mutex_lock(&gsc->proxy.mutex);
	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy component not bound!\n");
		err = -EIO;
	} else {
		/*
		 * clear the pending interrupt and allow new proxy requests to
		 * be generated while we handle the current one
		 */
		gsc_proxy_irq_clear(gsc);
		err = proxy_query(gsc);
	}
	mutex_unlock(&gsc->proxy.mutex);
	return err;
}

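/**
 * xe_gsc_proxy_irq_handler() - handle a GSC proxy interrupt
 * @gsc: the GSC uC
 * @iir: the HECI2 interrupt bits
 *
 * Schedules the GSC worker with the SW_PROXY action so that the proxy
 * request is handled outside of interrupt context.
 */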
void xe_gsc_proxy_irq_handler(struct xe_gsc *gsc, u32 iir)
{
	struct xe_gt *gt = gsc_to_gt(gsc);

	if (unlikely(!iir))
		return;

	if (!gsc->proxy.component) {
		xe_gt_err(gt, "GSC proxy irq received without the component being bound!\n");
		return;
	}

	spin_lock(&gsc->lock);
	gsc->work_actions |= GSC_ACTION_SW_PROXY;
	spin_unlock(&gsc->lock);

	queue_work(gsc->wq, &gsc->work);
}

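/*
 * Bind/unbind callbacks for the component framework: the mei GSC proxy
 * driver registers the matching component and these are invoked once the
 * two sides are paired, giving us the handle used for the CSME transfers.
 */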
static int xe_gsc_proxy_component_bind(struct device *xe_kdev,
				       struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = data;
	gsc->proxy.component->mei_dev = mei_kdev;
	mutex_unlock(&gsc->proxy.mutex);

	return 0;
}

static void xe_gsc_proxy_component_unbind(struct device *xe_kdev,
					  struct device *mei_kdev, void *data)
{
	struct xe_device *xe = kdev_to_xe(xe_kdev);
	struct xe_gt *gt = xe->tiles[0].media_gt;
	struct xe_gsc *gsc = &gt->uc.gsc;

	xe_gsc_wait_for_worker_completion(gsc);

	mutex_lock(&gsc->proxy.mutex);
	gsc->proxy.component = NULL;
	mutex_unlock(&gsc->proxy.mutex);
}

static const struct component_ops xe_gsc_proxy_component_ops = {
	.bind   = xe_gsc_proxy_component_bind,
	.unbind = xe_gsc_proxy_component_unbind,
};

static void proxy_channel_free(struct drm_device *drm, void *arg)
{
	struct xe_gsc *gsc = arg;

	if (!gsc->proxy.bo)
		return;

	if (gsc->proxy.to_csme) {
		kfree(gsc->proxy.to_csme);
		gsc->proxy.to_csme = NULL;
		gsc->proxy.from_csme = NULL;
	}

	iosys_map_clear(&gsc->proxy.to_gsc);
	iosys_map_clear(&gsc->proxy.from_gsc);
	xe_bo_unpin_map_no_vm(gsc->proxy.bo);
	gsc->proxy.bo = NULL;
}

static int proxy_channel_alloc(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = gt_to_xe(gt);
	struct xe_bo *bo;
	void *csme;

	csme = kzalloc(GSC_PROXY_CHANNEL_SIZE, GFP_KERNEL);
	if (!csme)
		return -ENOMEM;

	bo = xe_bo_create_pin_map(xe, tile, NULL, GSC_PROXY_CHANNEL_SIZE,
				  ttm_bo_type_kernel,
				  XE_BO_FLAG_SYSTEM |
				  XE_BO_FLAG_GGTT);
	if (IS_ERR(bo)) {
		kfree(csme);
		return PTR_ERR(bo);
	}

	gsc->proxy.bo = bo;
	gsc->proxy.to_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, 0);
	gsc->proxy.from_gsc = IOSYS_MAP_INIT_OFFSET(&bo->vmap, GSC_PROXY_BUFFER_SIZE);
	gsc->proxy.to_csme = csme;
	gsc->proxy.from_csme = csme + GSC_PROXY_BUFFER_SIZE;

	return drmm_add_action_or_reset(&xe->drm, proxy_channel_free, gsc);
}

/**
 * xe_gsc_proxy_init() - init objects and MEI component required by GSC proxy
 * @gsc: the GSC uC
 *
 * Return: 0 if the initialization was successful, a negative errno otherwise.
 */
int xe_gsc_proxy_init(struct xe_gsc *gsc)
{
	int err;
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_tile *tile = gt_to_tile(gt);
	struct xe_device *xe = tile_to_xe(tile);

	mutex_init(&gsc->proxy.mutex);

	if (!IS_ENABLED(CONFIG_INTEL_MEI_GSC_PROXY)) {
		xe_gt_info(gt, "can't init GSC proxy due to missing mei component\n");
		return -ENODEV;
	}

	/* no multi-tile devices with this feature yet */
	if (tile->id > 0) {
		xe_gt_err(gt, "unexpected GSC proxy init on tile %u\n", tile->id);
		return -EINVAL;
	}

	err = proxy_channel_alloc(gsc);
	if (err)
		return err;

	err = component_add_typed(xe->drm.dev, &xe_gsc_proxy_component_ops,
				  I915_COMPONENT_GSC_PROXY);
	if (err < 0) {
		xe_gt_err(gt, "Failed to add GSC_PROXY component (%pe)\n", ERR_PTR(err));
		return err;
	}

	gsc->proxy.component_added = true;

	/* the component must be removed before unload, so can't use drmm for cleanup */

	return 0;
}

/**
 * xe_gsc_proxy_remove() - remove the GSC proxy MEI component
 * @gsc: the GSC uC
 */
void xe_gsc_proxy_remove(struct xe_gsc *gsc)
{
	struct xe_gt *gt = gsc_to_gt(gsc);
	struct xe_device *xe = gt_to_xe(gt);
	int err = 0;

	if (!gsc->proxy.component_added)
		return;

	/* disable HECI2 IRQs */
	xe_pm_runtime_get(xe);
	err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
	if (err)
		xe_gt_err(gt, "failed to get forcewake to disable GSC interrupts\n");

	/* try to disable the irq even if forcewake failed */
	gsc_proxy_irq_toggle(gsc, false);

	if (!err)
		xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
	xe_pm_runtime_put(xe);

	xe_gsc_wait_for_worker_completion(gsc);

	component_del(xe->drm.dev, &xe_gsc_proxy_component_ops);
	gsc->proxy.component_added = false;
}

/**
 * xe_gsc_proxy_start() - start the proxy by submitting the first request
 * @gsc: the GSC uC
 *
 * Return: 0 if the proxy is now enabled, a negative errno otherwise.
 */
int xe_gsc_proxy_start(struct xe_gsc *gsc)
{
	int err;

	/* enable the proxy interrupt in the GSC shim layer */
	gsc_proxy_irq_toggle(gsc, true);

	/*
	 * The handling of the first proxy request must be manually triggered to
	 * notify the GSC that we're ready to support the proxy flow.
	 */
	err = xe_gsc_proxy_request_handler(gsc);
	if (err)
		return err;

	if (!xe_gsc_proxy_init_done(gsc)) {
		xe_gt_err(gsc_to_gt(gsc), "GSC FW reports proxy init not completed\n");
		return -EIO;
	}

	return 0;
}
534