xref: /linux/drivers/gpu/drm/xe/xe_mert.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright(c) 2025, Intel Corporation. All rights reserved.
4  */
5 
6 #include "regs/xe_irq_regs.h"
7 #include "regs/xe_mert_regs.h"
8 
9 #include "xe_device.h"
10 #include "xe_mert.h"
11 #include "xe_mmio.h"
12 #include "xe_sriov_printk.h"
13 #include "xe_tile.h"
14 
15 /**
16  * xe_mert_init_early() - Initialize MERT data
17  * @xe: the &xe_device with MERT to init
18  */
19 void xe_mert_init_early(struct xe_device *xe)
20 {
21 	struct xe_tile *tile = xe_device_get_root_tile(xe);
22 	struct xe_mert *mert = &tile->mert;
23 
24 	spin_lock_init(&mert->lock);
25 	init_completion(&mert->tlb_inv_done);
26 }
27 
28 /**
29  * xe_mert_invalidate_lmtt() - Invalidate MERT LMTT
30  * @xe: the &xe_device with MERT
31  *
32  * Trigger invalidation of the MERT LMTT and wait for completion.
33  *
34  * Return: 0 on success or -ETIMEDOUT in case of a timeout.
35  */
36 int xe_mert_invalidate_lmtt(struct xe_device *xe)
37 {
38 	struct xe_tile *tile = xe_device_get_root_tile(xe);
39 	struct xe_mert *mert = &tile->mert;
40 	const long timeout = HZ / 4;
41 	unsigned long flags;
42 
43 	xe_assert(xe, xe_device_has_mert(xe));
44 
45 	spin_lock_irqsave(&mert->lock, flags);
46 	if (!mert->tlb_inv_triggered) {
47 		mert->tlb_inv_triggered = true;
48 		reinit_completion(&mert->tlb_inv_done);
49 		xe_mmio_write32(&tile->mmio, MERT_TLB_INV_DESC_A, MERT_TLB_INV_DESC_A_VALID);
50 	}
51 	spin_unlock_irqrestore(&mert->lock, flags);
52 
53 	if (!wait_for_completion_timeout(&mert->tlb_inv_done, timeout))
54 		return -ETIMEDOUT;
55 
56 	return 0;
57 }
58 
59 static void mert_handle_cat_error(struct xe_device *xe)
60 {
61 	struct xe_tile *tile = xe_device_get_root_tile(xe);
62 	u32 reg_val, vfid, code;
63 
64 	reg_val = xe_mmio_read32(&tile->mmio, MERT_TLB_CT_INTR_ERR_ID_PORT);
65 	if (!reg_val)
66 		return;
67 	xe_mmio_write32(&tile->mmio, MERT_TLB_CT_INTR_ERR_ID_PORT, 0);
68 
69 	vfid = FIELD_GET(CATERR_VFID, reg_val);
70 	code = FIELD_GET(CATERR_CODES, reg_val);
71 
72 	switch (code) {
73 	case CATERR_NO_ERROR:
74 		break;
75 	case CATERR_UNMAPPED_GGTT:
76 		xe_sriov_err(xe, "MERT: CAT_ERR: Access to an unmapped GGTT!\n");
77 		xe_device_declare_wedged(xe);
78 		break;
79 	case CATERR_LMTT_FAULT:
80 		xe_sriov_dbg(xe, "MERT: CAT_ERR: VF%u LMTT fault!\n", vfid);
81 		/* XXX: track/report malicious VF activity */
82 		break;
83 	default:
84 		xe_sriov_err(xe, "MERT: Unexpected CAT_ERR code=%#x!\n", code);
85 		xe_device_declare_wedged(xe);
86 		break;
87 	}
88 }
89 
90 /**
91  * xe_mert_irq_handler - Handler for MERT interrupts
92  * @xe: the &xe_device
93  * @master_ctl: interrupt register
94  *
95  * Handle interrupts generated by MERT.
96  */
97 void xe_mert_irq_handler(struct xe_device *xe, u32 master_ctl)
98 {
99 	struct xe_tile *tile = xe_device_get_root_tile(xe);
100 	struct xe_mert *mert = &tile->mert;
101 	unsigned long flags;
102 	u32 reg_val;
103 
104 	if (!(master_ctl & SOC_H2DMEMINT_IRQ))
105 		return;
106 
107 	mert_handle_cat_error(xe);
108 
109 	spin_lock_irqsave(&mert->lock, flags);
110 	if (mert->tlb_inv_triggered) {
111 		reg_val = xe_mmio_read32(&tile->mmio, MERT_TLB_INV_DESC_A);
112 		if (!(reg_val & MERT_TLB_INV_DESC_A_VALID)) {
113 			mert->tlb_inv_triggered = false;
114 			complete_all(&mert->tlb_inv_done);
115 		}
116 	}
117 	spin_unlock_irqrestore(&mert->lock, flags);
118 }
119