// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_debug.h"
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"
#include "amdgpu_ras.h"

/*
 * GFX9 SQ Interrupts
 *
 * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
 * packet to the Interrupt Handler:
 * Auto - Generated by the SQG (various cmd overflows, timestamps etc)
 * Wave - Generated by S_SENDMSG through a shader program
 * Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
 *
 * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
 * 4 bits for the VMID (SOC15_VMID_FROM_IH_ENTRY) as follows:
 *
 * - context_id0[27:26]
 *   Encoding type (0 = Auto, 1 = Wave, 2 = Error)
 *
 * - context_id0[13]
 *   PRIV bit indicates that the Wave S_SEND or error occurred within a trap
 *
 * - {context_id1[7:0],context_id0[31:28],context_id0[11:0]}
 *   24-bit data with the following layout per encoding type:
 *   Auto - only context_id0[8:0] is used, which reports various interrupts
 *          generated by SQG. The rest is 0.
 *   Wave - user data sent from m0 via S_SENDMSG
 *   Error - Error type (context_id1[7:4]), Error Details (rest of bits)
 *
 * The other context_id bits show coordinates (SE/SH/CU/SIMD/WAVE) for wave
 * S_SENDMSG and Errors. These are 0 for Auto.
 */
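
/*
 * Purely illustrative decode of the layout above, using a hypothetical
 * context_id0 value (not taken from hardware documentation):
 * context_id0 = 0x04002abc decodes as
 *   ENCODING = bits [27:26] = 1     (Wave, i.e. S_SENDMSG)
 *   PRIV     = bit  [13]    = 1     (sent from within the trap handler)
 *   DATA     = bits [11:0]  = 0xabc (low 12 bits of the m0 payload)
 * The wave coordinates (SE/SH/CU/SIMD/WAVE) are all 0 in this example.
 */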

enum SQ_INTERRUPT_WORD_ENCODING {
	SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
	SQ_INTERRUPT_WORD_ENCODING_INST,
	SQ_INTERRUPT_WORD_ENCODING_ERROR,
};

enum SQ_INTERRUPT_ERROR_TYPE {
	SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
	SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
	SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
	SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
};

/* SQ_INTERRUPT_WORD_AUTO_CTXID */
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0
#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 1
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 2
#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 3
#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 4
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 5
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 6
#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 7
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 8
#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 24
#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 26

#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x00000001
#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x00000002
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x00000004
#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x00000008
#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x00000010
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x00000020
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x00000040
#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x00000080
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x00000100
#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x03000000
#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0x0c000000

/* SQ_INTERRUPT_WORD_WAVE_CTXID */
#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 12
#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 13
#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 14
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 18
#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 20
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 24
#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 26

#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x00000fff
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x00001000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x00002000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x0003c000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x000c0000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x00f00000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000

/* GFX9 SQ interrupt 24-bit data from context_id<0,1> */
#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
	((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))

#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20
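
/*
 * Worked example with hypothetical register values, for illustration only:
 * with ctx0 = 0xa0000123 and ctx1 = 0x25,
 *   KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1)
 *     = 0x123 | 0xa000 | 0x250000 = 0x25a123,
 * i.e. {ctx1[7:0], ctx0[31:28], ctx0[11:0]} packed into 24 bits.
 * For an Error encoding, KFD_SQ_INT_DATA__ERR_TYPE_MASK selects bits [23:20]
 * of this value (here 0x2, i.e. SQ_INTERRUPT_ERROR_TYPE_MEMVIOL).
 */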

/*
 * The debugger will send user data (m0) with PRIV=1 to indicate it requires
 * notification from the KFD with the following queue id (DOORBELL_ID) and
 * trap code (TRAP_CODE).
 */
#define KFD_INT_DATA_DEBUG_DOORBELL_MASK 0x0003ff
#define KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT 10
#define KFD_INT_DATA_DEBUG_TRAP_CODE_MASK 0x07fc00
#define KFD_DEBUG_DOORBELL_ID(sq_int_data) ((sq_int_data) &	\
				KFD_INT_DATA_DEBUG_DOORBELL_MASK)
#define KFD_DEBUG_TRAP_CODE(sq_int_data) (((sq_int_data) &	\
				KFD_INT_DATA_DEBUG_TRAP_CODE_MASK)	\
				>> KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT)
#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00
#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10
#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) &	\
				KFD_DEBUG_CP_BAD_OP_ECODE_MASK)	\
				>> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
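
/*
 * Illustrative decode of the debugger notification data, with a hypothetical
 * sq_int_data value (not from hardware documentation):
 *   sq_int_data = 0x000c05 gives
 *   KFD_DEBUG_DOORBELL_ID(0x000c05) = 0x000c05 & 0x0003ff          = 0x005
 *   KFD_DEBUG_TRAP_CODE(0x000c05)   = (0x000c05 & 0x07fc00) >> 10  = 0x3
 * i.e. the debugger is notified for doorbell/queue id 5 with trap code 3.
 */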

static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
				uint16_t pasid, uint16_t client_id)
{
	enum amdgpu_ras_block block = 0;
	uint32_t reset = 0;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	enum ras_event_type type = RAS_EVENT_TYPE_POISON_CONSUMPTION;
	u64 event_id;
	int old_poison, ret;

	if (!p)
		return;

	/* All queues of the process will be unmapped at once */
	old_poison = atomic_cmpxchg(&p->poison, 0, 1);
	kfd_unref_process(p);
	if (old_poison)
		return;

	switch (client_id) {
	case SOC15_IH_CLIENTID_SE0SH:
	case SOC15_IH_CLIENTID_SE1SH:
	case SOC15_IH_CLIENTID_SE2SH:
	case SOC15_IH_CLIENTID_SE3SH:
	case SOC15_IH_CLIENTID_UTCL2:
		block = AMDGPU_RAS_BLOCK__GFX;
		if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3)) {
			/* Driver mode-2 reset for gfx poison is only supported
			 * by pmfw 0x00557300 and onwards.
			 */
			if (dev->adev->pm.fw_version < 0x00557300)
				reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
			else
				reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		} else if (amdgpu_ip_version(dev->adev, GC_HWIP, 0) == IP_VERSION(9, 4, 4)) {
			/* Driver mode-2 reset for gfx poison is only supported
			 * by pmfw 0x05550C00 and onwards.
			 */
			if (dev->adev->pm.fw_version < 0x05550C00)
				reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
			else
				reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		} else {
			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		}
		break;
	case SOC15_IH_CLIENTID_VMC:
	case SOC15_IH_CLIENTID_VMC1:
		block = AMDGPU_RAS_BLOCK__MMHUB;
		reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
		break;
	case SOC15_IH_CLIENTID_SDMA0:
	case SOC15_IH_CLIENTID_SDMA1:
	case SOC15_IH_CLIENTID_SDMA2:
	case SOC15_IH_CLIENTID_SDMA3:
	case SOC15_IH_CLIENTID_SDMA4:
		block = AMDGPU_RAS_BLOCK__SDMA;
		if (amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 2)) {
			/* Driver mode-2 reset for SDMA poison is only supported
			 * by pmfw 0x00557300 and onwards.
			 */
			if (dev->adev->pm.fw_version < 0x00557300)
				reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
			else
				reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		} else if (amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) == IP_VERSION(4, 4, 5)) {
			/* Driver mode-2 reset for SDMA poison is only supported
			 * by pmfw 0x05550C00 and onwards.
			 */
			if (dev->adev->pm.fw_version < 0x05550C00)
				reset = AMDGPU_RAS_GPU_RESET_MODE1_RESET;
			else
				reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		} else {
			reset = AMDGPU_RAS_GPU_RESET_MODE2_RESET;
		}
		break;
	default:
		dev_warn(dev->adev->dev,
			 "client %d does not support poison consumption\n", client_id);
		return;
	}

	ret = amdgpu_ras_mark_ras_event(dev->adev, type);
	if (ret)
		return;

	kfd_signal_poison_consumed_event(dev, pasid);

	event_id = amdgpu_ras_acquire_event_id(dev->adev, type);

	RAS_EVENT_LOG(dev->adev, event_id,
		      "poison is consumed by client %d, kick off gpu reset flow\n", client_id);

	amdgpu_amdkfd_ras_pasid_poison_consumption_handler(dev->adev,
		block, pasid, NULL, NULL, reset);
}

static bool context_id_expected(struct kfd_dev *dev)
{
	switch (KFD_GC_VERSION(dev)) {
	case IP_VERSION(9, 0, 1):
		return dev->mec_fw_version >= 0x817a;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
		return dev->mec_fw_version >= 0x17a;
	default:
		/* Other GFXv9 and later GPUs always send valid context IDs
		 * on legitimate events
		 */
		return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 1);
	}
}

static bool event_interrupt_isr_v9(struct kfd_node *dev,
					const uint32_t *ih_ring_entry,
					uint32_t *patched_ihre,
					bool *patched_flag)
{
	uint16_t source_id, client_id, pasid, vmid;
	const uint32_t *data = ih_ring_entry;

	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);

	/* Only handle interrupts from KFD VMIDs */
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
	    (vmid < dev->vm_info.first_vmid_kfd ||
	     vmid > dev->vm_info.last_vmid_kfd))
		return false;

	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);

	/* Only handle clients we care about */
	if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
	    client_id != SOC15_IH_CLIENTID_SDMA0 &&
	    client_id != SOC15_IH_CLIENTID_SDMA1 &&
	    client_id != SOC15_IH_CLIENTID_SDMA2 &&
	    client_id != SOC15_IH_CLIENTID_SDMA3 &&
	    client_id != SOC15_IH_CLIENTID_SDMA4 &&
	    client_id != SOC15_IH_CLIENTID_SDMA5 &&
	    client_id != SOC15_IH_CLIENTID_SDMA6 &&
	    client_id != SOC15_IH_CLIENTID_SDMA7 &&
	    client_id != SOC15_IH_CLIENTID_VMC &&
	    client_id != SOC15_IH_CLIENTID_VMC1 &&
	    client_id != SOC15_IH_CLIENTID_UTCL2 &&
	    client_id != SOC15_IH_CLIENTID_SE0SH &&
	    client_id != SOC15_IH_CLIENTID_SE1SH &&
	    client_id != SOC15_IH_CLIENTID_SE2SH &&
	    client_id != SOC15_IH_CLIENTID_SE3SH &&
	    !KFD_IRQ_IS_FENCE(client_id, source_id))
		return false;

	/* This is a known issue for gfx9. Under non-HWS, the pasid is not set
	 * in the interrupt payload, so we need to find out the pasid on our
	 * own.
	 */
	if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		const uint32_t pasid_mask = 0xffff;

		*patched_flag = true;
		memcpy(patched_ihre, ih_ring_entry,
		       dev->kfd->device_info.ih_ring_entry_size);

		pasid = dev->dqm->vmid_pasid[vmid];

		/* Patch the pasid field */
		patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
					& ~pasid_mask) | pasid);
	}

	pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
		 client_id, source_id, vmid, pasid);
	pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
		 data[0], data[1], data[2], data[3],
		 data[4], data[5], data[6], data[7]);

	/* If there is no valid PASID, it's likely a bug */
	if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
		return false;

	/* Workaround CP firmware sending bogus signals with 0 context_id.
	 * Those can be safely ignored on hardware and firmware versions that
	 * include a valid context_id on legitimate signals. This avoids the
	 * slow path in kfd_signal_event_interrupt that scans all event slots
	 * for signaled events.
	 */
	if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) {
		uint32_t context_id =
			SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);

		if (context_id == 0 && context_id_expected(dev->kfd))
			return false;
	}

	/* Interrupt types we care about: various signals and faults.
	 * They will be forwarded to a work queue (see below).
	 */
	return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
		source_id == SOC15_INTSRC_SDMA_TRAP ||
		source_id == SOC15_INTSRC_SDMA_ECC ||
		source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
		source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
		KFD_IRQ_IS_FENCE(client_id, source_id) ||
		((client_id == SOC15_IH_CLIENTID_VMC ||
		  client_id == SOC15_IH_CLIENTID_VMC1 ||
		  client_id == SOC15_IH_CLIENTID_UTCL2) &&
		 !amdgpu_no_queue_eviction_on_vm_fault);
}

static void event_interrupt_wq_v9(struct kfd_node *dev,
					const uint32_t *ih_ring_entry)
{
	uint16_t source_id, client_id, pasid, vmid;
	uint32_t context_id0, context_id1;
	uint32_t sq_intr_err, sq_int_data, encoding;

	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
	context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);

	if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
	    client_id == SOC15_IH_CLIENTID_SE0SH ||
	    client_id == SOC15_IH_CLIENTID_SE1SH ||
	    client_id == SOC15_IH_CLIENTID_SE2SH ||
	    client_id == SOC15_IH_CLIENTID_SE3SH) {
		if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
			kfd_signal_event_interrupt(pasid, context_id0, 32);
		else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
			sq_int_data = KFD_CONTEXT_ID_GET_SQ_INT_DATA(context_id0, context_id1);
			encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
			switch (encoding) {
			case SQ_INTERRUPT_WORD_ENCODING_AUTO:
				pr_debug_ratelimited(
					"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
				break;
			case SQ_INTERRUPT_WORD_ENCODING_INST:
				pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
					sq_int_data);
				if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
					if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
							KFD_DEBUG_DOORBELL_ID(sq_int_data),
							KFD_DEBUG_TRAP_CODE(sq_int_data),
							NULL, 0))
						return;
				}
				break;
			case SQ_INTERRUPT_WORD_ENCODING_ERROR:
				sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
				pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
					sq_intr_err);
				if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
				    sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
					event_interrupt_poison_consumption_v9(dev, pasid, client_id);
					return;
				}
				break;
			default:
				break;
			}
			kfd_signal_event_interrupt(pasid, sq_int_data, 24);
		} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
			   KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
			kfd_set_dbg_ev_from_interrupt(dev, pasid,
				KFD_DEBUG_DOORBELL_ID(context_id0),
				KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
				NULL, 0);
		}
	} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
		   client_id == SOC15_IH_CLIENTID_SDMA1 ||
		   client_id == SOC15_IH_CLIENTID_SDMA2 ||
		   client_id == SOC15_IH_CLIENTID_SDMA3 ||
		   client_id == SOC15_IH_CLIENTID_SDMA4 ||
		   client_id == SOC15_IH_CLIENTID_SDMA5 ||
		   client_id == SOC15_IH_CLIENTID_SDMA6 ||
		   client_id == SOC15_IH_CLIENTID_SDMA7) {
		if (source_id == SOC15_INTSRC_SDMA_TRAP) {
			kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
		} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
			event_interrupt_poison_consumption_v9(dev, pasid, client_id);
			return;
		}
	} else if (client_id == SOC15_IH_CLIENTID_VMC ||
		   client_id == SOC15_IH_CLIENTID_VMC1 ||
		   client_id == SOC15_IH_CLIENTID_UTCL2) {
		struct kfd_vm_fault_info info = {0};
		uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
		struct kfd_hsa_memory_exception_data exception_data;

		if (source_id == SOC15_INTSRC_VMC_UTCL2_POISON) {
			event_interrupt_poison_consumption_v9(dev, pasid, client_id);
			return;
		}

		info.vmid = vmid;
		info.mc_id = client_id;
		info.page_addr = ih_ring_entry[4] |
			(uint64_t)(ih_ring_entry[5] & 0xf) << 32;
		info.prot_valid = ring_id & 0x08;
		info.prot_read = ring_id & 0x10;
		info.prot_write = ring_id & 0x20;

		memset(&exception_data, 0, sizeof(exception_data));
		exception_data.gpu_id = dev->id;
		exception_data.va = (info.page_addr) << PAGE_SHIFT;
		exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
		exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
		exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
		exception_data.failure.imprecise = 0;

		kfd_set_dbg_ev_from_interrupt(dev,
					      pasid,
					      -1,
					      KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
					      &exception_data,
					      sizeof(exception_data));
		kfd_smi_event_update_vmfault(dev, pasid);
	} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
		kfd_process_close_interrupt_drain(pasid);
	}
}

static bool event_interrupt_isr_v9_4_3(struct kfd_node *node,
					const uint32_t *ih_ring_entry,
					uint32_t *patched_ihre,
					bool *patched_flag)
{
	uint16_t node_id, vmid;

	/*
	 * For GFX 9.4.3, process the interrupt if:
	 * - NodeID field in IH entry matches the corresponding bit
	 *   set in interrupt_bitmap Bits 0-15.
	 *   OR
	 * - If partition mode is CPX and interrupt came from
	 *   Node_id 0,4,8,12, then check if the Bit (16 + client id)
	 *   is set in interrupt bitmap Bits 16-31.
	 */
	node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	if (kfd_irq_is_from_node(node, node_id, vmid))
		return event_interrupt_isr_v9(node, ih_ring_entry,
					      patched_ihre, patched_flag);
	return false;
}

const struct kfd_event_interrupt_class event_interrupt_class_v9 = {
	.interrupt_isr = event_interrupt_isr_v9,
	.interrupt_wq = event_interrupt_wq_v9,
};

const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3 = {
	.interrupt_isr = event_interrupt_isr_v9_4_3,
	.interrupt_wq = event_interrupt_wq_v9,
};