// SPDX-License-Identifier: MIT
/*
 * Copyright © 2023 Intel Corporation
 */

#include "xe_devcoredump.h"
#include "xe_devcoredump_types.h"

#include <linux/ascii85.h>
#include <linux/devcoredump.h>
#include <generated/utsrelease.h>

#include <drm/drm_managed.h>

#include "xe_device.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_gt.h"
#include "xe_gt_printk.h"
#include "xe_guc_capture.h"
#include "xe_guc_ct.h"
#include "xe_guc_log.h"
#include "xe_guc_submit.h"
#include "xe_hw_engine.h"
#include "xe_module.h"
#include "xe_sched_job.h"
#include "xe_vm.h"

/**
 * DOC: Xe device coredump
 *
 * Xe uses the dev_coredump infrastructure to expose crash errors in a
 * standardized way. Once a crash occurs, devcoredump exposes a temporary
 * node under ``/sys/class/devcoredump/devcd<m>/``. The same node is also
 * accessible in ``/sys/class/drm/card<n>/device/devcoredump/``. The
 * ``failing_device`` symlink points to the device that crashed and created the
 * coredump.
 *
 * Xe device coredumps have the following characteristics:
 *
 * **Snapshot at hang**:
 * The 'data' file contains a snapshot of the HW and driver states at the time
 * the hang happened. Due to the driver recovering from resets/crashes, it may
 * not correspond to the state of the system when the file is read by
 * userspace.
 *
 * **Coredump release**:
 * After a coredump is generated, it stays in kernel memory until released by
 * userspace by writing anything to it, or after an internal timer expires. The
 * exact timeout may vary and should not be relied upon. Example to release
 * a coredump:
 *
 * .. code-block:: shell
 *
 *	$ > /sys/class/drm/card0/device/devcoredump/data
 *
 * **First failure only**:
 * In general, the first hang is the most critical one since the following
 * hangs can be a consequence of the initial hang. For this reason a snapshot
 * is taken only for the first failure. Until the devcoredump is released by
 * userspace or kernel, all subsequent hangs do not override the snapshot nor
 * create new ones. Devcoredump has a delayed work queue that will eventually
 * delete the file node and free all the dump information.
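 *
 * **Reading the coredump**:
 * The dump is retrieved by reading the same ``data`` file. For example, to
 * save it for later inspection (``card0`` stands in for whichever card
 * actually failed):
 *
 * .. code-block:: shell
 *
 *	$ cat /sys/class/drm/card0/device/devcoredump/data > coredump.txt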
 */

#ifdef CONFIG_DEV_COREDUMP

/* 1 hour timeout */
#define XE_COREDUMP_TIMEOUT_JIFFIES (60 * 60 * HZ)

static struct xe_device *coredump_to_xe(const struct xe_devcoredump *coredump)
{
	return container_of(coredump, struct xe_device, devcoredump);
}

static struct xe_guc *exec_queue_to_guc(struct xe_exec_queue *q)
{
	return &q->gt->uc.guc;
}

static ssize_t __xe_devcoredump_read(char *buffer, size_t count,
				     struct xe_devcoredump *coredump)
{
	struct xe_device *xe;
	struct xe_devcoredump_snapshot *ss;
	struct drm_printer p;
	struct drm_print_iterator iter;
	struct timespec64 ts;
	int i;

	xe = coredump_to_xe(coredump);
	ss = &coredump->snapshot;

	iter.data = buffer;
	iter.start = 0;
	iter.remain = count;

	p = drm_coredump_printer(&iter);

	drm_puts(&p, "**** Xe Device Coredump ****\n");
	drm_puts(&p, "kernel: " UTS_RELEASE "\n");
	drm_puts(&p, "module: " KBUILD_MODNAME "\n");

	ts = ktime_to_timespec64(ss->snapshot_time);
	drm_printf(&p, "Snapshot time: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
	ts = ktime_to_timespec64(ss->boot_time);
	drm_printf(&p, "Uptime: %lld.%09ld\n", ts.tv_sec, ts.tv_nsec);
	drm_printf(&p, "Process: %s\n", ss->process_name);
	xe_device_snapshot_print(xe, &p);

	drm_printf(&p, "\n**** GT #%d ****\n", ss->gt->info.id);
	drm_printf(&p, "\tTile: %d\n", ss->gt->tile->id);

	drm_puts(&p, "\n**** GuC Log ****\n");
	xe_guc_log_snapshot_print(ss->guc.log, &p);
	drm_puts(&p, "\n**** GuC CT ****\n");
	xe_guc_ct_snapshot_print(ss->guc.ct, &p);

	drm_puts(&p, "\n**** Contexts ****\n");
	xe_guc_exec_queue_snapshot_print(ss->ge, &p);

	drm_puts(&p, "\n**** Job ****\n");
	xe_sched_job_snapshot_print(ss->job, &p);

	drm_puts(&p, "\n**** HW Engines ****\n");
	for (i = 0; i < XE_NUM_HW_ENGINES; i++)
		if (ss->hwe[i])
			xe_engine_snapshot_print(ss->hwe[i], &p);

	drm_puts(&p, "\n**** VM state ****\n");
	xe_vm_snapshot_print(ss->vm, &p);

	return count - iter.remain;
}

static void xe_devcoredump_snapshot_free(struct xe_devcoredump_snapshot *ss)
{
	int i;

	xe_guc_log_snapshot_free(ss->guc.log);
	ss->guc.log = NULL;

	xe_guc_ct_snapshot_free(ss->guc.ct);
	ss->guc.ct = NULL;

	xe_guc_capture_put_matched_nodes(&ss->gt->uc.guc);
	ss->matched_node = NULL;

	xe_guc_exec_queue_snapshot_free(ss->ge);
	ss->ge = NULL;

	xe_sched_job_snapshot_free(ss->job);
	ss->job = NULL;

	for (i = 0; i < XE_NUM_HW_ENGINES; i++)
		if (ss->hwe[i]) {
			xe_hw_engine_snapshot_free(ss->hwe[i]);
			ss->hwe[i] = NULL;
		}

	xe_vm_snapshot_free(ss->vm);
	ss->vm = NULL;
}

static void xe_devcoredump_deferred_snap_work(struct work_struct *work)
{
	struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work);
	struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot);
	unsigned int fw_ref;

	/* keep going if fw fails as we still want to save the memory and SW data */
	fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL);
	if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL))
		xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");
	xe_vm_snapshot_capture_delayed(ss->vm);
	xe_guc_exec_queue_snapshot_capture_delayed(ss->ge);
	xe_force_wake_put(gt_to_fw(ss->gt), fw_ref);

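	/*
	 * Calculate devcoredump size: a first pass with a NULL buffer makes
	 * the coredump printer count bytes instead of copying them, giving
	 * the exact allocation size for the second, real pass below.
	 */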
	ss->read.size = __xe_devcoredump_read(NULL, INT_MAX, coredump);

	ss->read.buffer = kvmalloc(ss->read.size, GFP_USER);
	if (!ss->read.buffer)
		return;

	__xe_devcoredump_read(ss->read.buffer, ss->read.size, coredump);
	xe_devcoredump_snapshot_free(ss);
}

static ssize_t xe_devcoredump_read(char *buffer, loff_t offset,
				   size_t count, void *data, size_t datalen)
{
	struct xe_devcoredump *coredump = data;
	struct xe_devcoredump_snapshot *ss;
	ssize_t byte_copied;

	if (!coredump)
		return -ENODEV;

	ss = &coredump->snapshot;

	/* Ensure delayed work is captured before continuing */
	flush_work(&ss->work);

	if (!ss->read.buffer)
		return -ENODEV;

	if (offset >= ss->read.size)
		return 0;

	byte_copied = count < ss->read.size - offset ? count :
		ss->read.size - offset;
	memcpy(buffer, ss->read.buffer + offset, byte_copied);

	return byte_copied;
}

static void xe_devcoredump_free(void *data)
{
	struct xe_devcoredump *coredump = data;

	/* Our device is gone. Nothing to do... */
	if (!data || !coredump_to_xe(coredump))
		return;

	cancel_work_sync(&coredump->snapshot.work);

	xe_devcoredump_snapshot_free(&coredump->snapshot);
	kvfree(coredump->snapshot.read.buffer);

	/* To prevent stale data on next snapshot, clear everything */
	memset(&coredump->snapshot, 0, sizeof(coredump->snapshot));
	coredump->captured = false;
	coredump->job = NULL;
	drm_info(&coredump_to_xe(coredump)->drm,
		 "Xe device coredump has been deleted.\n");
}

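/*
 * Gather everything that must be sampled at hang time. This runs in
 * dma-fence signalling context, so only lightweight captures are taken
 * here; the expensive copies (VM contents, exec queue state) are deferred
 * to xe_devcoredump_deferred_snap_work() via ss->work.
 */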
static void devcoredump_snapshot(struct xe_devcoredump *coredump,
				 struct xe_sched_job *job)
{
	struct xe_devcoredump_snapshot *ss = &coredump->snapshot;
	struct xe_exec_queue *q = job->q;
	struct xe_guc *guc = exec_queue_to_guc(q);
	u32 adj_logical_mask = q->logical_mask;
	u32 width_mask = (0x1 << q->width) - 1;
	const char *process_name = "no process";

	unsigned int fw_ref;
	bool cookie;
	int i;

	ss->snapshot_time = ktime_get_real();
	ss->boot_time = ktime_get_boottime();

	if (q->vm && q->vm->xef)
		process_name = q->vm->xef->process_name;
	strscpy(ss->process_name, process_name);

	ss->gt = q->gt;
	coredump->job = job;
	INIT_WORK(&ss->work, xe_devcoredump_deferred_snap_work);

	cookie = dma_fence_begin_signalling();
	/* Widen each set bit to cover the full width of a parallel queue */
	for (i = 0; q->width > 1 && i < XE_HW_ENGINE_MAX_INSTANCE;) {
		if (adj_logical_mask & BIT(i)) {
			adj_logical_mask |= width_mask << i;
			i += q->width;
		} else {
			++i;
		}
	}

	/* keep going if fw fails as we still want to save the memory and SW data */
	fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);

	ss->guc.log = xe_guc_log_snapshot_capture(&guc->log, true);
	ss->guc.ct = xe_guc_ct_snapshot_capture(&guc->ct);
	ss->ge = xe_guc_exec_queue_snapshot_capture(q);
	ss->job = xe_sched_job_snapshot_capture(job);
	ss->vm = xe_vm_snapshot_capture(q->vm);

	xe_engine_snapshot_capture_for_job(job);

	queue_work(system_unbound_wq, &ss->work);

	xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
	dma_fence_end_signalling(cookie);
}

/**
 * xe_devcoredump - Take the required snapshots and initialize coredump device.
 * @job: The faulty xe_sched_job, where the issue was detected.
 *
 * This function should be called at the crash time within the serialized
 * gt_reset. It is skipped if we still have the core dump device available
 * with the information of the 'first' snapshot.
 */
void xe_devcoredump(struct xe_sched_job *job)
{
	struct xe_device *xe = gt_to_xe(job->q->gt);
	struct xe_devcoredump *coredump = &xe->devcoredump;

	if (coredump->captured) {
		drm_dbg(&xe->drm, "Multiple hangs are occurring, but only the first snapshot was taken\n");
		return;
	}

	coredump->captured = true;
	devcoredump_snapshot(coredump, job);

	drm_info(&xe->drm, "Xe device coredump has been created\n");
	drm_info(&xe->drm, "Check your /sys/class/drm/card%d/device/devcoredump/data\n",
		 xe->drm.primary->index);

	dev_coredumpm_timeout(xe->drm.dev, THIS_MODULE, coredump, 0, GFP_KERNEL,
			      xe_devcoredump_read, xe_devcoredump_free,
			      XE_COREDUMP_TIMEOUT_JIFFIES);
}

static void xe_driver_devcoredump_fini(void *arg)
{
	struct drm_device *drm = arg;

	dev_coredump_put(drm->dev);
}

/**
 * xe_devcoredump_init - Initialize xe_devcoredump system
 * @xe: Xe device.
 *
 * Registers a cleanup action that drops any pending coredump when the
 * driver is unbound.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_devcoredump_init(struct xe_device *xe)
{
	return devm_add_action_or_reset(xe->drm.dev, xe_driver_devcoredump_fini, &xe->drm);
}

#endif /* CONFIG_DEV_COREDUMP */

/**
 * xe_print_blob_ascii85 - print a BLOB to some useful location in ASCII85
 * @p: the printer object to output to
 * @prefix: optional prefix to add to output string
 * @blob: the Binary Large OBject to dump out
 * @offset: offset in bytes to skip from the front of the BLOB, must be a multiple of sizeof(u32)
 * @size: the size in bytes of the BLOB, must be a multiple of sizeof(u32)
 *
 * The output is split into multiple lines because some print targets, e.g.
 * dmesg, cannot handle arbitrarily long lines. Note also that printing to
 * dmesg in piecemeal fashion is not possible: each separate call to drm_puts()
 * gets a line-feed automatically added! Therefore, the entire output line must
 * be constructed in a local buffer first, then printed in one atomic output
 * call.
 *
 * There is also a scheduler yield call to prevent the 'task has been stuck for
 * 120s' kernel hang check feature from firing when printing to a slow target
 * such as dmesg over a serial port.
 *
 * TODO: Add compression prior to the ASCII85 encoding to shrink huge buffers down.
 */
void xe_print_blob_ascii85(struct drm_printer *p, const char *prefix,
			   const void *blob, size_t offset, size_t size)
{
	const u32 *blob32 = (const u32 *)blob;
	char buff[ASCII85_BUFSZ], *line_buff;
	size_t line_pos = 0;

#define DMESG_MAX_LINE_LEN	800
#define MIN_SPACE		(ASCII85_BUFSZ + 2)	/* one encoded u32 + "\n\0" */

	if (size & 3)
		drm_printf(p, "Size not word aligned: %zu\n", size);
	if (offset & 3)
		drm_printf(p, "Offset not word aligned: %zu\n", offset);

	line_buff = kzalloc(DMESG_MAX_LINE_LEN, GFP_KERNEL);
	if (!line_buff) {
		drm_printf(p, "Failed to allocate line buffer\n");
		return;
	}

	blob32 += offset / sizeof(*blob32);
	size /= sizeof(*blob32);

	if (prefix) {
		strscpy(line_buff, prefix, DMESG_MAX_LINE_LEN - MIN_SPACE - 2);
		line_pos = strlen(line_buff);

		line_buff[line_pos++] = ':';
		line_buff[line_pos++] = ' ';
	}

	while (size--) {
		u32 val = *(blob32++);

		strscpy(line_buff + line_pos, ascii85_encode(val, buff),
			DMESG_MAX_LINE_LEN - line_pos);
		line_pos += strlen(line_buff + line_pos);

		if ((line_pos + MIN_SPACE) >= DMESG_MAX_LINE_LEN) {
			line_buff[line_pos++] = '\n';
			line_buff[line_pos++] = 0;

			drm_puts(p, line_buff);

			line_pos = 0;

			/* Prevent 'stuck thread' time out errors */
			cond_resched();
		}
	}

	if (line_pos) {
		line_buff[line_pos++] = '\n';
		line_buff[line_pos++] = 0;

		drm_puts(p, line_buff);
	}

	kfree(line_buff);

#undef MIN_SPACE
#undef DMESG_MAX_LINE_LEN
}
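/*
 * Illustrative use from a snapshot printer (names hypothetical): dumping a
 * captured binary buffer of snap->size bytes starting at snap->data:
 *
 *	xe_print_blob_ascii85(&p, "[LOG].data", snap->data, 0, snap->size);
 *
 * The prefix is emitted once at the start of the first line; continuation
 * lines carry raw ASCII85 only, each kept below DMESG_MAX_LINE_LEN
 * characters so they survive dmesg's line-length limits.
 */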