xref: /linux/drivers/gpu/drm/xe/xe_device.c (revision 9a3e975d6619c6fb8997ca59361768b4ec853565)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_device.h"
7 
8 #include <linux/aperture.h>
9 #include <linux/delay.h>
10 #include <linux/fault-inject.h>
11 #include <linux/iopoll.h>
12 #include <linux/units.h>
13 
14 #include <drm/drm_atomic_helper.h>
15 #include <drm/drm_client.h>
16 #include <drm/drm_gem_ttm_helper.h>
17 #include <drm/drm_ioctl.h>
18 #include <drm/drm_managed.h>
19 #include <drm/drm_print.h>
20 #include <uapi/drm/xe_drm.h>
21 
22 #include "display/xe_display.h"
23 #include "instructions/xe_gpu_commands.h"
24 #include "regs/xe_gt_regs.h"
25 #include "regs/xe_regs.h"
26 #include "xe_bo.h"
27 #include "xe_bo_evict.h"
28 #include "xe_debugfs.h"
29 #include "xe_devcoredump.h"
30 #include "xe_device_sysfs.h"
31 #include "xe_dma_buf.h"
32 #include "xe_drm_client.h"
33 #include "xe_drv.h"
34 #include "xe_exec.h"
35 #include "xe_exec_queue.h"
36 #include "xe_force_wake.h"
37 #include "xe_ggtt.h"
38 #include "xe_gsc_proxy.h"
39 #include "xe_gt.h"
40 #include "xe_gt_mcr.h"
41 #include "xe_gt_printk.h"
42 #include "xe_gt_sriov_vf.h"
43 #include "xe_guc.h"
44 #include "xe_guc_pc.h"
45 #include "xe_hw_engine_group.h"
46 #include "xe_hwmon.h"
47 #include "xe_i2c.h"
48 #include "xe_irq.h"
49 #include "xe_late_bind_fw.h"
50 #include "xe_mmio.h"
51 #include "xe_module.h"
52 #include "xe_nvm.h"
53 #include "xe_oa.h"
54 #include "xe_observation.h"
55 #include "xe_pagefault.h"
56 #include "xe_pat.h"
57 #include "xe_pcode.h"
58 #include "xe_pm.h"
59 #include "xe_pmu.h"
60 #include "xe_psmi.h"
61 #include "xe_pxp.h"
62 #include "xe_query.h"
63 #include "xe_shrinker.h"
64 #include "xe_survivability_mode.h"
65 #include "xe_sriov.h"
66 #include "xe_tile.h"
67 #include "xe_ttm_stolen_mgr.h"
68 #include "xe_ttm_sys_mgr.h"
69 #include "xe_vm.h"
70 #include "xe_vm_madvise.h"
71 #include "xe_vram.h"
72 #include "xe_vram_types.h"
73 #include "xe_vsec.h"
74 #include "xe_wait_user_fence.h"
75 #include "xe_wa.h"
76 
77 #include <generated/xe_device_wa_oob.h>
78 #include <generated/xe_wa_oob.h>
79 
80 static int xe_file_open(struct drm_device *dev, struct drm_file *file)
81 {
82 	struct xe_device *xe = to_xe_device(dev);
83 	struct xe_drm_client *client;
84 	struct xe_file *xef;
85 	int ret = -ENOMEM;
86 	struct task_struct *task = NULL;
87 
88 	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
89 	if (!xef)
90 		return ret;
91 
92 	client = xe_drm_client_alloc();
93 	if (!client) {
94 		kfree(xef);
95 		return ret;
96 	}
97 
98 	xef->drm = file;
99 	xef->client = client;
100 	xef->xe = xe;
101 
102 	mutex_init(&xef->vm.lock);
103 	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);
104 
105 	mutex_init(&xef->exec_queue.lock);
106 	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
107 
108 	file->driver_priv = xef;
109 	kref_init(&xef->refcount);
110 
111 	task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
112 	if (task) {
113 		xef->process_name = kstrdup(task->comm, GFP_KERNEL);
114 		xef->pid = task->pid;
115 		put_task_struct(task);
116 	}
117 
118 	return 0;
119 }
120 
121 static void xe_file_destroy(struct kref *ref)
122 {
123 	struct xe_file *xef = container_of(ref, struct xe_file, refcount);
124 
125 	xa_destroy(&xef->exec_queue.xa);
126 	mutex_destroy(&xef->exec_queue.lock);
127 	xa_destroy(&xef->vm.xa);
128 	mutex_destroy(&xef->vm.lock);
129 
130 	xe_drm_client_put(xef->client);
131 	kfree(xef->process_name);
132 	kfree(xef);
133 }
134 
135 /**
136  * xe_file_get() - Take a reference to the xe file object
137  * @xef: Pointer to the xe file
138  *
139  * Anyone with a pointer to xef must take a reference to the xe file
140  * object using this call.
141  *
142  * Return: xe file pointer
143  */
144 struct xe_file *xe_file_get(struct xe_file *xef)
145 {
146 	kref_get(&xef->refcount);
147 	return xef;
148 }
149 
150 /**
151  * xe_file_put() - Drop a reference to the xe file object
152  * @xef: Pointer to the xe file
153  *
154  * Used to drop a reference to the xef object.
155  */
156 void xe_file_put(struct xe_file *xef)
157 {
158 	kref_put(&xef->refcount, xe_file_destroy);
159 }
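/*
 * Illustrative sketch only (not part of the driver): the reference
 * pattern expected of any code that keeps an xef pointer around beyond
 * the call that handed it out, assuming xef came from file->driver_priv.
 * The final xe_file_put() may free xef via xe_file_destroy().
 *
 *	struct xe_file *ref = xe_file_get(xef);
 *
 *	... use ref, possibly from another context ...
 *
 *	xe_file_put(ref);
 */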
160 
161 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
162 {
163 	struct xe_device *xe = to_xe_device(dev);
164 	struct xe_file *xef = file->driver_priv;
165 	struct xe_vm *vm;
166 	struct xe_exec_queue *q;
167 	unsigned long idx;
168 
169 	guard(xe_pm_runtime)(xe);
170 
171 	/*
172 	 * No need for exec_queue.lock here as there is no contention for it
173 	 * when FD is closing as IOCTLs presumably can't be modifying the
174 	 * xarray. Taking exec_queue.lock here causes undue dependency on
175 	 * vm->lock taken during xe_exec_queue_kill().
176 	 */
177 	xa_for_each(&xef->exec_queue.xa, idx, q) {
178 		if (q->vm && q->hwe->hw_engine_group)
179 			xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
180 
181 		if (xe_exec_queue_is_multi_queue_primary(q))
182 			xe_exec_queue_group_kill_put(q->multi_queue.group);
183 		else
184 			xe_exec_queue_kill(q);
185 
186 		xe_exec_queue_put(q);
187 	}
188 	xa_for_each(&xef->vm.xa, idx, vm)
189 		xe_vm_close_and_put(vm);
190 
191 	xe_file_put(xef);
192 }
193 
194 static const struct drm_ioctl_desc xe_ioctls[] = {
195 	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
196 	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
197 	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
198 			  DRM_RENDER_ALLOW),
199 	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
200 	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
201 	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
202 	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
203 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
204 			  DRM_RENDER_ALLOW),
205 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
206 			  DRM_RENDER_ALLOW),
207 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
208 			  DRM_RENDER_ALLOW),
209 	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
210 			  DRM_RENDER_ALLOW),
211 	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
212 	DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
213 	DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl,
214 			  DRM_RENDER_ALLOW),
215 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
216 			  DRM_RENDER_ALLOW),
217 };
218 
219 static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
220 {
221 	struct drm_file *file_priv = file->private_data;
222 	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
223 	long ret;
224 
225 	if (xe_device_wedged(xe))
226 		return -ECANCELED;
227 
228 	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
229 	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
230 	if (ret >= 0)
231 		ret = drm_ioctl(file, cmd, arg);
232 
233 	return ret;
234 }
235 
236 #ifdef CONFIG_COMPAT
237 static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
238 {
239 	struct drm_file *file_priv = file->private_data;
240 	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
241 	long ret;
242 
243 	if (xe_device_wedged(xe))
244 		return -ECANCELED;
245 
246 	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
247 	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
248 	if (ret >= 0)
249 		ret = drm_compat_ioctl(file, cmd, arg);
250 
251 	return ret;
252 }
253 #else
254 /* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
255 #define xe_drm_compat_ioctl NULL
256 #endif
257 
258 static void barrier_open(struct vm_area_struct *vma)
259 {
260 	drm_dev_get(vma->vm_private_data);
261 }
262 
263 static void barrier_close(struct vm_area_struct *vma)
264 {
265 	drm_dev_put(vma->vm_private_data);
266 }
267 
268 static void barrier_release_dummy_page(struct drm_device *dev, void *res)
269 {
270 	struct page *dummy_page = (struct page *)res;
271 
272 	__free_page(dummy_page);
273 }
274 
275 static vm_fault_t barrier_fault(struct vm_fault *vmf)
276 {
277 	struct drm_device *dev = vmf->vma->vm_private_data;
278 	struct vm_area_struct *vma = vmf->vma;
279 	vm_fault_t ret = VM_FAULT_NOPAGE;
280 	pgprot_t prot;
281 	int idx;
282 
283 	prot = vm_get_page_prot(vma->vm_flags);
284 
285 	if (drm_dev_enter(dev, &idx)) {
286 		unsigned long pfn;
287 
288 #define LAST_DB_PAGE_OFFSET 0x7ff001
289 		pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
290 				LAST_DB_PAGE_OFFSET);
291 		ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
292 					  pgprot_noncached(prot));
293 		drm_dev_exit(idx);
294 	} else {
295 		struct page *page;
296 
297 		/* Allocate a new dummy page to map the whole VA range of this VMA to it */
298 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
299 		if (!page)
300 			return VM_FAULT_OOM;
301 
302 		/* Set the page to be freed using drmm release action */
303 		if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
304 			return VM_FAULT_OOM;
305 
306 		ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
307 					  prot);
308 	}
309 
310 	return ret;
311 }
312 
313 static const struct vm_operations_struct vm_ops_barrier = {
314 	.open = barrier_open,
315 	.close = barrier_close,
316 	.fault = barrier_fault,
317 };
318 
319 static int xe_pci_barrier_mmap(struct file *filp,
320 			       struct vm_area_struct *vma)
321 {
322 	struct drm_file *priv = filp->private_data;
323 	struct drm_device *dev = priv->minor->dev;
324 	struct xe_device *xe = to_xe_device(dev);
325 
326 	if (!IS_DGFX(xe))
327 		return -EINVAL;
328 
329 	if (vma->vm_end - vma->vm_start > SZ_4K)
330 		return -EINVAL;
331 
332 	if (is_cow_mapping(vma->vm_flags))
333 		return -EINVAL;
334 
335 	if (vma->vm_flags & (VM_READ | VM_EXEC))
336 		return -EINVAL;
337 
338 	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
339 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
340 	vma->vm_ops = &vm_ops_barrier;
341 	vma->vm_private_data = dev;
342 	drm_dev_get(vma->vm_private_data);
343 
344 	return 0;
345 }
346 
347 static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
348 {
349 	struct drm_file *priv = filp->private_data;
350 	struct drm_device *dev = priv->minor->dev;
351 
352 	if (drm_dev_is_unplugged(dev))
353 		return -ENODEV;
354 
355 	switch (vma->vm_pgoff) {
356 	case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
357 		return xe_pci_barrier_mmap(filp, vma);
358 	}
359 
360 	return drm_gem_mmap(filp, vma);
361 }
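/*
 * Illustrative userspace sketch only: mapping the PCI barrier page that
 * xe_pci_barrier_mmap() above serves. The offset macro name mirrors the
 * kernel-side XE_PCI_BARRIER_MMAP_OFFSET used in xe_mmap(); the exact
 * spelling userspace should use is assumed to come from the xe_drm.h
 * uapi header. The mapping must be write-only, at most 4K in size and
 * not a COW mapping, matching the checks in xe_pci_barrier_mmap().
 *
 *	void *barrier = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED,
 *			     drm_fd, XE_PCI_BARRIER_MMAP_OFFSET);
 */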
362 
363 static const struct file_operations xe_driver_fops = {
364 	.owner = THIS_MODULE,
365 	.open = drm_open,
366 	.release = drm_release_noglobal,
367 	.unlocked_ioctl = xe_drm_ioctl,
368 	.mmap = xe_mmap,
369 	.poll = drm_poll,
370 	.read = drm_read,
371 	.compat_ioctl = xe_drm_compat_ioctl,
372 	.llseek = noop_llseek,
373 #ifdef CONFIG_PROC_FS
374 	.show_fdinfo = drm_show_fdinfo,
375 #endif
376 	.fop_flags = FOP_UNSIGNED_OFFSET,
377 };
378 
379 static struct drm_driver driver = {
380 	/* Don't use MTRRs here; the Xserver or userspace app should
381 	 * deal with them for Intel hardware.
382 	 */
383 	.driver_features =
384 	    DRIVER_GEM |
385 	    DRIVER_RENDER | DRIVER_SYNCOBJ |
386 	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
387 	.open = xe_file_open,
388 	.postclose = xe_file_close,
389 
390 	.gem_prime_import = xe_gem_prime_import,
391 
392 	.dumb_create = xe_bo_dumb_create,
393 	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
394 #ifdef CONFIG_PROC_FS
395 	.show_fdinfo = xe_drm_client_fdinfo,
396 #endif
397 	.ioctls = xe_ioctls,
398 	.num_ioctls = ARRAY_SIZE(xe_ioctls),
399 	.fops = &xe_driver_fops,
400 	.name = DRIVER_NAME,
401 	.desc = DRIVER_DESC,
402 	.major = DRIVER_MAJOR,
403 	.minor = DRIVER_MINOR,
404 	.patchlevel = DRIVER_PATCHLEVEL,
405 };
406 
407 static void xe_device_destroy(struct drm_device *dev, void *dummy)
408 {
409 	struct xe_device *xe = to_xe_device(dev);
410 
411 	xe_bo_dev_fini(&xe->bo_device);
412 
413 	if (xe->preempt_fence_wq)
414 		destroy_workqueue(xe->preempt_fence_wq);
415 
416 	if (xe->ordered_wq)
417 		destroy_workqueue(xe->ordered_wq);
418 
419 	if (xe->unordered_wq)
420 		destroy_workqueue(xe->unordered_wq);
421 
422 	if (xe->destroy_wq)
423 		destroy_workqueue(xe->destroy_wq);
424 
425 	ttm_device_fini(&xe->ttm);
426 }
427 
428 struct xe_device *xe_device_create(struct pci_dev *pdev,
429 				   const struct pci_device_id *ent)
430 {
431 	struct xe_device *xe;
432 	int err;
433 
434 	xe_display_driver_set_hooks(&driver);
435 
436 	err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
437 	if (err)
438 		return ERR_PTR(err);
439 
440 	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
441 	if (IS_ERR(xe))
442 		return xe;
443 
444 	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
445 			      xe->drm.anon_inode->i_mapping,
446 			      xe->drm.vma_offset_manager, 0);
447 	if (WARN_ON(err))
448 		goto err;
449 
450 	xe_bo_dev_init(&xe->bo_device);
451 	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
452 	if (err)
453 		goto err;
454 
455 	err = xe_shrinker_create(xe);
456 	if (err)
457 		goto err;
458 
459 	xe->info.devid = pdev->device;
460 	xe->info.revid = pdev->revision;
461 	xe->info.force_execlist = xe_modparam.force_execlist;
462 	xe->atomic_svm_timeslice_ms = 5;
463 
464 	err = xe_irq_init(xe);
465 	if (err)
466 		goto err;
467 
468 	xe_validation_device_init(&xe->val);
469 
470 	init_waitqueue_head(&xe->ufence_wq);
471 
472 	init_rwsem(&xe->usm.lock);
473 
474 	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
475 
476 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
477 		/* Trigger a large asid and an early asid wrap. */
478 		u32 asid;
479 
480 		BUILD_BUG_ON(XE_MAX_ASID < 2);
481 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
482 				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
483 				      &xe->usm.next_asid, GFP_KERNEL);
484 		drm_WARN_ON(&xe->drm, err);
485 		if (err >= 0)
486 			xa_erase(&xe->usm.asid_to_vm, asid);
487 	}
488 
489 	err = xe_bo_pinned_init(xe);
490 	if (err)
491 		goto err;
492 
493 	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
494 						       WQ_MEM_RECLAIM);
495 	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
496 	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
497 	xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
498 	if (!xe->ordered_wq || !xe->unordered_wq ||
499 	    !xe->preempt_fence_wq || !xe->destroy_wq) {
500 		/*
501 		 * Cleanup done in xe_device_destroy via
502 		 * drmm_add_action_or_reset registered above
503 		 */
504 		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
505 		err = -ENOMEM;
506 		goto err;
507 	}
508 
509 	err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
510 	if (err)
511 		goto err;
512 
513 	return xe;
514 
515 err:
516 	return ERR_PTR(err);
517 }
518 ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
519 
520 static bool xe_driver_flr_disabled(struct xe_device *xe)
521 {
522 	if (IS_SRIOV_VF(xe))
523 		return true;
524 
525 	if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
526 		drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
527 		return true;
528 	}
529 
530 	return false;
531 }
532 
533 /*
534  * The driver-initiated FLR is the highest level of reset that we can trigger
535  * from within the driver. It is different from the PCI FLR in that it doesn't
536  * fully reset the SGUnit and doesn't modify the PCI config space and therefore
537  * it doesn't require a re-enumeration of the PCI BARs. However, the
538  * driver-initiated FLR does still cause a reset of both GT and display and a
539  * memory wipe of local and stolen memory, so recovery would require a full HW
540  * re-init and saving/restoring (or re-populating) the wiped memory. Since we
541  * perform the FLR as the very last action before releasing access to the HW
542  * during the driver release flow, we don't attempt recovery at all, because
543  * if/when a new instance of Xe is bound to the device it will do a full
544  * re-init anyway.
545  */
546 static void __xe_driver_flr(struct xe_device *xe)
547 {
548 	const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
549 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
550 	int ret;
551 
552 	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
553 
554 	/*
555 	 * Make sure any pending FLR requests have cleared by waiting for the
556 	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
557 	 * to make sure it's not still set from a prior attempt (it's a write to
558 	 * clear bit).
559 	 * Note that we should never be in a situation where a previous attempt
560 	 * is still pending (unless the HW is totally dead), but better to be
561 	 * safe in case something unexpected happens.
562 	 */
563 	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
564 	if (ret) {
565 		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
566 		return;
567 	}
568 	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
569 
570 	/* Trigger the actual Driver-FLR */
571 	xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);
572 
573 	/* Wait for hardware teardown to complete */
574 	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
575 	if (ret) {
576 		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
577 		return;
578 	}
579 
580 	/* Wait for hardware/firmware re-init to complete */
581 	ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
582 			     flr_timeout, NULL, false);
583 	if (ret) {
584 		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
585 		return;
586 	}
587 
588 	/* Clear sticky completion status */
589 	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
590 }
591 
592 static void xe_driver_flr(struct xe_device *xe)
593 {
594 	if (xe_driver_flr_disabled(xe))
595 		return;
596 
597 	__xe_driver_flr(xe);
598 }
599 
600 static void xe_driver_flr_fini(void *arg)
601 {
602 	struct xe_device *xe = arg;
603 
604 	if (xe->needs_flr_on_fini)
605 		xe_driver_flr(xe);
606 }
607 
608 static void xe_device_sanitize(void *arg)
609 {
610 	struct xe_device *xe = arg;
611 	struct xe_gt *gt;
612 	u8 id;
613 
614 	for_each_gt(gt, xe, id)
615 		xe_gt_sanitize(gt);
616 }
617 
618 static int xe_set_dma_info(struct xe_device *xe)
619 {
620 	unsigned int mask_size = xe->info.dma_mask_size;
621 	int err;
622 
623 	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));
624 
625 	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
626 	if (err)
627 		goto mask_err;
628 
629 	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
630 	if (err)
631 		goto mask_err;
632 
633 	return 0;
634 
635 mask_err:
636 	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
637 	return err;
638 }
639 
640 static int lmem_initializing(struct xe_device *xe)
641 {
642 	if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) & LMEM_INIT)
643 		return 0;
644 
645 	if (signal_pending(current))
646 		return -EINTR;
647 
648 	return 1;
649 }
650 
651 static int wait_for_lmem_ready(struct xe_device *xe)
652 {
653 	const unsigned long TIMEOUT_SEC = 60;
654 	unsigned long prev_jiffies;
655 	int initializing;
656 
657 	if (!IS_DGFX(xe))
658 		return 0;
659 
660 	if (IS_SRIOV_VF(xe))
661 		return 0;
662 
663 	if (!lmem_initializing(xe))
664 		return 0;
665 
666 	drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
667 	prev_jiffies = jiffies;
668 
669 	/*
670 	 * The boot firmware initializes local memory and
671 	 * assesses its health. If memory training fails,
672 	 * the punit will have been instructed to keep the GT powered
673 	 * down and we won't be able to communicate with it.
674 	 *
675 	 * If the status check is done before punit updates the register,
676 	 * it can lead to the system being unusable.
677 	 * Use a timeout and defer the probe to prevent this.
678 	 */
679 	poll_timeout_us(initializing = lmem_initializing(xe),
680 			initializing <= 0,
681 			20 * USEC_PER_MSEC, TIMEOUT_SEC * USEC_PER_SEC, true);
682 	if (initializing < 0)
683 		return initializing;
684 
685 	if (initializing) {
686 		drm_dbg(&xe->drm, "lmem not initialized by firmware\n");
687 		return -EPROBE_DEFER;
688 	}
689 
690 	drm_dbg(&xe->drm, "lmem ready after %ums",
691 		jiffies_to_msecs(jiffies - prev_jiffies));
692 
693 	return 0;
694 }
695 ALLOW_ERROR_INJECTION(wait_for_lmem_ready, ERRNO); /* See xe_pci_probe() */
696 
697 static void vf_update_device_info(struct xe_device *xe)
698 {
699 	xe_assert(xe, IS_SRIOV_VF(xe));
700 	/* disable features that are not available/applicable to VFs */
701 	xe->info.probe_display = 0;
702 	xe->info.has_heci_cscfi = 0;
703 	xe->info.has_heci_gscfi = 0;
704 	xe->info.has_late_bind = 0;
705 	xe->info.skip_guc_pc = 1;
706 	xe->info.skip_pcode = 1;
707 }
708 
709 static int xe_device_vram_alloc(struct xe_device *xe)
710 {
711 	struct xe_vram_region *vram;
712 
713 	if (!IS_DGFX(xe))
714 		return 0;
715 
716 	vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
717 	if (!vram)
718 		return -ENOMEM;
719 
720 	xe->mem.vram = vram;
721 	return 0;
722 }
723 
724 /**
725  * xe_device_probe_early() - Device early probe
726  * @xe: xe device instance
727  *
728  * Initialize MMIO resources that don't require any
729  * knowledge about tile count. Also initialize pcode and
730  * check vram initialization on root tile.
731  *
732  * Return: 0 on success, error code on failure
733  */
734 int xe_device_probe_early(struct xe_device *xe)
735 {
736 	int err;
737 
738 	xe_wa_device_init(xe);
739 	xe_wa_process_device_oob(xe);
740 
741 	err = xe_mmio_probe_early(xe);
742 	if (err)
743 		return err;
744 
745 	xe_sriov_probe_early(xe);
746 
747 	if (IS_SRIOV_VF(xe))
748 		vf_update_device_info(xe);
749 
750 	err = xe_pcode_probe_early(xe);
751 	if (err || xe_survivability_mode_is_requested(xe)) {
752 		int save_err = err;
753 
754 		/*
755 		 * Try to leave the device in survivability mode if possible,
756 		 * but still return the previous error for error
757 		 * propagation.
758 		 */
759 		err = xe_survivability_mode_boot_enable(xe);
760 		if (err)
761 			return err;
762 
763 		return save_err;
764 	}
765 
766 	err = wait_for_lmem_ready(xe);
767 	if (err)
768 		return err;
769 
770 	xe->wedged.mode = xe_modparam.wedged_mode;
771 
772 	err = xe_device_vram_alloc(xe);
773 	if (err)
774 		return err;
775 
776 	return 0;
777 }
778 ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
779 
780 static int probe_has_flat_ccs(struct xe_device *xe)
781 {
782 	struct xe_gt *gt;
783 	u32 reg;
784 
785 	/* Always enabled/disabled, no runtime check to do */
786 	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
787 		return 0;
788 
789 	gt = xe_root_mmio_gt(xe);
790 	if (!gt)
791 		return 0;
792 
793 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
794 	if (!fw_ref.domains)
795 		return -ETIMEDOUT;
796 
797 	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
798 	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);
799 
800 	if (!xe->info.has_flat_ccs)
801 		drm_dbg(&xe->drm,
802 			"Flat CCS has been disabled in BIOS, may lead to performance impact");
803 
804 	return 0;
805 }
806 
807 int xe_device_probe(struct xe_device *xe)
808 {
809 	struct xe_tile *tile;
810 	struct xe_gt *gt;
811 	int err;
812 	u8 id;
813 
814 	xe_pat_init_early(xe);
815 
816 	err = xe_sriov_init(xe);
817 	if (err)
818 		return err;
819 
820 	xe->info.mem_region_mask = 1;
821 
822 	err = xe_set_dma_info(xe);
823 	if (err)
824 		return err;
825 
826 	err = xe_mmio_probe_tiles(xe);
827 	if (err)
828 		return err;
829 
830 	for_each_gt(gt, xe, id) {
831 		err = xe_gt_init_early(gt);
832 		if (err)
833 			return err;
834 	}
835 
836 	for_each_tile(tile, xe, id) {
837 		err = xe_ggtt_init_early(tile->mem.ggtt);
838 		if (err)
839 			return err;
840 	}
841 
842 	/*
843 	 * From here on, if a step fails, make sure a Driver-FLR is triggered.
844 	 */
845 	err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
846 	if (err)
847 		return err;
848 
849 	err = probe_has_flat_ccs(xe);
850 	if (err)
851 		return err;
852 
853 	err = xe_vram_probe(xe);
854 	if (err)
855 		return err;
856 
857 	for_each_tile(tile, xe, id) {
858 		err = xe_tile_init_noalloc(tile);
859 		if (err)
860 			return err;
861 	}
862 
863 	/*
864 	 * Allow allocations only now to ensure xe_display_init_early()
865 	 * is the first to allocate, always.
866 	 */
867 	err = xe_ttm_sys_mgr_init(xe);
868 	if (err)
869 		return err;
870 
871 	/* Allocate and map stolen after potential VRAM resize */
872 	err = xe_ttm_stolen_mgr_init(xe);
873 	if (err)
874 		return err;
875 
876 	/*
877 	 * Now that GT is initialized (TTM in particular),
878 	 * we can try to init display, and inherit the initial fb.
879 	 * This is the reason the first allocation needs to be done
880 	 * inside display.
881 	 */
882 	err = xe_display_init_early(xe);
883 	if (err)
884 		return err;
885 
886 	for_each_tile(tile, xe, id) {
887 		err = xe_tile_init(tile);
888 		if (err)
889 			return err;
890 	}
891 
892 	err = xe_irq_install(xe);
893 	if (err)
894 		return err;
895 
896 	for_each_gt(gt, xe, id) {
897 		err = xe_gt_init(gt);
898 		if (err)
899 			return err;
900 	}
901 
902 	err = xe_pagefault_init(xe);
903 	if (err)
904 		return err;
905 
906 	if (xe->tiles->media_gt &&
907 	    XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
908 		XE_DEVICE_WA_DISABLE(xe, 15015404425);
909 
910 	err = xe_devcoredump_init(xe);
911 	if (err)
912 		return err;
913 
914 	xe_nvm_init(xe);
915 
916 	err = xe_heci_gsc_init(xe);
917 	if (err)
918 		return err;
919 
920 	err = xe_late_bind_init(&xe->late_bind);
921 	if (err)
922 		return err;
923 
924 	err = xe_oa_init(xe);
925 	if (err)
926 		return err;
927 
928 	err = xe_display_init(xe);
929 	if (err)
930 		return err;
931 
932 	err = xe_pxp_init(xe);
933 	if (err)
934 		return err;
935 
936 	err = xe_psmi_init(xe);
937 	if (err)
938 		return err;
939 
940 	err = drm_dev_register(&xe->drm, 0);
941 	if (err)
942 		return err;
943 
944 	xe_display_register(xe);
945 
946 	err = xe_oa_register(xe);
947 	if (err)
948 		goto err_unregister_display;
949 
950 	err = xe_pmu_register(&xe->pmu);
951 	if (err)
952 		goto err_unregister_display;
953 
954 	err = xe_device_sysfs_init(xe);
955 	if (err)
956 		goto err_unregister_display;
957 
958 	xe_debugfs_register(xe);
959 
960 	err = xe_hwmon_register(xe);
961 	if (err)
962 		goto err_unregister_display;
963 
964 	err = xe_i2c_probe(xe);
965 	if (err)
966 		goto err_unregister_display;
967 
968 	for_each_gt(gt, xe, id)
969 		xe_gt_sanitize_freq(gt);
970 
971 	xe_vsec_init(xe);
972 
973 	err = xe_sriov_init_late(xe);
974 	if (err)
975 		goto err_unregister_display;
976 
977 	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
978 
979 err_unregister_display:
980 	xe_display_unregister(xe);
981 
982 	return err;
983 }
984 
985 void xe_device_remove(struct xe_device *xe)
986 {
987 	xe_display_unregister(xe);
988 
989 	xe_nvm_fini(xe);
990 
991 	drm_dev_unplug(&xe->drm);
992 
993 	xe_bo_pci_dev_remove_all(xe);
994 }
995 
996 void xe_device_shutdown(struct xe_device *xe)
997 {
998 	struct xe_gt *gt;
999 	u8 id;
1000 
1001 	drm_dbg(&xe->drm, "Shutting down device\n");
1002 
1003 	xe_display_pm_shutdown(xe);
1004 
1005 	xe_irq_suspend(xe);
1006 
1007 	for_each_gt(gt, xe, id)
1008 		xe_gt_shutdown(gt);
1009 
1010 	xe_display_pm_shutdown_late(xe);
1011 
1012 	if (!xe_driver_flr_disabled(xe)) {
1013 		/* BOOM! */
1014 		__xe_driver_flr(xe);
1015 	}
1016 }
1017 
1018 /**
1019  * xe_device_wmb() - Device specific write memory barrier
1020  * @xe: the &xe_device
1021  *
1022  * While wmb() is sufficient for a barrier if we use system memory, on discrete
1023  * platforms with device memory we additionally need to issue a register write.
1024  * Since it doesn't matter which register we write to, use the read-only VF_CAP
1025  * register that is also marked as accessible by the VFs.
1026  */
1027 void xe_device_wmb(struct xe_device *xe)
1028 {
1029 	wmb();
1030 	if (IS_DGFX(xe))
1031 		xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
1032 }
1033 
1034 /*
1035  * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt.
1036  */
1037 static void tdf_request_sync(struct xe_device *xe)
1038 {
1039 	struct xe_gt *gt;
1040 	u8 id;
1041 
1042 	for_each_gt(gt, xe, id) {
1043 		if (xe_gt_is_media_type(gt))
1044 			continue;
1045 
1046 		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
1047 		if (!fw_ref.domains)
1048 			return;
1049 
1050 		xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
1051 
1052 		/*
1053 		 * FIXME: We can likely do better here with our choice of
1054 		 * timeout. Currently we just assume the worst case, i.e. 150us,
1055 		 * which is believed to be sufficient to cover the worst case
1056 		 * scenario on current platforms if all cache entries are
1057 		 * transient and need to be flushed.
1058 		 */
1059 		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
1060 				   150, NULL, false))
1061 			xe_gt_err_once(gt, "TD flush timeout\n");
1062 	}
1063 }
1064 
1065 void xe_device_l2_flush(struct xe_device *xe)
1066 {
1067 	struct xe_gt *gt;
1068 
1069 	gt = xe_root_mmio_gt(xe);
1070 	if (!gt)
1071 		return;
1072 
1073 	if (!XE_GT_WA(gt, 16023588340))
1074 		return;
1075 
1076 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
1077 	if (!fw_ref.domains)
1078 		return;
1079 
1080 	spin_lock(&gt->global_invl_lock);
1081 
1082 	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
1083 	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true))
1084 		xe_gt_err_once(gt, "Global invalidation timeout\n");
1085 
1086 	spin_unlock(&gt->global_invl_lock);
1087 }
1088 
1089 /**
1090  * xe_device_td_flush() - Flush transient L3 cache entries
1091  * @xe: The device
1092  *
1093  * Display engine has direct access to memory and is never coherent with L3/L4
1094  * caches (or CPU caches), however KMD is responsible for specifically flushing
1095  * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
1096  * can happen from such a surface without seeing corruption.
1097  *
1098  * Display surfaces can be tagged as transient by mapping them using one of the
1099  * various L3:XD PAT index modes on Xe2.
1100  *
1101  * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
1102  * at the end of each submission via PIPE_CONTROL for compute/render, since SA
1103  * Media is not coherent with L3 and we want to support render-vs-media
1104  * use cases. For other engines like copy/blt the HW internally forces uncached
1105  * behaviour, which is why we can skip the TDF on such platforms.
1106  */
1107 void xe_device_td_flush(struct xe_device *xe)
1108 {
1109 	struct xe_gt *root_gt;
1110 
1111 	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
1112 		return;
1113 
1114 	root_gt = xe_root_mmio_gt(xe);
1115 	if (!root_gt)
1116 		return;
1117 
1118 	if (XE_GT_WA(root_gt, 16023588340)) {
1119 		/* A transient flush is not sufficient: flush the L2 */
1120 		xe_device_l2_flush(xe);
1121 	} else {
1122 		xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc);
1123 		tdf_request_sync(xe);
1124 		xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc);
1125 	}
1126 }
1127 
1128 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
1129 {
1130 	return xe_device_has_flat_ccs(xe) ?
1131 		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
1132 }
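/*
 * Worked example, assuming NUM_BYTES_PER_CCS_BYTE(xe) == 256 (one CCS
 * byte per 256 bytes of main memory; illustrative value only): for a
 * 1 MiB buffer on a flat-CCS device xe_device_ccs_bytes() returns
 * DIV_ROUND_UP_ULL(SZ_1M, 256) == 4096 bytes of compression metadata,
 * and 0 on devices without flat CCS.
 */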
1133 
1134 /**
1135  * xe_device_assert_mem_access - Inspect the current runtime_pm state.
1136  * @xe: xe device instance
1137  *
1138  * To be used before any kind of memory access. It will splat a debug warning
1139  * if the device is currently sleeping. But it doesn't guarantee in any way
1140  * that the device is going to remain awake. Xe PM runtime get and put
1141  * functions might be added to the outer bound of the memory access, while
1142  * this check is intended for inner usage to splat some warning if the worst
1143  * case has just happened.
1144  */
1145 void xe_device_assert_mem_access(struct xe_device *xe)
1146 {
1147 	xe_assert(xe, !xe_pm_runtime_suspended(xe));
1148 }
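/*
 * Minimal usage sketch (illustrative only): the runtime PM reference is
 * taken at the outer bound of the access path, while the assert guards
 * the inner helper that actually touches memory or MMIO.
 *
 *	xe_pm_runtime_get(xe);
 *	...
 *	xe_device_assert_mem_access(xe);
 *	... perform the memory access ...
 *	xe_pm_runtime_put(xe);
 */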
1149 
1150 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
1151 {
1152 	struct xe_gt *gt;
1153 	u8 id;
1154 
1155 	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
1156 	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);
1157 
1158 	for_each_gt(gt, xe, id) {
1159 		drm_printf(p, "GT id: %u\n", id);
1160 		drm_printf(p, "\tTile: %u\n", gt->tile->id);
1161 		drm_printf(p, "\tType: %s\n",
1162 			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
1163 		drm_printf(p, "\tIP ver: %u.%u.%u\n",
1164 			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
1165 			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
1166 			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
1167 		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
1168 	}
1169 }
1170 
1171 u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
1172 {
1173 	return sign_extend64(address, xe->info.va_bits - 1);
1174 }
1175 
1176 u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
1177 {
1178 	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
1179 }
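/*
 * Worked example, assuming xe->info.va_bits == 48 (an illustrative
 * value): xe_device_canonicalize_addr() sign-extends bit 47 upwards, so
 * 0x0000800000000000 becomes 0xffff800000000000, while
 * xe_device_uncanonicalize_addr() masks the address back to the low
 * 48 bits, undoing that extension.
 */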
1180 
1181 static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
1182 {
1183 	struct xe_device *xe = arg;
1184 
1185 	xe_pm_runtime_put(xe);
1186 }
1187 
1188 /**
1189  * DOC: Xe Device Wedging
1190  *
1191  * Xe driver uses drm device wedged uevent as documented in Documentation/gpu/drm-uapi.rst.
1192  * When the device is in a wedged state, every IOCTL will be blocked and the GT cannot be
1193  * used. Certain critical errors, such as GT reset failure or firmware failures, can cause
1194  * the device to be wedged. The default recovery method for a wedged state
1195  * is rebind/bus-reset.
1196  *
1197  * Another recovery method is vendor-specific. Below are the cases that send
1198  * ``WEDGED=vendor-specific`` recovery method in drm device wedged uevent.
1199  *
1200  * Case: Firmware Flash
1201  * --------------------
1202  *
1203  * Identification Hint
1204  * +++++++++++++++++++
1205  *
1206  * ``WEDGED=vendor-specific`` drm device wedged uevent with
1207  * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
1208  * admin/userspace consumer about the need for a firmware flash.
1209  *
1210  * Recovery Procedure
1211  * ++++++++++++++++++
1212  *
1213  * Once ``WEDGED=vendor-specific`` drm device wedged uevent is received, follow
1214  * the below steps
1215  *
1216  * - Check Runtime Survivability mode sysfs.
1217  *   If enabled, firmware flash is required to recover the device.
1218  *
1219  *   /sys/bus/pci/devices/<device>/survivability_mode
1220  *
1221  * - Admin/userspace consumer can use firmware flashing tools like fwupd to flash
1222  *   firmware and restore device to normal operation.
1223  */
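/*
 * Illustrative userspace sketch only (not part of the driver): a
 * consumer reacting to a WEDGED=vendor-specific uevent could check the
 * survivability mode sysfs attribute roughly as below, with <device>
 * standing in for the PCI address from the uevent. The file contents
 * indicate whether survivability mode is enabled; see the
 * xe-survivability-mode documentation for the exact format.
 *
 *	FILE *f = fopen("/sys/bus/pci/devices/<device>/survivability_mode", "r");
 */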
1224 
1225 /**
1226  * xe_device_set_wedged_method - Set wedged recovery method
1227  * @xe: xe device instance
1228  * @method: recovery method to set
1229  *
1230  * Set wedged recovery method to be sent in drm wedged uevent.
1231  */
1232 void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method)
1233 {
1234 	xe->wedged.method = method;
1235 }
1236 
1237 /**
1238  * xe_device_declare_wedged - Declare device wedged
1239  * @xe: xe device instance
1240  *
1241  * This is a final state that can only be cleared with the recovery method
1242  * specified in the drm wedged uevent. The method can be set using
1243  * xe_device_set_wedged_method before declaring the device as wedged. If no method
1244  * is set, reprobe (unbind/re-bind) will be sent by default.
1245  *
1246  * In this state every IOCTL will be blocked so the GT cannot be used.
1247  * In general it will be called upon any critical error such as gt reset
1248  * failure or guc loading failure. Userspace will be notified of this state
1249  * through device wedged uevent.
1250  * If xe.wedged module parameter is set to 2, this function will be called
1251  * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
1252  * snapshot capture. In this mode, GT reset won't be attempted so the state of
1253  * the issue is preserved for further debugging.
1254  */
1255 void xe_device_declare_wedged(struct xe_device *xe)
1256 {
1257 	struct xe_gt *gt;
1258 	u8 id;
1259 
1260 	if (xe->wedged.mode == 0) {
1261 		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
1262 		return;
1263 	}
1264 
1265 	xe_pm_runtime_get_noresume(xe);
1266 
1267 	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
1268 		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
1269 		return;
1270 	}
1271 
1272 	if (!atomic_xchg(&xe->wedged.flag, 1)) {
1273 		xe->needs_flr_on_fini = true;
1274 		drm_err(&xe->drm,
1275 			"CRITICAL: Xe has declared device %s as wedged.\n"
1276 			"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
1277 			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
1278 			dev_name(xe->drm.dev));
1279 	}
1280 
1281 	for_each_gt(gt, xe, id)
1282 		xe_gt_declare_wedged(gt);
1283 
1284 	if (xe_device_wedged(xe)) {
1285 		/* If no wedge recovery method is set, use default */
1286 		if (!xe->wedged.method)
1287 			xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND |
1288 						    DRM_WEDGE_RECOVERY_BUS_RESET);
1289 
1290 		/* Notify userspace of wedged device */
1291 		drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL);
1292 	}
1293 }
1294