// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>
#include <linux/units.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_pagemap_util.h>
#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>

#include "display/xe_display.h"
#include "instructions/xe_gpu_commands.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_debugfs.h"
#include "xe_devcoredump.h"
#include "xe_device_sysfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
#include "xe_exec.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_pc.h"
#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_late_bind_fw.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_nvm.h"
#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pagefault.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_pmu.h"
#include "xe_psmi.h"
#include "xe_pxp.h"
#include "xe_query.h"
#include "xe_shrinker.h"
#include "xe_soc_remapper.h"
#include "xe_sriov.h"
#include "xe_survivability_mode.h"
#include "xe_svm.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vm_madvise.h"
#include "xe_vram.h"
#include "xe_vram_types.h"
#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"

#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_drm_client *client;
	struct xe_file *xef;
	int ret = -ENOMEM;
	struct task_struct *task = NULL;

	xef = kzalloc(sizeof(*xef), GFP_KERNEL);
	if (!xef)
		return ret;

	client = xe_drm_client_alloc();
	if (!client) {
		kfree(xef);
		return ret;
	}

	xef->drm = file;
	xef->client = client;
	xef->xe = xe;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->exec_queue.lock);
	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

	file->driver_priv = xef;
	kref_init(&xef->refcount);

	task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
	if (task) {
		xef->process_name = kstrdup(task->comm, GFP_KERNEL);
		xef->pid = task->pid;
		put_task_struct(task);
	}

	return 0;
}

static void xe_file_destroy(struct kref *ref)
{
	struct xe_file *xef = container_of(ref, struct xe_file, refcount);

	xa_destroy(&xef->exec_queue.xa);
	mutex_destroy(&xef->exec_queue.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	xe_drm_client_put(xef->client);
	kfree(xef->process_name);
	kfree(xef);
}

/**
 * xe_file_get() - Take a reference to the xe file object
 * @xef: Pointer to the xe file
 *
 * Anyone with a pointer to xef must take a reference to the xe file
 * object using this call.
 *
 * Return: xe file pointer
 */
struct xe_file *xe_file_get(struct xe_file *xef)
{
	kref_get(&xef->refcount);
	return xef;
}

/**
 * xe_file_put() - Drop a reference to the xe file object
 * @xef: Pointer to the xe file
 *
 * Used to drop a reference to the xef object.
 */
void xe_file_put(struct xe_file *xef)
{
	kref_put(&xef->refcount, xe_file_destroy);
}

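/*
 * Usage sketch, illustrative only (not a caller in this file): code that
 * stashes an xef pointer beyond the lifetime of the call that handed it
 * over is expected to wrap the access in a get/put pair:
 *
 *	struct xe_file *ref = xe_file_get(xef);
 *
 *	... use ref, possibly from another context ...
 *
 *	xe_file_put(ref);
 */
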
static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_exec_queue *q;
	unsigned long idx;

	guard(xe_pm_runtime)(xe);

	/*
	 * No need for exec_queue.lock here as there is no contention for it
	 * when the FD is closing, since IOCTLs presumably can't be modifying
	 * the xarray. Taking exec_queue.lock here causes an undue dependency
	 * on vm->lock taken during xe_exec_queue_kill().
	 */
	xa_for_each(&xef->exec_queue.xa, idx, q) {
		if (q->vm && q->hwe->hw_engine_group)
			xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
		xe_exec_queue_kill(q);
		xe_exec_queue_put(q);
	}
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);

	xe_file_put(xef);
}

static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
			  DRM_RENDER_ALLOW),
};

static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
	if (ret >= 0)
		ret = drm_ioctl(file, cmd, arg);

	return ret;
}

#ifdef CONFIG_COMPAT
static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
	if (ret >= 0)
		ret = drm_compat_ioctl(file, cmd, arg);

	return ret;
}
#else
/* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
#define xe_drm_compat_ioctl NULL
#endif

static void barrier_open(struct vm_area_struct *vma)
{
	drm_dev_get(vma->vm_private_data);
}

static void barrier_close(struct vm_area_struct *vma)
{
	drm_dev_put(vma->vm_private_data);
}

static void barrier_release_dummy_page(struct drm_device *dev, void *res)
{
	struct page *dummy_page = (struct page *)res;

	__free_page(dummy_page);
}

static vm_fault_t barrier_fault(struct vm_fault *vmf)
{
	struct drm_device *dev = vmf->vma->vm_private_data;
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	pgprot_t prot;
	int idx;

	prot = vm_get_page_prot(vma->vm_flags);

	if (drm_dev_enter(dev, &idx)) {
		unsigned long pfn;

#define LAST_DB_PAGE_OFFSET 0x7ff001
		pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
				LAST_DB_PAGE_OFFSET);
		ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
					  pgprot_noncached(prot));
		drm_dev_exit(idx);
	} else {
		struct page *page;

		/* Allocate a new dummy page to map the whole VA range in this VMA to it */
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return VM_FAULT_OOM;

		/* Set the page to be freed using a drmm release action */
		if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
			return VM_FAULT_OOM;

		ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
					  prot);
	}

	return ret;
}

static const struct vm_operations_struct vm_ops_barrier = {
	.open = barrier_open,
	.close = barrier_close,
	.fault = barrier_fault,
};

static int xe_pci_barrier_mmap(struct file *filp,
			       struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct xe_device *xe = to_xe_device(dev);

	if (!IS_DGFX(xe))
		return -EINVAL;

	if (vma->vm_end - vma->vm_start > SZ_4K)
		return -EINVAL;

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	if (vma->vm_flags & (VM_READ | VM_EXEC))
		return -EINVAL;

	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
	vma->vm_ops = &vm_ops_barrier;
	vma->vm_private_data = dev;
	drm_dev_get(vma->vm_private_data);

	return 0;
}

static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	switch (vma->vm_pgoff) {
	case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
		return xe_pci_barrier_mmap(filp, vma);
	}

	return drm_gem_mmap(filp, vma);
}

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = xe_drm_ioctl,
	.mmap = xe_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = xe_drm_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
	.fop_flags = FOP_UNSIGNED_OFFSET,
};

/**
 * xe_is_xe_file() - Is the file an xe device file?
 * @file: The file.
 *
 * Checks whether the file is opened against
 * an xe device.
 *
 * Return: %true if an xe file, %false if not.
 */
bool xe_is_xe_file(const struct file *file)
{
	return file->f_op == &xe_driver_fops;
}

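/*
 * Hypothetical caller sketch: code handed a struct file (e.g. resolved from
 * an fd supplied through uAPI) can use xe_is_xe_file() before trusting
 * file->private_data to be a &struct drm_file opened against xe:
 *
 *	if (xe_is_xe_file(file))
 *		xef = ((struct drm_file *)file->private_data)->driver_priv;
 */
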
static struct drm_driver driver = {
	/* Don't use MTRRs here; the Xserver or userspace app should
	 * deal with them for Intel hardware.
	 */
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = xe_drm_client_fdinfo,
#endif
	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	xe_bo_dev_fini(&xe->bo_device);

	if (xe->preempt_fence_wq)
		destroy_workqueue(xe->preempt_fence_wq);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	if (xe->destroy_wq)
		destroy_workqueue(xe->destroy_wq);

	ttm_device_fini(&xe->ttm);
}

struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, 0);
	if (WARN_ON(err))
		goto err;

	xe_bo_dev_init(&xe->bo_device);
	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		goto err;

	err = xe_shrinker_create(xe);
	if (err)
		goto err;

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.force_execlist = xe_modparam.force_execlist;
	xe->atomic_svm_timeslice_ms = 5;
	xe->min_run_period_lr_ms = 5;

	err = xe_irq_init(xe);
	if (err)
		goto err;

	xe_validation_device_init(&xe->val);

	init_waitqueue_head(&xe->ufence_wq);

	init_rwsem(&xe->usm.lock);

	err = xe_pagemap_shrinker_create(xe);
	if (err)
		goto err;

	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		/* Trigger a large asid and an early asid wrap. */
		u32 asid;

		BUILD_BUG_ON(XE_MAX_ASID < 2);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		drm_WARN_ON(&xe->drm, err);
		if (err >= 0)
			xa_erase(&xe->usm.asid_to_vm, asid);
	}

	err = xe_bo_pinned_init(xe);
	if (err)
		goto err;

	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
						       WQ_MEM_RECLAIM);
	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", 0, 0);
	xe->destroy_wq = alloc_workqueue("xe-destroy-wq", 0, 0);
	if (!xe->ordered_wq || !xe->unordered_wq ||
	    !xe->preempt_fence_wq || !xe->destroy_wq) {
		/*
		 * Cleanup done in xe_device_destroy via the
		 * drmm_add_action_or_reset registered above
		 */
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		err = -ENOMEM;
		goto err;
	}

	err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
	if (err)
		goto err;

	return xe;

err:
	return ERR_PTR(err);
}
ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */

static bool xe_driver_flr_disabled(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return true;

	if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
		drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
		return true;
	}

	return false;
}

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of Xe is bound to the device it will do a full
 * re-init anyway.
 */
static void __xe_driver_flr(struct xe_device *xe)
{
	const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	int ret;

	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a
	 * write-to-clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
		return;
	}
	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
			     flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
}

static void xe_driver_flr(struct xe_device *xe)
{
	if (xe_driver_flr_disabled(xe))
		return;

	__xe_driver_flr(xe);
}

static void xe_driver_flr_fini(void *arg)
{
	struct xe_device *xe = arg;

	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);
}

static void xe_device_sanitize(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}

static int xe_set_dma_info(struct xe_device *xe)
{
	unsigned int mask_size = xe->info.dma_mask_size;
	int err;

	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}

static void assert_lmem_ready(struct xe_device *xe)
{
	if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
		return;

	xe_assert(xe, xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) &
		  LMEM_INIT);
}

static void vf_update_device_info(struct xe_device *xe)
{
	xe_assert(xe, IS_SRIOV_VF(xe));
	/* disable features that are not available/applicable to VFs */
	xe->info.probe_display = 0;
	xe->info.has_heci_cscfi = 0;
	xe->info.has_heci_gscfi = 0;
	xe->info.has_late_bind = 0;
	xe->info.skip_guc_pc = 1;
	xe->info.skip_pcode = 1;
}

static int xe_device_vram_alloc(struct xe_device *xe)
{
	struct xe_vram_region *vram;

	if (!IS_DGFX(xe))
		return 0;

	vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	xe->mem.vram = vram;
	return 0;
}

/**
 * xe_device_probe_early() - Device early probe
 * @xe: xe device instance
 *
 * Initialize MMIO resources that don't require any
 * knowledge about tile count. Also initialize pcode and
 * check vram initialization on root tile.
 *
 * Return: 0 on success, error code on failure
 */
int xe_device_probe_early(struct xe_device *xe)
{
	int err;

	xe_wa_device_init(xe);
	xe_wa_process_device_oob(xe);

	err = xe_mmio_probe_early(xe);
	if (err)
		return err;

	xe_sriov_probe_early(xe);

	if (IS_SRIOV_VF(xe))
		vf_update_device_info(xe);

	/*
	 * Check for pcode uncore_init status to confirm if the SoC
	 * initialization is complete. Until done, any MMIO or lmem access from
	 * the driver will be blocked.
	 */
	err = xe_pcode_probe_early(xe);
	if (err || xe_survivability_mode_is_requested(xe)) {
		int save_err = err;

		/*
		 * Try to leave the device in survivability mode if possible,
		 * but still return the previous error for error propagation.
		 */
		err = xe_survivability_mode_boot_enable(xe);
		if (err)
			return err;

		return save_err;
	}

	/*
	 * Make sure the lmem is initialized and ready to use. xe_pcode_ready()
	 * is flagged after full initialization is complete. Assert if lmem is
	 * not initialized.
	 */
	assert_lmem_ready(xe);

	xe->wedged.mode = xe_device_validate_wedged_mode(xe, xe_modparam.wedged_mode) ?
			  XE_WEDGED_MODE_DEFAULT : xe_modparam.wedged_mode;
	drm_dbg(&xe->drm, "wedged_mode: setting mode (%u) %s\n",
		xe->wedged.mode, xe_wedged_mode_to_string(xe->wedged.mode));

	err = xe_device_vram_alloc(xe);
	if (err)
		return err;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */

static int probe_has_flat_ccs(struct xe_device *xe)
{
	struct xe_gt *gt;
	u32 reg;

	/* Always enabled/disabled, no runtime check to do */
	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
		return 0;

	gt = xe_root_mmio_gt(xe);
	if (!gt)
		return 0;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return -ETIMEDOUT;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm,
			"Flat CCS has been disabled in BIOS, may lead to performance impact");

	return 0;
}

/*
 * Detect if the driver is being run on pre-production hardware.  We don't
 * keep workarounds for pre-production hardware long term, so print an
 * error and add taint if we're being loaded on a pre-production platform
 * for which the pre-prod workarounds have already been removed.
 *
 * The general policy is that we'll remove any workarounds that only apply to
 * pre-production hardware around the time force_probe restrictions are lifted
 * for a platform of the next major IP generation (for example, Xe2 pre-prod
 * workarounds should be removed around the time the first Xe3 platforms have
 * force_probe lifted).
 */
static void detect_preproduction_hw(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * SR-IOV VFs don't have access to the FUSE2 register, so we can't
	 * check pre-production status there.  But the host OS will notice
	 * and report the pre-production status, which should be enough to
	 * help us catch mistaken use of pre-production hardware.
	 */
	if (IS_SRIOV_VF(xe))
		return;

	/*
	 * The "SW_CAP" fuse contains a bit indicating whether the device is a
	 * production or pre-production device.  This fuse is reflected through
	 * the GT "FUSE2" register, even though the contents of the fuse are
	 * not GT-specific.  Every GT's reflection of this fuse should show the
	 * same value, so we'll just use the first available GT for lookup.
	 */
	for_each_gt(gt, xe, id)
		break;

	if (!gt)
		return;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT)) {
		xe_gt_err(gt, "Forcewake failure; cannot determine production/pre-production hw status.\n");
		return;
	}

	if (xe_mmio_read32(&gt->mmio, FUSE2) & PRODUCTION_HW)
		return;

	xe_info(xe, "Pre-production hardware detected.\n");
	if (!xe->info.has_pre_prod_wa) {
		xe_err(xe, "Pre-production workarounds for this platform have already been removed.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 id;

	xe_pat_init_early(xe);

	err = xe_sriov_init(xe);
	if (err)
		return err;

	xe->info.mem_region_mask = 1;

	err = xe_set_dma_info(xe);
	if (err)
		return err;

	err = xe_mmio_probe_tiles(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			return err;
	}

	for_each_tile(tile, xe, id) {
		err = xe_ggtt_init_early(tile->mem.ggtt);
		if (err)
			return err;
	}

	/*
	 * From here on, if a step fails, make sure a Driver-FLR is triggered
	 */
	err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
	if (err)
		return err;

	err = probe_has_flat_ccs(xe);
	if (err)
		return err;

	err = xe_vram_probe(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			return err;
	}

	/*
	 * Allow allocations only now to ensure xe_display_init_early()
	 * is the first to allocate, always.
	 */
	err = xe_ttm_sys_mgr_init(xe);
	if (err)
		return err;

	/* Allocate and map stolen after potential VRAM resize */
	err = xe_ttm_stolen_mgr_init(xe);
	if (err)
		return err;

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_early(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init(tile);
		if (err)
			return err;
	}

	err = xe_irq_install(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init(gt);
		if (err)
			return err;
	}

	err = xe_pagefault_init(xe);
	if (err)
		return err;

	if (xe->tiles->media_gt &&
	    XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
		XE_DEVICE_WA_DISABLE(xe, 15015404425);

	err = xe_devcoredump_init(xe);
	if (err)
		return err;

	xe_nvm_init(xe);

	err = xe_soc_remapper_init(xe);
	if (err)
		return err;

	err = xe_heci_gsc_init(xe);
	if (err)
		return err;

	err = xe_late_bind_init(&xe->late_bind);
	if (err)
		return err;

	err = xe_oa_init(xe);
	if (err)
		return err;

	err = xe_display_init(xe);
	if (err)
		return err;

	err = xe_pxp_init(xe);
	if (err)
		return err;

	err = xe_psmi_init(xe);
	if (err)
		return err;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		return err;

	xe_display_register(xe);

	err = xe_oa_register(xe);
	if (err)
		goto err_unregister_display;

	err = xe_pmu_register(&xe->pmu);
	if (err)
		goto err_unregister_display;

	err = xe_device_sysfs_init(xe);
	if (err)
		goto err_unregister_display;

	xe_debugfs_register(xe);

	err = xe_hwmon_register(xe);
	if (err)
		goto err_unregister_display;

	err = xe_i2c_probe(xe);
	if (err)
		goto err_unregister_display;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize_freq(gt);

	xe_vsec_init(xe);

	err = xe_sriov_init_late(xe);
	if (err)
		goto err_unregister_display;

	detect_preproduction_hw(xe);

	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);

err_unregister_display:
	xe_display_unregister(xe);
	drm_dev_unregister(&xe->drm);

	return err;
}

void xe_device_remove(struct xe_device *xe)
{
	xe_display_unregister(xe);

	drm_dev_unplug(&xe->drm);

	xe_bo_pci_dev_remove_all(xe);
}

void xe_device_shutdown(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	drm_dbg(&xe->drm, "Shutting down device\n");

	xe_display_pm_shutdown(xe);

	xe_irq_suspend(xe);

	for_each_gt(gt, xe, id)
		xe_gt_shutdown(gt);

	xe_display_pm_shutdown_late(xe);

	if (!xe_driver_flr_disabled(xe)) {
		/* BOOM! */
		__xe_driver_flr(xe);
	}
}

/**
 * xe_device_wmb() - Device specific write memory barrier
 * @xe: the &xe_device
 *
 * While wmb() is sufficient for a barrier if we use system memory, on discrete
 * platforms with device memory we additionally need to issue a register write.
 * Since it doesn't matter which register we write to, use the read-only VF_CAP
 * register that is also marked as accessible by the VFs.
 */
void xe_device_wmb(struct xe_device *xe)
{
	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}

/*
 * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt.
 */
static void tdf_request_sync(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id) {
		if (xe_gt_is_media_type(gt))
			continue;

		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref.domains)
			return;

		xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);

		/*
		 * FIXME: We can likely do better here with our choice of
		 * timeout. Currently we just assume the worst case, i.e. 150us,
		 * which is believed to be sufficient to cover the worst case
		 * scenario on current platforms if all cache entries are
		 * transient and need to be flushed.
		 */
		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
				   300, NULL, false))
			xe_gt_err_once(gt, "TD flush timeout\n");
	}
}

void xe_device_l2_flush(struct xe_device *xe)
{
	struct xe_gt *gt;

	gt = xe_root_mmio_gt(xe);
	if (!gt)
		return;

	if (!XE_GT_WA(gt, 16023588340))
		return;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return;

	spin_lock(&gt->global_invl_lock);

	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true))
		xe_gt_err_once(gt, "Global invalidation timeout\n");

	spin_unlock(&gt->global_invl_lock);
}

/**
 * xe_device_td_flush() - Flush transient L3 cache entries
 * @xe: The device
 *
 * Display engine has direct access to memory and is never coherent with L3/L4
 * caches (or CPU caches), however KMD is responsible for specifically flushing
 * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
 * can happen from such a surface without seeing corruption.
 *
 * Display surfaces can be tagged as transient by mapping them using one of the
 * various L3:XD PAT index modes on Xe2.
 *
 * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
 * at the end of each submission via PIPE_CONTROL for compute/render, since SA
 * Media is not coherent with L3 and we want to support render-vs-media
 * use cases. For other engines like copy/blt the HW internally forces uncached
 * behaviour, which is why we can skip the TDF on such platforms.
 */
void xe_device_td_flush(struct xe_device *xe)
{
	struct xe_gt *root_gt;

	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
		return;

	root_gt = xe_root_mmio_gt(xe);
	if (!root_gt)
		return;

	if (XE_GT_WA(root_gt, 16023588340)) {
		/* A transient flush is not sufficient: flush the L2 */
		xe_device_l2_flush(xe);
	} else {
		xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc);
		tdf_request_sync(xe);
		xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc);
	}
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}

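/*
 * Worked example, assuming for illustration that NUM_BYTES_PER_CCS_BYTE()
 * evaluates to 256 on the platform at hand: a 1 MiB (SZ_1M) buffer would
 * need DIV_ROUND_UP_ULL(SZ_1M, 256) == 4 KiB of CCS metadata, while a
 * device without flat CCS needs 0 bytes.
 */
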
/**
 * xe_device_assert_mem_access - Inspect the current runtime_pm state.
 * @xe: xe device instance
 *
 * To be used before any kind of memory access. It will splat a debug warning
 * if the device is currently sleeping. But it doesn't guarantee in any way
 * that the device is going to remain awake. Xe PM runtime get and put
 * functions might be added to the outer bound of the memory access, while
 * this check is intended for inner usage to splat some warning if the worst
 * case has just happened.
 */
void xe_device_assert_mem_access(struct xe_device *xe)
{
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}

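/*
 * Intended usage sketch (inner_helper() is hypothetical): the runtime PM
 * get/put live at the outer bound, while inner code only asserts:
 *
 *	xe_pm_runtime_get(xe);
 *	inner_helper(xe);	// calls xe_device_assert_mem_access(xe)
 *	xe_pm_runtime_put(xe);
 */
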
void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
	struct xe_gt *gt;
	u8 id;

	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

	for_each_gt(gt, xe, id) {
		drm_printf(p, "GT id: %u\n", id);
		drm_printf(p, "\tTile: %u\n", gt->tile->id);
		drm_printf(p, "\tType: %s\n",
			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
		drm_printf(p, "\tIP ver: %u.%u.%u\n",
			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
	}
}

u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
	return sign_extend64(address, xe->info.va_bits - 1);
}

u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}

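/*
 * Worked example with xe->info.va_bits == 48: bit 47 acts as the sign bit,
 * so xe_device_canonicalize_addr(xe, 0x0000800000000000ull) returns
 * 0xffff800000000000ull, and xe_device_uncanonicalize_addr() masks the
 * upper bits off again, giving back 0x0000800000000000ull.
 */
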
static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;

	xe_pm_runtime_put(xe);
}

/**
 * DOC: Xe Device Wedging
 *
 * The Xe driver uses the drm device wedged uevent as documented in
 * Documentation/gpu/drm-uapi.rst. When the device is in a wedged state, every
 * IOCTL is blocked and the GT cannot be used. The conditions under which the
 * driver declares the device wedged depend on the wedged mode configuration
 * (see &enum xe_wedged_mode). The default recovery method for a wedged state
 * is rebind/bus-reset.
 *
 * Another recovery method is vendor-specific. Below are the cases that send
 * the ``WEDGED=vendor-specific`` recovery method in the drm device wedged
 * uevent.
 *
 * Case: Firmware Flash
 * --------------------
 *
 * Identification Hint
 * +++++++++++++++++++
 *
 * A ``WEDGED=vendor-specific`` drm device wedged uevent with
 * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
 * the admin/userspace consumer about the need for a firmware flash.
 *
 * Recovery Procedure
 * ++++++++++++++++++
 *
 * Once the ``WEDGED=vendor-specific`` drm device wedged uevent is received,
 * follow the steps below:
 *
 * - Check the Runtime Survivability mode sysfs.
 *   If enabled, a firmware flash is required to recover the device.
 *
 *   /sys/bus/pci/devices/<device>/survivability_mode
 *
 * - The admin/userspace consumer can use firmware flashing tools like fwupd
 *   to flash the firmware and restore the device to normal operation
 *   (sketched after this comment).
 */

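/*
 * Userspace side of the firmware-flash recovery above, for illustration only
 * (the fwupdmgr invocation is one example of a flashing tool, not a
 * prescribed flow):
 *
 *	$ cat /sys/bus/pci/devices/<device>/survivability_mode
 *	$ fwupdmgr update
 */
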
1264  * xe_device_set_wedged_method - Set wedged recovery method
1265  * @xe: xe device instance
1266  * @method: recovery method to set
1267  *
1268  * Set wedged recovery method to be sent in drm wedged uevent.
1269  */
1270 void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method)
1271 {
1272 	xe->wedged.method = method;
1273 }
1274 
1275 /**
1276  * xe_device_declare_wedged - Declare device wedged
1277  * @xe: xe device instance
1278  *
1279  * This is a final state that can only be cleared with the recovery method
1280  * specified in the drm wedged uevent. The method can be set using
1281  * xe_device_set_wedged_method before declaring the device as wedged. If no method
1282  * is set, reprobe (unbind/re-bind) will be sent by default.
1283  *
1284  * In this state every IOCTL will be blocked so the GT cannot be used.
1285  * In general it will be called upon any critical error such as gt reset
1286  * failure or guc loading failure. Userspace will be notified of this state
1287  * through device wedged uevent.
1288  * If xe.wedged module parameter is set to 2, this function will be called
1289  * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
1290  * snapshot capture. In this mode, GT reset won't be attempted so the state of
1291  * the issue is preserved for further debugging.
1292  */
1293 void xe_device_declare_wedged(struct xe_device *xe)
1294 {
1295 	struct xe_gt *gt;
1296 	u8 id;
1297 
1298 	if (xe->wedged.mode == XE_WEDGED_MODE_NEVER) {
1299 		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
1300 		return;
1301 	}
1302 
1303 	xe_pm_runtime_get_noresume(xe);
1304 
1305 	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
1306 		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
1307 		return;
1308 	}
1309 
1310 	if (!atomic_xchg(&xe->wedged.flag, 1)) {
1311 		xe->needs_flr_on_fini = true;
1312 		drm_err(&xe->drm,
1313 			"CRITICAL: Xe has declared device %s as wedged.\n"
1314 			"IOCTLs and executions are blocked. Only a rebind may clear the failure\n"
1315 			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
1316 			dev_name(xe->drm.dev));
1317 	}
1318 
1319 	for_each_gt(gt, xe, id)
1320 		xe_gt_declare_wedged(gt);
1321 
1322 	if (xe_device_wedged(xe)) {
1323 		/* If no wedge recovery method is set, use default */
1324 		if (!xe->wedged.method)
1325 			xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND |
1326 						    DRM_WEDGE_RECOVERY_BUS_RESET);
1327 
1328 		/* Notify userspace of wedged device */
1329 		drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL);
1330 	}
1331 }
1332 
/**
 * xe_device_validate_wedged_mode - Check if given mode is supported
 * @xe: the &xe_device
 * @mode: requested mode to validate
 *
 * Check whether the provided wedged mode is supported.
 *
 * Return: 0 if mode is supported, error code otherwise.
 */
int xe_device_validate_wedged_mode(struct xe_device *xe, unsigned int mode)
{
	if (mode > XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET) {
		drm_dbg(&xe->drm, "wedged_mode: invalid value (%u)\n", mode);
		return -EINVAL;
	} else if (mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET && (IS_SRIOV_VF(xe) ||
		   (IS_SRIOV_PF(xe) && !IS_ENABLED(CONFIG_DRM_XE_DEBUG)))) {
		drm_dbg(&xe->drm, "wedged_mode: (%u) %s mode is not supported for %s\n",
			mode, xe_wedged_mode_to_string(mode),
			xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
		return -EPERM;
	}

	return 0;
}

/**
 * xe_wedged_mode_to_string - Convert enum value to string.
 * @mode: the &xe_wedged_mode to convert
 *
 * Return: wedged mode as a user-friendly string.
 */
const char *xe_wedged_mode_to_string(enum xe_wedged_mode mode)
{
	switch (mode) {
	case XE_WEDGED_MODE_NEVER:
		return "never";
	case XE_WEDGED_MODE_UPON_CRITICAL_ERROR:
		return "upon-critical-error";
	case XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET:
		return "upon-any-hang-no-reset";
	default:
		return "<invalid>";
	}
}