xref: /linux/drivers/gpu/drm/xe/xe_device.c (revision 38f7e5450ebfc6f2e046a249a3f629ea7bec8c31)
1 // SPDX-License-Identifier: MIT
2 /*
3  * Copyright © 2021 Intel Corporation
4  */
5 
6 #include "xe_device.h"
7 
8 #include <linux/aperture.h>
9 #include <linux/delay.h>
10 #include <linux/fault-inject.h>
11 #include <linux/units.h>
12 
13 #include <drm/drm_atomic_helper.h>
14 #include <drm/drm_client.h>
15 #include <drm/drm_gem_ttm_helper.h>
16 #include <drm/drm_ioctl.h>
17 #include <drm/drm_managed.h>
18 #include <drm/drm_pagemap_util.h>
19 #include <drm/drm_print.h>
20 #include <uapi/drm/xe_drm.h>
21 
22 #include "display/xe_display.h"
23 #include "instructions/xe_gpu_commands.h"
24 #include "regs/xe_gt_regs.h"
25 #include "regs/xe_regs.h"
26 #include "xe_bo.h"
27 #include "xe_bo_evict.h"
28 #include "xe_debugfs.h"
29 #include "xe_defaults.h"
30 #include "xe_devcoredump.h"
31 #include "xe_device_sysfs.h"
32 #include "xe_dma_buf.h"
33 #include "xe_drm_client.h"
34 #include "xe_drv.h"
35 #include "xe_exec.h"
36 #include "xe_exec_queue.h"
37 #include "xe_force_wake.h"
38 #include "xe_ggtt.h"
39 #include "xe_gt.h"
40 #include "xe_gt_mcr.h"
41 #include "xe_gt_printk.h"
42 #include "xe_gt_sriov_vf.h"
43 #include "xe_guc.h"
44 #include "xe_guc_pc.h"
45 #include "xe_hw_engine_group.h"
46 #include "xe_hwmon.h"
47 #include "xe_i2c.h"
48 #include "xe_irq.h"
49 #include "xe_late_bind_fw.h"
50 #include "xe_mmio.h"
51 #include "xe_module.h"
52 #include "xe_nvm.h"
53 #include "xe_oa.h"
54 #include "xe_observation.h"
55 #include "xe_pagefault.h"
56 #include "xe_pat.h"
57 #include "xe_pcode.h"
58 #include "xe_pm.h"
59 #include "xe_pmu.h"
60 #include "xe_psmi.h"
61 #include "xe_pxp.h"
62 #include "xe_query.h"
63 #include "xe_shrinker.h"
64 #include "xe_soc_remapper.h"
65 #include "xe_survivability_mode.h"
66 #include "xe_sriov.h"
67 #include "xe_svm.h"
68 #include "xe_tile.h"
69 #include "xe_ttm_stolen_mgr.h"
70 #include "xe_ttm_sys_mgr.h"
71 #include "xe_vm.h"
72 #include "xe_vm_madvise.h"
73 #include "xe_vram.h"
74 #include "xe_vram_types.h"
75 #include "xe_vsec.h"
76 #include "xe_wait_user_fence.h"
77 #include "xe_wa.h"
78 
79 #include <generated/xe_device_wa_oob.h>
80 #include <generated/xe_wa_oob.h>
81 
82 static int xe_file_open(struct drm_device *dev, struct drm_file *file)
83 {
84 	struct xe_device *xe = to_xe_device(dev);
85 	struct xe_drm_client *client;
86 	struct xe_file *xef;
87 	int ret = -ENOMEM;
88 	struct task_struct *task = NULL;
89 
90 	xef = kzalloc_obj(*xef);
91 	if (!xef)
92 		return ret;
93 
94 	client = xe_drm_client_alloc();
95 	if (!client) {
96 		kfree(xef);
97 		return ret;
98 	}
99 
100 	xef->drm = file;
101 	xef->client = client;
102 	xef->xe = xe;
103 
104 	mutex_init(&xef->vm.lock);
105 	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);
106 
107 	mutex_init(&xef->exec_queue.lock);
108 	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);
109 
110 	file->driver_priv = xef;
111 	kref_init(&xef->refcount);
112 
113 	task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
114 	if (task) {
115 		xef->process_name = kstrdup(task->comm, GFP_KERNEL);
116 		xef->pid = task->pid;
117 		put_task_struct(task);
118 	}
119 
120 	return 0;
121 }
122 
123 static void xe_file_destroy(struct kref *ref)
124 {
125 	struct xe_file *xef = container_of(ref, struct xe_file, refcount);
126 
127 	xa_destroy(&xef->exec_queue.xa);
128 	mutex_destroy(&xef->exec_queue.lock);
129 	xa_destroy(&xef->vm.xa);
130 	mutex_destroy(&xef->vm.lock);
131 
132 	xe_drm_client_put(xef->client);
133 	kfree(xef->process_name);
134 	kfree(xef);
135 }
136 
137 /**
138  * xe_file_get() - Take a reference to the xe file object
139  * @xef: Pointer to the xe file
140  *
141  * Anyone with a pointer to xef must take a reference to the xe file
142  * object using this call.
143  *
144  * Return: xe file pointer
145  */
146 struct xe_file *xe_file_get(struct xe_file *xef)
147 {
148 	kref_get(&xef->refcount);
149 	return xef;
150 }
151 
152 /**
153  * xe_file_put() - Drop a reference to the xe file object
154  * @xef: Pointer to the xe file
155  *
156  * Used to drop reference to the xef object
157  */
158 void xe_file_put(struct xe_file *xef)
159 {
160 	kref_put(&xef->refcount, xe_file_destroy);
161 }
162 
163 static void xe_file_close(struct drm_device *dev, struct drm_file *file)
164 {
165 	struct xe_device *xe = to_xe_device(dev);
166 	struct xe_file *xef = file->driver_priv;
167 	struct xe_vm *vm;
168 	struct xe_exec_queue *q;
169 	unsigned long idx;
170 
171 	guard(xe_pm_runtime)(xe);
172 
173 	/*
174 	 * No need for exec_queue.lock here as there is no contention for it
175 	 * when FD is closing as IOCTLs presumably can't be modifying the
176 	 * xarray. Taking exec_queue.lock here causes undue dependency on
177 	 * vm->lock taken during xe_exec_queue_kill().
178 	 */
179 	xa_for_each(&xef->exec_queue.xa, idx, q) {
180 		if (q->vm && q->hwe->hw_engine_group)
181 			xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
182 		xe_exec_queue_kill(q);
183 		xe_exec_queue_put(q);
184 	}
185 	xa_for_each(&xef->vm.xa, idx, vm)
186 		xe_vm_close_and_put(vm);
187 
188 	xe_file_put(xef);
189 }
190 
191 static const struct drm_ioctl_desc xe_ioctls[] = {
192 	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
193 	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
194 	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
195 			  DRM_RENDER_ALLOW),
196 	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
197 	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
198 	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
199 	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
200 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
201 			  DRM_RENDER_ALLOW),
202 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
203 			  DRM_RENDER_ALLOW),
204 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
205 			  DRM_RENDER_ALLOW),
206 	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
207 			  DRM_RENDER_ALLOW),
208 	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
209 	DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
210 	DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl,
211 			  DRM_RENDER_ALLOW),
212 	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
213 			  DRM_RENDER_ALLOW),
214 };
215 
216 static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
217 {
218 	struct drm_file *file_priv = file->private_data;
219 	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
220 	long ret;
221 
222 	if (xe_device_wedged(xe))
223 		return -ECANCELED;
224 
225 	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
226 	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
227 	if (ret >= 0)
228 		ret = drm_ioctl(file, cmd, arg);
229 
230 	return ret;
231 }
232 
233 #ifdef CONFIG_COMPAT
234 static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
235 {
236 	struct drm_file *file_priv = file->private_data;
237 	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
238 	long ret;
239 
240 	if (xe_device_wedged(xe))
241 		return -ECANCELED;
242 
243 	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
244 	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
245 	if (ret >= 0)
246 		ret = drm_compat_ioctl(file, cmd, arg);
247 
248 	return ret;
249 }
250 #else
251 /* similarly to drm_compat_ioctl, let's it be assigned to .compat_ioct unconditionally */
252 #define xe_drm_compat_ioctl NULL
253 #endif
254 
255 static void barrier_open(struct vm_area_struct *vma)
256 {
257 	drm_dev_get(vma->vm_private_data);
258 }
259 
260 static void barrier_close(struct vm_area_struct *vma)
261 {
262 	drm_dev_put(vma->vm_private_data);
263 }
264 
265 static void barrier_release_dummy_page(struct drm_device *dev, void *res)
266 {
267 	struct page *dummy_page = (struct page *)res;
268 
269 	__free_page(dummy_page);
270 }
271 
272 static vm_fault_t barrier_fault(struct vm_fault *vmf)
273 {
274 	struct drm_device *dev = vmf->vma->vm_private_data;
275 	struct vm_area_struct *vma = vmf->vma;
276 	vm_fault_t ret = VM_FAULT_NOPAGE;
277 	pgprot_t prot;
278 	int idx;
279 
280 	prot = vm_get_page_prot(vma->vm_flags);
281 
282 	if (drm_dev_enter(dev, &idx)) {
283 		unsigned long pfn;
284 
285 #define LAST_DB_PAGE_OFFSET 0x7ff001
286 		pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
287 				LAST_DB_PAGE_OFFSET);
288 		ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
289 					  pgprot_noncached(prot));
290 		drm_dev_exit(idx);
291 	} else {
292 		struct page *page;
293 
294 		/* Allocate new dummy page to map all the VA range in this VMA to it*/
295 		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
296 		if (!page)
297 			return VM_FAULT_OOM;
298 
299 		/* Set the page to be freed using drmm release action */
300 		if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
301 			return VM_FAULT_OOM;
302 
303 		ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
304 					  prot);
305 	}
306 
307 	return ret;
308 }
309 
310 static const struct vm_operations_struct vm_ops_barrier = {
311 	.open = barrier_open,
312 	.close = barrier_close,
313 	.fault = barrier_fault,
314 };
315 
316 static int xe_pci_barrier_mmap(struct file *filp,
317 			       struct vm_area_struct *vma)
318 {
319 	struct drm_file *priv = filp->private_data;
320 	struct drm_device *dev = priv->minor->dev;
321 	struct xe_device *xe = to_xe_device(dev);
322 
323 	if (!IS_DGFX(xe))
324 		return -EINVAL;
325 
326 	if (vma->vm_end - vma->vm_start > SZ_4K)
327 		return -EINVAL;
328 
329 	if (is_cow_mapping(vma->vm_flags))
330 		return -EINVAL;
331 
332 	if (vma->vm_flags & (VM_READ | VM_EXEC))
333 		return -EINVAL;
334 
335 	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
336 	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
337 	vma->vm_ops = &vm_ops_barrier;
338 	vma->vm_private_data = dev;
339 	drm_dev_get(vma->vm_private_data);
340 
341 	return 0;
342 }
343 
344 static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
345 {
346 	struct drm_file *priv = filp->private_data;
347 	struct drm_device *dev = priv->minor->dev;
348 
349 	if (drm_dev_is_unplugged(dev))
350 		return -ENODEV;
351 
352 	switch (vma->vm_pgoff) {
353 	case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
354 		return xe_pci_barrier_mmap(filp, vma);
355 	}
356 
357 	return drm_gem_mmap(filp, vma);
358 }
359 
360 static const struct file_operations xe_driver_fops = {
361 	.owner = THIS_MODULE,
362 	.open = drm_open,
363 	.release = drm_release_noglobal,
364 	.unlocked_ioctl = xe_drm_ioctl,
365 	.mmap = xe_mmap,
366 	.poll = drm_poll,
367 	.read = drm_read,
368 	.compat_ioctl = xe_drm_compat_ioctl,
369 	.llseek = noop_llseek,
370 #ifdef CONFIG_PROC_FS
371 	.show_fdinfo = drm_show_fdinfo,
372 #endif
373 	.fop_flags = FOP_UNSIGNED_OFFSET,
374 };
375 
376 /**
377  * xe_is_xe_file() - Is the file an xe device file?
378  * @file: The file.
379  *
380  * Checks whether the file is opened against
381  * an xe device.
382  *
383  * Return: %true if an xe file, %false if not.
384  */
385 bool xe_is_xe_file(const struct file *file)
386 {
387 	return file->f_op == &xe_driver_fops;
388 }
389 
390 static struct drm_driver driver = {
391 	/* Don't use MTRRs here; the Xserver or userspace app should
392 	 * deal with them for Intel hardware.
393 	 */
394 	.driver_features =
395 	    DRIVER_GEM |
396 	    DRIVER_RENDER | DRIVER_SYNCOBJ |
397 	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
398 	.open = xe_file_open,
399 	.postclose = xe_file_close,
400 
401 	.gem_prime_import = xe_gem_prime_import,
402 
403 	.dumb_create = xe_bo_dumb_create,
404 	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
405 #ifdef CONFIG_PROC_FS
406 	.show_fdinfo = xe_drm_client_fdinfo,
407 #endif
408 	.ioctls = xe_ioctls,
409 	.num_ioctls = ARRAY_SIZE(xe_ioctls),
410 	.fops = &xe_driver_fops,
411 	.name = DRIVER_NAME,
412 	.desc = DRIVER_DESC,
413 	.major = DRIVER_MAJOR,
414 	.minor = DRIVER_MINOR,
415 	.patchlevel = DRIVER_PATCHLEVEL,
416 };
417 
418 static void xe_device_destroy(struct drm_device *dev, void *dummy)
419 {
420 	struct xe_device *xe = to_xe_device(dev);
421 
422 	xe_bo_dev_fini(&xe->bo_device);
423 
424 	if (xe->preempt_fence_wq)
425 		destroy_workqueue(xe->preempt_fence_wq);
426 
427 	if (xe->ordered_wq)
428 		destroy_workqueue(xe->ordered_wq);
429 
430 	if (xe->unordered_wq)
431 		destroy_workqueue(xe->unordered_wq);
432 
433 	if (xe->destroy_wq)
434 		destroy_workqueue(xe->destroy_wq);
435 
436 	ttm_device_fini(&xe->ttm);
437 }
438 
439 struct xe_device *xe_device_create(struct pci_dev *pdev,
440 				   const struct pci_device_id *ent)
441 {
442 	struct xe_device *xe;
443 	int err;
444 
445 	xe_display_driver_set_hooks(&driver);
446 
447 	err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
448 	if (err)
449 		return ERR_PTR(err);
450 
451 	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
452 	if (IS_ERR(xe))
453 		return xe;
454 
455 	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
456 			      xe->drm.anon_inode->i_mapping,
457 			      xe->drm.vma_offset_manager, 0);
458 	if (WARN_ON(err))
459 		return ERR_PTR(err);
460 
461 	xe_bo_dev_init(&xe->bo_device);
462 	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
463 	if (err)
464 		return ERR_PTR(err);
465 
466 	err = xe_shrinker_create(xe);
467 	if (err)
468 		return ERR_PTR(err);
469 
470 	xe->info.devid = pdev->device;
471 	xe->info.revid = pdev->revision;
472 	xe->info.force_execlist = xe_modparam.force_execlist;
473 	xe->atomic_svm_timeslice_ms = 5;
474 	xe->min_run_period_lr_ms = 5;
475 
476 	err = xe_irq_init(xe);
477 	if (err)
478 		return ERR_PTR(err);
479 
480 	xe_validation_device_init(&xe->val);
481 
482 	init_waitqueue_head(&xe->ufence_wq);
483 
484 	init_rwsem(&xe->usm.lock);
485 
486 	err = xe_pagemap_shrinker_create(xe);
487 	if (err)
488 		return ERR_PTR(err);
489 
490 	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);
491 
492 	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
493 		/* Trigger a large asid and an early asid wrap. */
494 		u32 asid;
495 
496 		BUILD_BUG_ON(XE_MAX_ASID < 2);
497 		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
498 				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
499 				      &xe->usm.next_asid, GFP_KERNEL);
500 		drm_WARN_ON(&xe->drm, err);
501 		if (err >= 0)
502 			xa_erase(&xe->usm.asid_to_vm, asid);
503 	}
504 
505 	err = xe_bo_pinned_init(xe);
506 	if (err)
507 		return ERR_PTR(err);
508 
509 	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
510 						       WQ_MEM_RECLAIM);
511 	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
512 	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", WQ_PERCPU, 0);
513 	xe->destroy_wq = alloc_workqueue("xe-destroy-wq", WQ_PERCPU, 0);
514 	if (!xe->ordered_wq || !xe->unordered_wq ||
515 	    !xe->preempt_fence_wq || !xe->destroy_wq) {
516 		/*
517 		 * Cleanup done in xe_device_destroy via
518 		 * drmm_add_action_or_reset register above
519 		 */
520 		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
521 		return ERR_PTR(-ENOMEM);
522 	}
523 
524 	err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
525 	if (err)
526 		return ERR_PTR(err);
527 
528 	return xe;
529 }
530 ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */
531 
532 static bool xe_driver_flr_disabled(struct xe_device *xe)
533 {
534 	if (IS_SRIOV_VF(xe))
535 		return true;
536 
537 	if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
538 		drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
539 		return true;
540 	}
541 
542 	return false;
543 }
544 
545 /*
546  * The driver-initiated FLR is the highest level of reset that we can trigger
547  * from within the driver. It is different from the PCI FLR in that it doesn't
548  * fully reset the SGUnit and doesn't modify the PCI config space and therefore
549  * it doesn't require a re-enumeration of the PCI BARs. However, the
550  * driver-initiated FLR does still cause a reset of both GT and display and a
551  * memory wipe of local and stolen memory, so recovery would require a full HW
552  * re-init and saving/restoring (or re-populating) the wiped memory. Since we
553  * perform the FLR as the very last action before releasing access to the HW
554  * during the driver release flow, we don't attempt recovery at all, because
555  * if/when a new instance of Xe is bound to the device it will do a full
556  * re-init anyway.
557  */
558 static void __xe_driver_flr(struct xe_device *xe)
559 {
560 	const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
561 	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
562 	int ret;
563 
564 	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");
565 
566 	/*
567 	 * Make sure any pending FLR requests have cleared by waiting for the
568 	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
569 	 * to make sure it's not still set from a prior attempt (it's a write to
570 	 * clear bit).
571 	 * Note that we should never be in a situation where a previous attempt
572 	 * is still pending (unless the HW is totally dead), but better to be
573 	 * safe in case something unexpected happens
574 	 */
575 	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
576 	if (ret) {
577 		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
578 		return;
579 	}
580 	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
581 
582 	/* Trigger the actual Driver-FLR */
583 	xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);
584 
585 	/* Wait for hardware teardown to complete */
586 	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
587 	if (ret) {
588 		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
589 		return;
590 	}
591 
592 	/* Wait for hardware/firmware re-init to complete */
593 	ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
594 			     flr_timeout, NULL, false);
595 	if (ret) {
596 		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
597 		return;
598 	}
599 
600 	/* Clear sticky completion status */
601 	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
602 }
603 
604 static void xe_driver_flr(struct xe_device *xe)
605 {
606 	if (xe_driver_flr_disabled(xe))
607 		return;
608 
609 	__xe_driver_flr(xe);
610 }
611 
612 static void xe_driver_flr_fini(void *arg)
613 {
614 	struct xe_device *xe = arg;
615 
616 	if (xe->needs_flr_on_fini)
617 		xe_driver_flr(xe);
618 }
619 
620 static void xe_device_sanitize(void *arg)
621 {
622 	struct xe_device *xe = arg;
623 	struct xe_gt *gt;
624 	u8 id;
625 
626 	for_each_gt(gt, xe, id)
627 		xe_gt_sanitize(gt);
628 }
629 
630 static int xe_set_dma_info(struct xe_device *xe)
631 {
632 	unsigned int mask_size = xe->info.dma_mask_size;
633 	int err;
634 
635 	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));
636 
637 	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
638 	if (err)
639 		goto mask_err;
640 
641 	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
642 	if (err)
643 		goto mask_err;
644 
645 	return 0;
646 
647 mask_err:
648 	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
649 	return err;
650 }
651 
652 static void assert_lmem_ready(struct xe_device *xe)
653 {
654 	if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
655 		return;
656 
657 	xe_assert(xe, xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) &
658 		  LMEM_INIT);
659 }
660 
661 static void vf_update_device_info(struct xe_device *xe)
662 {
663 	xe_assert(xe, IS_SRIOV_VF(xe));
664 	/* disable features that are not available/applicable to VFs */
665 	xe->info.probe_display = 0;
666 	xe->info.has_heci_cscfi = 0;
667 	xe->info.has_heci_gscfi = 0;
668 	xe->info.has_late_bind = 0;
669 	xe->info.skip_guc_pc = 1;
670 	xe->info.skip_pcode = 1;
671 }
672 
673 static int xe_device_vram_alloc(struct xe_device *xe)
674 {
675 	struct xe_vram_region *vram;
676 
677 	if (!IS_DGFX(xe))
678 		return 0;
679 
680 	vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
681 	if (!vram)
682 		return -ENOMEM;
683 
684 	xe->mem.vram = vram;
685 	return 0;
686 }
687 
688 /**
689  * xe_device_probe_early: Device early probe
690  * @xe: xe device instance
691  *
692  * Initialize MMIO resources that don't require any
693  * knowledge about tile count. Also initialize pcode and
694  * check vram initialization on root tile.
695  *
696  * Return: 0 on success, error code on failure
697  */
698 int xe_device_probe_early(struct xe_device *xe)
699 {
700 	int err;
701 
702 	xe_wa_device_init(xe);
703 	xe_wa_process_device_oob(xe);
704 
705 	err = xe_mmio_probe_early(xe);
706 	if (err)
707 		return err;
708 
709 	xe_sriov_probe_early(xe);
710 
711 	if (IS_SRIOV_VF(xe))
712 		vf_update_device_info(xe);
713 
714 	/*
715 	 * Check for pcode uncore_init status to confirm if the SoC
716 	 * initialization is complete. Until done, any MMIO or lmem access from
717 	 * the driver will be blocked
718 	 */
719 	err = xe_pcode_probe_early(xe);
720 	if (err || xe_survivability_mode_is_requested(xe)) {
721 		int save_err = err;
722 
723 		/*
724 		 * Try to leave device in survivability mode if device is
725 		 * possible, but still return the previous error for error
726 		 * propagation
727 		 */
728 		err = xe_survivability_mode_boot_enable(xe);
729 		if (err)
730 			return err;
731 
732 		return save_err;
733 	}
734 
735 	/*
736 	 * Make sure the lmem is initialized and ready to use. xe_pcode_ready()
737 	 * is flagged after full initialization is complete. Assert if lmem is
738 	 * not initialized.
739 	 */
740 	assert_lmem_ready(xe);
741 
742 	xe->wedged.mode = xe_device_validate_wedged_mode(xe, xe_modparam.wedged_mode) ?
743 			  XE_DEFAULT_WEDGED_MODE : xe_modparam.wedged_mode;
744 	drm_dbg(&xe->drm, "wedged_mode: setting mode (%u) %s\n",
745 		xe->wedged.mode, xe_wedged_mode_to_string(xe->wedged.mode));
746 
747 	err = xe_device_vram_alloc(xe);
748 	if (err)
749 		return err;
750 
751 	return 0;
752 }
753 ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */
754 
755 static int probe_has_flat_ccs(struct xe_device *xe)
756 {
757 	struct xe_gt *gt;
758 	u32 reg;
759 
760 	/* Always enabled/disabled, no runtime check to do */
761 	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
762 		return 0;
763 
764 	gt = xe_root_mmio_gt(xe);
765 	if (!gt)
766 		return 0;
767 
768 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
769 	if (!fw_ref.domains)
770 		return -ETIMEDOUT;
771 
772 	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
773 	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);
774 
775 	if (!xe->info.has_flat_ccs)
776 		drm_dbg(&xe->drm,
777 			"Flat CCS has been disabled in bios, May lead to performance impact");
778 
779 	return 0;
780 }
781 
782 /*
783  * Detect if the driver is being run on pre-production hardware.  We don't
784  * keep workarounds for pre-production hardware long term, so print an
785  * error and add taint if we're being loaded on a pre-production platform
786  * for which the pre-prod workarounds have already been removed.
787  *
788  * The general policy is that we'll remove any workarounds that only apply to
789  * pre-production hardware around the time force_probe restrictions are lifted
790  * for a platform of the next major IP generation (for example, Xe2 pre-prod
791  * workarounds should be removed around the time the first Xe3 platforms have
792  * force_probe lifted).
793  */
794 static void detect_preproduction_hw(struct xe_device *xe)
795 {
796 	struct xe_gt *gt;
797 	int id;
798 
799 	/*
800 	 * SR-IOV VFs don't have access to the FUSE2 register, so we can't
801 	 * check pre-production status there.  But the host OS will notice
802 	 * and report the pre-production status, which should be enough to
803 	 * help us catch mistaken use of pre-production hardware.
804 	 */
805 	if (IS_SRIOV_VF(xe))
806 		return;
807 
808 	/*
809 	 * The "SW_CAP" fuse contains a bit indicating whether the device is a
810 	 * production or pre-production device.  This fuse is reflected through
811 	 * the GT "FUSE2" register, even though the contents of the fuse are
812 	 * not GT-specific.  Every GT's reflection of this fuse should show the
813 	 * same value, so we'll just use the first available GT for lookup.
814 	 */
815 	for_each_gt(gt, xe, id)
816 		break;
817 
818 	if (!gt)
819 		return;
820 
821 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
822 	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT)) {
823 		xe_gt_err(gt, "Forcewake failure; cannot determine production/pre-production hw status.\n");
824 		return;
825 	}
826 
827 	if (xe_mmio_read32(&gt->mmio, FUSE2) & PRODUCTION_HW)
828 		return;
829 
830 	xe_info(xe, "Pre-production hardware detected.\n");
831 	if (!xe->info.has_pre_prod_wa) {
832 		xe_err(xe, "Pre-production workarounds for this platform have already been removed.\n");
833 		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
834 	}
835 }
836 
837 int xe_device_probe(struct xe_device *xe)
838 {
839 	struct xe_tile *tile;
840 	struct xe_gt *gt;
841 	int err;
842 	u8 id;
843 
844 	xe_pat_init_early(xe);
845 
846 	err = xe_sriov_init(xe);
847 	if (err)
848 		return err;
849 
850 	xe->info.mem_region_mask = 1;
851 
852 	err = xe_set_dma_info(xe);
853 	if (err)
854 		return err;
855 
856 	err = xe_mmio_probe_tiles(xe);
857 	if (err)
858 		return err;
859 
860 	for_each_gt(gt, xe, id) {
861 		err = xe_gt_init_early(gt);
862 		if (err)
863 			return err;
864 	}
865 
866 	for_each_tile(tile, xe, id) {
867 		err = xe_ggtt_init_early(tile->mem.ggtt);
868 		if (err)
869 			return err;
870 	}
871 
872 	/*
873 	 * From here on, if a step fails, make sure a Driver-FLR is triggereed
874 	 */
875 	err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
876 	if (err)
877 		return err;
878 
879 	err = probe_has_flat_ccs(xe);
880 	if (err)
881 		return err;
882 
883 	err = xe_vram_probe(xe);
884 	if (err)
885 		return err;
886 
887 	for_each_tile(tile, xe, id) {
888 		err = xe_tile_init_noalloc(tile);
889 		if (err)
890 			return err;
891 	}
892 
893 	/*
894 	 * Allow allocations only now to ensure xe_display_init_early()
895 	 * is the first to allocate, always.
896 	 */
897 	err = xe_ttm_sys_mgr_init(xe);
898 	if (err)
899 		return err;
900 
901 	/* Allocate and map stolen after potential VRAM resize */
902 	err = xe_ttm_stolen_mgr_init(xe);
903 	if (err)
904 		return err;
905 
906 	/*
907 	 * Now that GT is initialized (TTM in particular),
908 	 * we can try to init display, and inherit the initial fb.
909 	 * This is the reason the first allocation needs to be done
910 	 * inside display.
911 	 */
912 	err = xe_display_init_early(xe);
913 	if (err)
914 		return err;
915 
916 	for_each_tile(tile, xe, id) {
917 		err = xe_tile_init(tile);
918 		if (err)
919 			return err;
920 	}
921 
922 	err = xe_irq_install(xe);
923 	if (err)
924 		return err;
925 
926 	for_each_gt(gt, xe, id) {
927 		err = xe_gt_init(gt);
928 		if (err)
929 			return err;
930 	}
931 
932 	err = xe_pagefault_init(xe);
933 	if (err)
934 		return err;
935 
936 	if (xe->tiles->media_gt &&
937 	    XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
938 		XE_DEVICE_WA_DISABLE(xe, 15015404425);
939 
940 	err = xe_devcoredump_init(xe);
941 	if (err)
942 		return err;
943 
944 	xe_nvm_init(xe);
945 
946 	err = xe_soc_remapper_init(xe);
947 	if (err)
948 		return err;
949 
950 	err = xe_heci_gsc_init(xe);
951 	if (err)
952 		return err;
953 
954 	err = xe_late_bind_init(&xe->late_bind);
955 	if (err)
956 		return err;
957 
958 	err = xe_oa_init(xe);
959 	if (err)
960 		return err;
961 
962 	err = xe_display_init(xe);
963 	if (err)
964 		return err;
965 
966 	err = xe_pxp_init(xe);
967 	if (err)
968 		return err;
969 
970 	err = xe_psmi_init(xe);
971 	if (err)
972 		return err;
973 
974 	err = drm_dev_register(&xe->drm, 0);
975 	if (err)
976 		return err;
977 
978 	xe_display_register(xe);
979 
980 	err = xe_oa_register(xe);
981 	if (err)
982 		goto err_unregister_display;
983 
984 	err = xe_pmu_register(&xe->pmu);
985 	if (err)
986 		goto err_unregister_display;
987 
988 	err = xe_device_sysfs_init(xe);
989 	if (err)
990 		goto err_unregister_display;
991 
992 	xe_debugfs_register(xe);
993 
994 	err = xe_hwmon_register(xe);
995 	if (err)
996 		goto err_unregister_display;
997 
998 	err = xe_i2c_probe(xe);
999 	if (err)
1000 		goto err_unregister_display;
1001 
1002 	for_each_gt(gt, xe, id)
1003 		xe_gt_sanitize_freq(gt);
1004 
1005 	xe_vsec_init(xe);
1006 
1007 	err = xe_sriov_init_late(xe);
1008 	if (err)
1009 		goto err_unregister_display;
1010 
1011 	detect_preproduction_hw(xe);
1012 
1013 	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);
1014 
1015 err_unregister_display:
1016 	xe_display_unregister(xe);
1017 	drm_dev_unregister(&xe->drm);
1018 
1019 	return err;
1020 }
1021 
1022 void xe_device_remove(struct xe_device *xe)
1023 {
1024 	xe_display_unregister(xe);
1025 
1026 	drm_dev_unplug(&xe->drm);
1027 
1028 	xe_bo_pci_dev_remove_all(xe);
1029 }
1030 
1031 void xe_device_shutdown(struct xe_device *xe)
1032 {
1033 	struct xe_gt *gt;
1034 	u8 id;
1035 
1036 	drm_dbg(&xe->drm, "Shutting down device\n");
1037 
1038 	xe_display_pm_shutdown(xe);
1039 
1040 	xe_irq_suspend(xe);
1041 
1042 	for_each_gt(gt, xe, id)
1043 		xe_gt_shutdown(gt);
1044 
1045 	xe_display_pm_shutdown_late(xe);
1046 
1047 	if (!xe_driver_flr_disabled(xe)) {
1048 		/* BOOM! */
1049 		__xe_driver_flr(xe);
1050 	}
1051 }
1052 
1053 /**
1054  * xe_device_wmb() - Device specific write memory barrier
1055  * @xe: the &xe_device
1056  *
1057  * While wmb() is sufficient for a barrier if we use system memory, on discrete
1058  * platforms with device memory we additionally need to issue a register write.
1059  * Since it doesn't matter which register we write to, use the read-only VF_CAP
1060  * register that is also marked as accessible by the VFs.
1061  */
1062 void xe_device_wmb(struct xe_device *xe)
1063 {
1064 	wmb();
1065 	if (IS_DGFX(xe))
1066 		xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
1067 }
1068 
1069 /*
1070  * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt.
1071  */
1072 static void tdf_request_sync(struct xe_device *xe)
1073 {
1074 	struct xe_gt *gt;
1075 	u8 id;
1076 
1077 	for_each_gt(gt, xe, id) {
1078 		if (xe_gt_is_media_type(gt))
1079 			continue;
1080 
1081 		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
1082 		if (!fw_ref.domains)
1083 			return;
1084 
1085 		xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
1086 
1087 		/*
1088 		 * FIXME: We can likely do better here with our choice of
1089 		 * timeout. Currently we just assume the worst case, i.e. 150us,
1090 		 * which is believed to be sufficient to cover the worst case
1091 		 * scenario on current platforms if all cache entries are
1092 		 * transient and need to be flushed..
1093 		 */
1094 		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
1095 				   300, NULL, false))
1096 			xe_gt_err_once(gt, "TD flush timeout\n");
1097 	}
1098 }
1099 
1100 void xe_device_l2_flush(struct xe_device *xe)
1101 {
1102 	struct xe_gt *gt;
1103 
1104 	gt = xe_root_mmio_gt(xe);
1105 	if (!gt)
1106 		return;
1107 
1108 	if (!XE_GT_WA(gt, 16023588340))
1109 		return;
1110 
1111 	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
1112 	if (!fw_ref.domains)
1113 		return;
1114 
1115 	spin_lock(&gt->global_invl_lock);
1116 
1117 	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
1118 	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true))
1119 		xe_gt_err_once(gt, "Global invalidation timeout\n");
1120 
1121 	spin_unlock(&gt->global_invl_lock);
1122 }
1123 
1124 /**
1125  * xe_device_td_flush() - Flush transient L3 cache entries
1126  * @xe: The device
1127  *
1128  * Display engine has direct access to memory and is never coherent with L3/L4
1129  * caches (or CPU caches), however KMD is responsible for specifically flushing
1130  * transient L3 GPU cache entries prior to the flip sequence to ensure scanout
1131  * can happen from such a surface without seeing corruption.
1132  *
1133  * Display surfaces can be tagged as transient by mapping it using one of the
1134  * various L3:XD PAT index modes on Xe2.
1135  *
1136  * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
1137  * at the end of each submission via PIPE_CONTROL for compute/render, since SA
1138  * Media is not coherent with L3 and we want to support render-vs-media
1139  * usescases. For other engines like copy/blt the HW internally forces uncached
1140  * behaviour, hence why we can skip the TDF on such platforms.
1141  */
1142 void xe_device_td_flush(struct xe_device *xe)
1143 {
1144 	struct xe_gt *root_gt;
1145 
1146 	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
1147 		return;
1148 
1149 	root_gt = xe_root_mmio_gt(xe);
1150 	if (!root_gt)
1151 		return;
1152 
1153 	if (XE_GT_WA(root_gt, 16023588340)) {
1154 		/* A transient flush is not sufficient: flush the L2 */
1155 		xe_device_l2_flush(xe);
1156 	} else {
1157 		xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc);
1158 		tdf_request_sync(xe);
1159 		xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc);
1160 	}
1161 }
1162 
1163 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
1164 {
1165 	return xe_device_has_flat_ccs(xe) ?
1166 		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
1167 }
1168 
1169 /**
1170  * xe_device_assert_mem_access - Inspect the current runtime_pm state.
1171  * @xe: xe device instance
1172  *
1173  * To be used before any kind of memory access. It will splat a debug warning
1174  * if the device is currently sleeping. But it doesn't guarantee in any way
1175  * that the device is going to remain awake. Xe PM runtime get and put
1176  * functions might be added to the outer bound of the memory access, while
1177  * this check is intended for inner usage to splat some warning if the worst
1178  * case has just happened.
1179  */
1180 void xe_device_assert_mem_access(struct xe_device *xe)
1181 {
1182 	xe_assert(xe, !xe_pm_runtime_suspended(xe));
1183 }
1184 
1185 void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
1186 {
1187 	struct xe_gt *gt;
1188 	u8 id;
1189 
1190 	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
1191 	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);
1192 
1193 	for_each_gt(gt, xe, id) {
1194 		drm_printf(p, "GT id: %u\n", id);
1195 		drm_printf(p, "\tTile: %u\n", gt->tile->id);
1196 		drm_printf(p, "\tType: %s\n",
1197 			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
1198 		drm_printf(p, "\tIP ver: %u.%u.%u\n",
1199 			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
1200 			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
1201 			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
1202 		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
1203 	}
1204 }
1205 
1206 u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
1207 {
1208 	return sign_extend64(address, xe->info.va_bits - 1);
1209 }
1210 
1211 u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
1212 {
1213 	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
1214 }
1215 
1216 static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
1217 {
1218 	struct xe_device *xe = arg;
1219 
1220 	xe_pm_runtime_put(xe);
1221 }
1222 
1223 /**
1224  * DOC: Xe Device Wedging
1225  *
1226  * Xe driver uses drm device wedged uevent as documented in Documentation/gpu/drm-uapi.rst.
1227  * When device is in wedged state, every IOCTL will be blocked and GT cannot
1228  * be used. The conditions under which the driver declares the device wedged
1229  * depend on the wedged mode configuration (see &enum xe_wedged_mode). The
1230  * default recovery method for a wedged state is rebind/bus-reset.
1231  *
1232  * Another recovery method is vendor-specific. Below are the cases that send
1233  * ``WEDGED=vendor-specific`` recovery method in drm device wedged uevent.
1234  *
1235  * Case: Firmware Flash
1236  * --------------------
1237  *
1238  * Identification Hint
1239  * +++++++++++++++++++
1240  *
1241  * ``WEDGED=vendor-specific`` drm device wedged uevent with
1242  * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
1243  * admin/userspace consumer about the need for a firmware flash.
1244  *
1245  * Recovery Procedure
1246  * ++++++++++++++++++
1247  *
1248  * Once ``WEDGED=vendor-specific`` drm device wedged uevent is received, follow
1249  * the below steps
1250  *
1251  * - Check Runtime Survivability mode sysfs.
1252  *   If enabled, firmware flash is required to recover the device.
1253  *
1254  *   /sys/bus/pci/devices/<device>/survivability_mode
1255  *
1256  * - Admin/userspace consumer can use firmware flashing tools like fwupd to flash
1257  *   firmware and restore device to normal operation.
1258  */
1259 
1260 /**
1261  * xe_device_set_wedged_method - Set wedged recovery method
1262  * @xe: xe device instance
1263  * @method: recovery method to set
1264  *
1265  * Set wedged recovery method to be sent in drm wedged uevent.
1266  */
1267 void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method)
1268 {
1269 	xe->wedged.method = method;
1270 }
1271 
1272 /**
1273  * xe_device_declare_wedged - Declare device wedged
1274  * @xe: xe device instance
1275  *
1276  * This is a final state that can only be cleared with the recovery method
1277  * specified in the drm wedged uevent. The method can be set using
1278  * xe_device_set_wedged_method before declaring the device as wedged. If no method
1279  * is set, reprobe (unbind/re-bind) will be sent by default.
1280  *
1281  * In this state every IOCTL will be blocked so the GT cannot be used.
1282  * In general it will be called upon any critical error such as gt reset
1283  * failure or guc loading failure. Userspace will be notified of this state
1284  * through device wedged uevent.
1285  * If xe.wedged module parameter is set to 2, this function will be called
1286  * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
1287  * snapshot capture. In this mode, GT reset won't be attempted so the state of
1288  * the issue is preserved for further debugging.
1289  */
1290 void xe_device_declare_wedged(struct xe_device *xe)
1291 {
1292 	struct xe_gt *gt;
1293 	u8 id;
1294 
1295 	if (xe->wedged.mode == XE_WEDGED_MODE_NEVER) {
1296 		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
1297 		return;
1298 	}
1299 
1300 	xe_pm_runtime_get_noresume(xe);
1301 
1302 	if (drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe)) {
1303 		drm_err(&xe->drm, "Failed to register xe_device_wedged_fini clean-up. Although device is wedged.\n");
1304 		return;
1305 	}
1306 
1307 	if (!atomic_xchg(&xe->wedged.flag, 1)) {
1308 		xe->needs_flr_on_fini = true;
1309 		drm_err(&xe->drm,
1310 			"CRITICAL: Xe has declared device %s as wedged.\n"
1311 			"IOCTLs and executions are blocked.\n"
1312 			"For recovery procedure, refer to https://docs.kernel.org/gpu/drm-uapi.html#device-wedging\n"
1313 			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
1314 			dev_name(xe->drm.dev));
1315 	}
1316 
1317 	for_each_gt(gt, xe, id)
1318 		xe_gt_declare_wedged(gt);
1319 
1320 	if (xe_device_wedged(xe)) {
1321 		/* If no wedge recovery method is set, use default */
1322 		if (!xe->wedged.method)
1323 			xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND |
1324 						    DRM_WEDGE_RECOVERY_BUS_RESET);
1325 
1326 		/* Notify userspace of wedged device */
1327 		drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL);
1328 	}
1329 }
1330 
1331 /**
1332  * xe_device_validate_wedged_mode - Check if given mode is supported
1333  * @xe: the &xe_device
1334  * @mode: requested mode to validate
1335  *
1336  * Check whether the provided wedged mode is supported.
1337  *
1338  * Return: 0 if mode is supported, error code otherwise.
1339  */
1340 int xe_device_validate_wedged_mode(struct xe_device *xe, unsigned int mode)
1341 {
1342 	if (mode > XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET) {
1343 		drm_dbg(&xe->drm, "wedged_mode: invalid value (%u)\n", mode);
1344 		return -EINVAL;
1345 	} else if (mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET && (IS_SRIOV_VF(xe) ||
1346 		   (IS_SRIOV_PF(xe) && !IS_ENABLED(CONFIG_DRM_XE_DEBUG)))) {
1347 		drm_dbg(&xe->drm, "wedged_mode: (%u) %s mode is not supported for %s\n",
1348 			mode, xe_wedged_mode_to_string(mode),
1349 			xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
1350 		return -EPERM;
1351 	}
1352 
1353 	return 0;
1354 }
1355 
1356 /**
1357  * xe_wedged_mode_to_string - Convert enum value to string.
1358  * @mode: the &xe_wedged_mode to convert
1359  *
1360  * Returns: wedged mode as a user friendly string.
1361  */
1362 const char *xe_wedged_mode_to_string(enum xe_wedged_mode mode)
1363 {
1364 	switch (mode) {
1365 	case XE_WEDGED_MODE_NEVER:
1366 		return "never";
1367 	case XE_WEDGED_MODE_UPON_CRITICAL_ERROR:
1368 		return "upon-critical-error";
1369 	case XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET:
1370 		return "upon-any-hang-no-reset";
1371 	default:
1372 		return "<invalid>";
1373 	}
1374 }
1375 
1376 /**
1377  * xe_device_asid_to_vm() - Find VM from ASID
1378  * @xe: the &xe_device
1379  * @asid: Address space ID
1380  *
1381  * Find a VM from ASID and take a reference to VM which caller must drop.
1382  * Reclaim safe.
1383  *
1384  * Return: VM on success, ERR_PTR on failure
1385  */
1386 struct xe_vm *xe_device_asid_to_vm(struct xe_device *xe, u32 asid)
1387 {
1388 	struct xe_vm *vm;
1389 
1390 	down_read(&xe->usm.lock);
1391 	vm = xa_load(&xe->usm.asid_to_vm, asid);
1392 	if (vm)
1393 		xe_vm_get(vm);
1394 	else
1395 		vm = ERR_PTR(-EINVAL);
1396 	up_read(&xe->usm.lock);
1397 
1398 	return vm;
1399 }
1400