// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_device.h"

#include <linux/aperture.h>
#include <linux/delay.h>
#include <linux/fault-inject.h>
#include <linux/units.h>

#include <drm/drm_atomic_helper.h>
#include <drm/drm_client.h>
#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_pagemap_util.h>
#include <drm/drm_print.h>
#include <uapi/drm/xe_drm.h>

#include "display/xe_display.h"
#include "instructions/xe_gpu_commands.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_regs.h"
#include "xe_bo.h"
#include "xe_bo_evict.h"
#include "xe_debugfs.h"
#include "xe_defaults.h"
#include "xe_devcoredump.h"
#include "xe_device_sysfs.h"
#include "xe_dma_buf.h"
#include "xe_drm_client.h"
#include "xe_drv.h"
#include "xe_exec.h"
#include "xe_exec_queue.h"
#include "xe_force_wake.h"
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_gt_mcr.h"
#include "xe_gt_printk.h"
#include "xe_gt_sriov_vf.h"
#include "xe_guc.h"
#include "xe_guc_pc.h"
#include "xe_hw_engine_group.h"
#include "xe_hwmon.h"
#include "xe_i2c.h"
#include "xe_irq.h"
#include "xe_late_bind_fw.h"
#include "xe_mmio.h"
#include "xe_module.h"
#include "xe_nvm.h"
#include "xe_oa.h"
#include "xe_observation.h"
#include "xe_pagefault.h"
#include "xe_pat.h"
#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_pmu.h"
#include "xe_psmi.h"
#include "xe_pxp.h"
#include "xe_query.h"
#include "xe_shrinker.h"
#include "xe_soc_remapper.h"
#include "xe_survivability_mode.h"
#include "xe_sriov.h"
#include "xe_svm.h"
#include "xe_tile.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_ttm_sys_mgr.h"
#include "xe_vm.h"
#include "xe_vm_madvise.h"
#include "xe_vram.h"
#include "xe_vram_types.h"
#include "xe_vsec.h"
#include "xe_wait_user_fence.h"
#include "xe_wa.h"

#include <generated/xe_device_wa_oob.h>
#include <generated/xe_wa_oob.h>

static int xe_file_open(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_drm_client *client;
	struct xe_file *xef;
	int ret = -ENOMEM;
	struct task_struct *task = NULL;

	xef = kzalloc_obj(*xef);
	if (!xef)
		return ret;

	client = xe_drm_client_alloc();
	if (!client) {
		kfree(xef);
		return ret;
	}

	xef->drm = file;
	xef->client = client;
	xef->xe = xe;

	mutex_init(&xef->vm.lock);
	xa_init_flags(&xef->vm.xa, XA_FLAGS_ALLOC1);

	mutex_init(&xef->exec_queue.lock);
	xa_init_flags(&xef->exec_queue.xa, XA_FLAGS_ALLOC1);

	file->driver_priv = xef;
	kref_init(&xef->refcount);

	task = get_pid_task(rcu_access_pointer(file->pid), PIDTYPE_PID);
	if (task) {
		xef->process_name = kstrdup(task->comm, GFP_KERNEL);
		xef->pid = task->pid;
		put_task_struct(task);
	}

	return 0;
}

static void xe_file_destroy(struct kref *ref)
{
	struct xe_file *xef = container_of(ref, struct xe_file, refcount);

	xa_destroy(&xef->exec_queue.xa);
	mutex_destroy(&xef->exec_queue.lock);
	xa_destroy(&xef->vm.xa);
	mutex_destroy(&xef->vm.lock);

	xe_drm_client_put(xef->client);
	kfree(xef->process_name);
	kfree(xef);
}

/**
 * xe_file_get() - Take a reference to the xe file object
 * @xef: Pointer to the xe file
 *
 * Anyone with a pointer to xef must take a reference to the xe file
 * object using this call.
 *
 * Return: xe file pointer
 */
struct xe_file *xe_file_get(struct xe_file *xef)
{
	kref_get(&xef->refcount);
	return xef;
}

/**
 * xe_file_put() - Drop a reference to the xe file object
 * @xef: Pointer to the xe file
 *
 * Used to drop a reference to the xef object.
 */
void xe_file_put(struct xe_file *xef)
{
	kref_put(&xef->refcount, xe_file_destroy);
}
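
/*
 * Usage sketch (illustrative only, not from this file): code that stashes an
 * xef pointer beyond the scope it was handed in is expected to pair the two
 * helpers above, e.g.:
 *
 *	stashed_xef = xe_file_get(xef);
 *	...
 *	xe_file_put(stashed_xef);
 *
 * where stashed_xef is a hypothetical long-lived pointer; the final put may
 * end up calling xe_file_destroy().
 */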

static void xe_file_close(struct drm_device *dev, struct drm_file *file)
{
	struct xe_device *xe = to_xe_device(dev);
	struct xe_file *xef = file->driver_priv;
	struct xe_vm *vm;
	struct xe_exec_queue *q;
	unsigned long idx;

	guard(xe_pm_runtime)(xe);

	/*
	 * No need for exec_queue.lock here as there is no contention for it
	 * while the FD is closing, since IOCTLs presumably can't be modifying
	 * the xarray. Taking exec_queue.lock here causes an undue dependency
	 * on vm->lock taken during xe_exec_queue_kill().
	 */
	xa_for_each(&xef->exec_queue.xa, idx, q) {
		if (q->vm && q->hwe->hw_engine_group)
			xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q);
		xe_exec_queue_kill(q);
		xe_exec_queue_put(q);
	}
	xa_for_each(&xef->vm.xa, idx, vm)
		xe_vm_close_and_put(vm);

	xe_file_put(xef);
}

static const struct drm_ioctl_desc xe_ioctls[] = {
	DRM_IOCTL_DEF_DRV(XE_DEVICE_QUERY, xe_query_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_CREATE, xe_gem_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_GEM_MMAP_OFFSET, xe_gem_mmap_offset_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_CREATE, xe_vm_create_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_DESTROY, xe_vm_destroy_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_BIND, xe_vm_bind_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC, xe_exec_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_CREATE, xe_exec_queue_create_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_DESTROY, xe_exec_queue_destroy_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_GET_PROPERTY, xe_exec_queue_get_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_WAIT_USER_FENCE, xe_wait_user_fence_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_OBSERVATION, xe_observation_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_MADVISE, xe_vm_madvise_ioctl, DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_QUERY_MEM_RANGE_ATTRS, xe_vm_query_vmas_attrs_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_EXEC_QUEUE_SET_PROPERTY, xe_exec_queue_set_property_ioctl,
			  DRM_RENDER_ALLOW),
	DRM_IOCTL_DEF_DRV(XE_VM_GET_PROPERTY, xe_vm_get_property_ioctl,
			  DRM_RENDER_ALLOW),
};

static long xe_drm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
	if (ret >= 0)
		ret = drm_ioctl(file, cmd, arg);

	return ret;
}

#ifdef CONFIG_COMPAT
static long xe_drm_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct drm_file *file_priv = file->private_data;
	struct xe_device *xe = to_xe_device(file_priv->minor->dev);
	long ret;

	if (xe_device_wedged(xe))
		return -ECANCELED;

	ACQUIRE(xe_pm_runtime_ioctl, pm)(xe);
	ret = ACQUIRE_ERR(xe_pm_runtime_ioctl, &pm);
	if (ret >= 0)
		ret = drm_compat_ioctl(file, cmd, arg);

	return ret;
}
#else
/* similarly to drm_compat_ioctl, let it be assigned to .compat_ioctl unconditionally */
#define xe_drm_compat_ioctl NULL
#endif

static void barrier_open(struct vm_area_struct *vma)
{
	drm_dev_get(vma->vm_private_data);
}

static void barrier_close(struct vm_area_struct *vma)
{
	drm_dev_put(vma->vm_private_data);
}

static void barrier_release_dummy_page(struct drm_device *dev, void *res)
{
	struct page *dummy_page = (struct page *)res;

	__free_page(dummy_page);
}

static vm_fault_t barrier_fault(struct vm_fault *vmf)
{
	struct drm_device *dev = vmf->vma->vm_private_data;
	struct vm_area_struct *vma = vmf->vma;
	vm_fault_t ret = VM_FAULT_NOPAGE;
	pgprot_t prot;
	int idx;

	prot = vm_get_page_prot(vma->vm_flags);

	if (drm_dev_enter(dev, &idx)) {
		unsigned long pfn;

#define LAST_DB_PAGE_OFFSET 0x7ff001
		pfn = PHYS_PFN(pci_resource_start(to_pci_dev(dev->dev), 0) +
			       LAST_DB_PAGE_OFFSET);
		ret = vmf_insert_pfn_prot(vma, vma->vm_start, pfn,
					  pgprot_noncached(prot));
		drm_dev_exit(idx);
	} else {
		struct page *page;

		/* Allocate a new dummy page to map all the VA range in this VMA to it */
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!page)
			return VM_FAULT_OOM;

		/* Set the page to be freed using a drmm release action */
		if (drmm_add_action_or_reset(dev, barrier_release_dummy_page, page))
			return VM_FAULT_OOM;

		ret = vmf_insert_pfn_prot(vma, vma->vm_start, page_to_pfn(page),
					  prot);
	}

	return ret;
}

static const struct vm_operations_struct vm_ops_barrier = {
	.open = barrier_open,
	.close = barrier_close,
	.fault = barrier_fault,
};

static int xe_pci_barrier_mmap(struct file *filp,
			       struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct xe_device *xe = to_xe_device(dev);

	if (!IS_DGFX(xe))
		return -EINVAL;

	if (vma->vm_end - vma->vm_start > SZ_4K)
		return -EINVAL;

	if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	if (vma->vm_flags & (VM_READ | VM_EXEC))
		return -EINVAL;

	vm_flags_clear(vma, VM_MAYREAD | VM_MAYEXEC);
	vm_flags_set(vma, VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_IO);
	vma->vm_ops = &vm_ops_barrier;
	vma->vm_private_data = dev;
	drm_dev_get(vma->vm_private_data);

	return 0;
}
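
/*
 * Userspace usage sketch (illustrative only, names hypothetical): the barrier
 * page is mapped write-only and at most 4K, matching the checks above, e.g.:
 *
 *	void *ptr = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, drm_fd,
 *			 barrier_offset);
 *
 * where barrier_offset stands in for the fake offset userspace obtained for
 * the PCI barrier (see the uapi header for the authoritative definition); a
 * PROT_READ or PROT_EXEC mapping is rejected with -EINVAL above.
 */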

static int xe_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	switch (vma->vm_pgoff) {
	case XE_PCI_BARRIER_MMAP_OFFSET >> XE_PTE_SHIFT:
		return xe_pci_barrier_mmap(filp, vma);
	}

	return drm_gem_mmap(filp, vma);
}

static const struct file_operations xe_driver_fops = {
	.owner = THIS_MODULE,
	.open = drm_open,
	.release = drm_release_noglobal,
	.unlocked_ioctl = xe_drm_ioctl,
	.mmap = xe_mmap,
	.poll = drm_poll,
	.read = drm_read,
	.compat_ioctl = xe_drm_compat_ioctl,
	.llseek = noop_llseek,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
	.fop_flags = FOP_UNSIGNED_OFFSET,
};

/**
 * xe_is_xe_file() - Is the file an xe device file?
 * @file: The file.
 *
 * Checks whether the file is opened against
 * an xe device.
 *
 * Return: %true if an xe file, %false if not.
 */
bool xe_is_xe_file(const struct file *file)
{
	return file->f_op == &xe_driver_fops;
}

static struct drm_driver driver = {
	.driver_features =
	    DRIVER_GEM |
	    DRIVER_RENDER | DRIVER_SYNCOBJ |
	    DRIVER_SYNCOBJ_TIMELINE | DRIVER_GEM_GPUVA,
	.open = xe_file_open,
	.postclose = xe_file_close,

	.gem_prime_import = xe_gem_prime_import,

	.dumb_create = xe_bo_dumb_create,
	.dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = xe_drm_client_fdinfo,
#endif
	.ioctls = xe_ioctls,
	.num_ioctls = ARRAY_SIZE(xe_ioctls),
	.fops = &xe_driver_fops,
	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
};

static void xe_device_destroy(struct drm_device *dev, void *dummy)
{
	struct xe_device *xe = to_xe_device(dev);

	xe_bo_dev_fini(&xe->bo_device);

	if (xe->preempt_fence_wq)
		destroy_workqueue(xe->preempt_fence_wq);

	if (xe->ordered_wq)
		destroy_workqueue(xe->ordered_wq);

	if (xe->unordered_wq)
		destroy_workqueue(xe->unordered_wq);

	if (xe->destroy_wq)
		destroy_workqueue(xe->destroy_wq);

	ttm_device_fini(&xe->ttm);
}

struct xe_device *xe_device_create(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	struct xe_device *xe;
	int err;

	xe_display_driver_set_hooks(&driver);

	err = aperture_remove_conflicting_pci_devices(pdev, driver.name);
	if (err)
		return ERR_PTR(err);

	xe = devm_drm_dev_alloc(&pdev->dev, &driver, struct xe_device, drm);
	if (IS_ERR(xe))
		return xe;

	err = ttm_device_init(&xe->ttm, &xe_ttm_funcs, xe->drm.dev,
			      xe->drm.anon_inode->i_mapping,
			      xe->drm.vma_offset_manager, 0);
	if (WARN_ON(err))
		return ERR_PTR(err);

	xe_bo_dev_init(&xe->bo_device);
	err = drmm_add_action_or_reset(&xe->drm, xe_device_destroy, NULL);
	if (err)
		return ERR_PTR(err);

	err = xe_shrinker_create(xe);
	if (err)
		return ERR_PTR(err);

	xe->info.devid = pdev->device;
	xe->info.revid = pdev->revision;
	xe->info.force_execlist = xe_modparam.force_execlist;
	xe->atomic_svm_timeslice_ms = 5;
	xe->min_run_period_lr_ms = 5;

	err = xe_irq_init(xe);
	if (err)
		return ERR_PTR(err);

	xe_validation_device_init(&xe->val);

	init_waitqueue_head(&xe->ufence_wq);

	init_rwsem(&xe->usm.lock);

	err = xe_pagemap_shrinker_create(xe);
	if (err)
		return ERR_PTR(err);

	xa_init_flags(&xe->usm.asid_to_vm, XA_FLAGS_ALLOC);

	if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) {
		/* Trigger a large asid and an early asid wrap. */
		u32 asid;

		BUILD_BUG_ON(XE_MAX_ASID < 2);
		err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, NULL,
				      XA_LIMIT(XE_MAX_ASID - 2, XE_MAX_ASID - 1),
				      &xe->usm.next_asid, GFP_KERNEL);
		drm_WARN_ON(&xe->drm, err);
		if (err >= 0)
			xa_erase(&xe->usm.asid_to_vm, asid);
	}

	err = xe_bo_pinned_init(xe);
	if (err)
		return ERR_PTR(err);

	xe->preempt_fence_wq = alloc_ordered_workqueue("xe-preempt-fence-wq",
						       WQ_MEM_RECLAIM);
	xe->ordered_wq = alloc_ordered_workqueue("xe-ordered-wq", 0);
	xe->unordered_wq = alloc_workqueue("xe-unordered-wq", WQ_PERCPU, 0);
	xe->destroy_wq = alloc_workqueue("xe-destroy-wq", WQ_PERCPU, 0);
	if (!xe->ordered_wq || !xe->unordered_wq ||
	    !xe->preempt_fence_wq || !xe->destroy_wq) {
		/*
		 * Cleanup done in xe_device_destroy via the
		 * drmm_add_action_or_reset registered above
		 */
		drm_err(&xe->drm, "Failed to allocate xe workqueues\n");
		return ERR_PTR(-ENOMEM);
	}

	err = drmm_mutex_init(&xe->drm, &xe->pmt.lock);
	if (err)
		return ERR_PTR(err);

	return xe;
}
ALLOW_ERROR_INJECTION(xe_device_create, ERRNO); /* See xe_pci_probe() */

static bool xe_driver_flr_disabled(struct xe_device *xe)
{
	if (IS_SRIOV_VF(xe))
		return true;

	if (xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL_PROTECTED) & DRIVERINT_FLR_DIS) {
		drm_info(&xe->drm, "Driver-FLR disabled by BIOS\n");
		return true;
	}

	return false;
}

/*
 * The driver-initiated FLR is the highest level of reset that we can trigger
 * from within the driver. It is different from the PCI FLR in that it doesn't
 * fully reset the SGUnit and doesn't modify the PCI config space and therefore
 * it doesn't require a re-enumeration of the PCI BARs. However, the
 * driver-initiated FLR does still cause a reset of both GT and display and a
 * memory wipe of local and stolen memory, so recovery would require a full HW
 * re-init and saving/restoring (or re-populating) the wiped memory. Since we
 * perform the FLR as the very last action before releasing access to the HW
 * during the driver release flow, we don't attempt recovery at all, because
 * if/when a new instance of Xe is bound to the device it will do a full
 * re-init anyway.
 */
static void __xe_driver_flr(struct xe_device *xe)
{
	const unsigned int flr_timeout = 3 * USEC_PER_SEC; /* specs recommend a 3s wait */
	struct xe_mmio *mmio = xe_root_tile_mmio(xe);
	int ret;

	drm_dbg(&xe->drm, "Triggering Driver-FLR\n");

	/*
	 * Make sure any pending FLR requests have cleared by waiting for the
	 * FLR trigger bit to go to zero. Also clear GU_DEBUG's DRIVERFLR_STATUS
	 * to make sure it's not still set from a prior attempt (it's a
	 * write-to-clear bit).
	 * Note that we should never be in a situation where a previous attempt
	 * is still pending (unless the HW is totally dead), but better to be
	 * safe in case something unexpected happens.
	 */
	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-prepare wait for ready failed! %d\n", ret);
		return;
	}
	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);

	/* Trigger the actual Driver-FLR */
	xe_mmio_rmw32(mmio, GU_CNTL, 0, DRIVERFLR);

	/* Wait for hardware teardown to complete */
	ret = xe_mmio_wait32(mmio, GU_CNTL, DRIVERFLR, 0, flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-teardown wait completion failed! %d\n", ret);
		return;
	}

	/* Wait for hardware/firmware re-init to complete */
	ret = xe_mmio_wait32(mmio, GU_DEBUG, DRIVERFLR_STATUS, DRIVERFLR_STATUS,
			     flr_timeout, NULL, false);
	if (ret) {
		drm_err(&xe->drm, "Driver-FLR-reinit wait completion failed! %d\n", ret);
		return;
	}

	/* Clear sticky completion status */
	xe_mmio_write32(mmio, GU_DEBUG, DRIVERFLR_STATUS);
}

static void xe_driver_flr(struct xe_device *xe)
{
	if (xe_driver_flr_disabled(xe))
		return;

	__xe_driver_flr(xe);
}

static void xe_driver_flr_fini(void *arg)
{
	struct xe_device *xe = arg;

	if (xe->needs_flr_on_fini)
		xe_driver_flr(xe);
}

static void xe_device_sanitize(void *arg)
{
	struct xe_device *xe = arg;
	struct xe_gt *gt;
	u8 id;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize(gt);
}

static int xe_set_dma_info(struct xe_device *xe)
{
	unsigned int mask_size = xe->info.dma_mask_size;
	int err;

	dma_set_max_seg_size(xe->drm.dev, xe_sg_segment_size(xe->drm.dev));

	err = dma_set_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	err = dma_set_coherent_mask(xe->drm.dev, DMA_BIT_MASK(mask_size));
	if (err)
		goto mask_err;

	return 0;

mask_err:
	drm_err(&xe->drm, "Can't set DMA mask/consistent mask (%d)\n", err);
	return err;
}

static void assert_lmem_ready(struct xe_device *xe)
{
	if (!IS_DGFX(xe) || IS_SRIOV_VF(xe))
		return;

	xe_assert(xe, xe_mmio_read32(xe_root_tile_mmio(xe), GU_CNTL) &
		      LMEM_INIT);
}

static void vf_update_device_info(struct xe_device *xe)
{
	xe_assert(xe, IS_SRIOV_VF(xe));
	/* disable features that are not available/applicable to VFs */
	xe->info.probe_display = 0;
	xe->info.has_heci_cscfi = 0;
	xe->info.has_heci_gscfi = 0;
	xe->info.has_late_bind = 0;
	xe->info.skip_guc_pc = 1;
	xe->info.skip_pcode = 1;
}

static int xe_device_vram_alloc(struct xe_device *xe)
{
	struct xe_vram_region *vram;

	if (!IS_DGFX(xe))
		return 0;

	vram = drmm_kzalloc(&xe->drm, sizeof(*vram), GFP_KERNEL);
	if (!vram)
		return -ENOMEM;

	xe->mem.vram = vram;
	return 0;
}

/**
 * xe_device_probe_early() - Device early probe
 * @xe: xe device instance
 *
 * Initialize MMIO resources that don't require any
 * knowledge about tile count. Also initialize pcode and
 * check vram initialization on root tile.
 *
 * Return: 0 on success, error code on failure
 */
int xe_device_probe_early(struct xe_device *xe)
{
	int err;

	xe_wa_device_init(xe);
	xe_wa_process_device_oob(xe);

	err = xe_mmio_probe_early(xe);
	if (err)
		return err;

	xe_sriov_probe_early(xe);

	if (IS_SRIOV_VF(xe))
		vf_update_device_info(xe);

	/*
	 * Check the pcode uncore_init status to confirm that the SoC
	 * initialization is complete. Until it is done, any MMIO or lmem
	 * access from the driver will be blocked.
	 */
	err = xe_pcode_probe_early(xe);
	if (err || xe_survivability_mode_is_requested(xe)) {
		int save_err = err;

		/*
		 * Try to leave the device in survivability mode if possible,
		 * but still return the previous error for error propagation.
		 */
		err = xe_survivability_mode_boot_enable(xe);
		if (err)
			return err;

		return save_err;
	}

	/*
	 * Make sure the lmem is initialized and ready to use. xe_pcode_ready()
	 * is flagged after full initialization is complete. Assert if lmem is
	 * not initialized.
	 */
	assert_lmem_ready(xe);

	xe->wedged.mode = xe_device_validate_wedged_mode(xe, xe_modparam.wedged_mode) ?
		XE_DEFAULT_WEDGED_MODE : xe_modparam.wedged_mode;
	drm_dbg(&xe->drm, "wedged_mode: setting mode (%u) %s\n",
		xe->wedged.mode, xe_wedged_mode_to_string(xe->wedged.mode));

	err = xe_device_vram_alloc(xe);
	if (err)
		return err;

	return 0;
}
ALLOW_ERROR_INJECTION(xe_device_probe_early, ERRNO); /* See xe_pci_probe() */

static int probe_has_flat_ccs(struct xe_device *xe)
{
	struct xe_gt *gt;
	u32 reg;

	/* Always enabled/disabled, no runtime check to do */
	if (GRAPHICS_VER(xe) < 20 || !xe->info.has_flat_ccs || IS_SRIOV_VF(xe))
		return 0;

	gt = xe_root_mmio_gt(xe);
	if (!gt)
		return 0;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return -ETIMEDOUT;

	reg = xe_gt_mcr_unicast_read_any(gt, XE2_FLAT_CCS_BASE_RANGE_LOWER);
	xe->info.has_flat_ccs = (reg & XE2_FLAT_CCS_ENABLE);

	if (!xe->info.has_flat_ccs)
		drm_dbg(&xe->drm,
			"Flat CCS has been disabled in BIOS, may lead to performance impact");

	return 0;
}

/*
 * Detect if the driver is being run on pre-production hardware. We don't
 * keep workarounds for pre-production hardware long term, so print an
 * error and add taint if we're being loaded on a pre-production platform
 * for which the pre-prod workarounds have already been removed.
 *
 * The general policy is that we'll remove any workarounds that only apply to
 * pre-production hardware around the time force_probe restrictions are lifted
 * for a platform of the next major IP generation (for example, Xe2 pre-prod
 * workarounds should be removed around the time the first Xe3 platforms have
 * force_probe lifted).
 */
static void detect_preproduction_hw(struct xe_device *xe)
{
	struct xe_gt *gt;
	int id;

	/*
	 * SR-IOV VFs don't have access to the FUSE2 register, so we can't
	 * check pre-production status there. But the host OS will notice
	 * and report the pre-production status, which should be enough to
	 * help us catch mistaken use of pre-production hardware.
	 */
	if (IS_SRIOV_VF(xe))
		return;

	/*
	 * The "SW_CAP" fuse contains a bit indicating whether the device is a
	 * production or pre-production device. This fuse is reflected through
	 * the GT "FUSE2" register, even though the contents of the fuse are
	 * not GT-specific. Every GT's reflection of this fuse should show the
	 * same value, so we'll just use the first available GT for lookup.
	 */
	for_each_gt(gt, xe, id)
		break;

	if (!gt)
		return;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!xe_force_wake_ref_has_domain(fw_ref.domains, XE_FW_GT)) {
		xe_gt_err(gt, "Forcewake failure; cannot determine production/pre-production hw status.\n");
		return;
	}

	if (xe_mmio_read32(&gt->mmio, FUSE2) & PRODUCTION_HW)
		return;

	xe_info(xe, "Pre-production hardware detected.\n");
	if (!xe->info.has_pre_prod_wa) {
		xe_err(xe, "Pre-production workarounds for this platform have already been removed.\n");
		add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
	}
}

static void xe_device_wedged_fini(struct drm_device *drm, void *arg)
{
	struct xe_device *xe = arg;

	if (atomic_read(&xe->wedged.flag))
		xe_pm_runtime_put(xe);
}

int xe_device_probe(struct xe_device *xe)
{
	struct xe_tile *tile;
	struct xe_gt *gt;
	int err;
	u8 id;

	xe_pat_init_early(xe);

	err = xe_sriov_init(xe);
	if (err)
		return err;

	xe->info.mem_region_mask = 1;

	err = xe_set_dma_info(xe);
	if (err)
		return err;

	err = xe_mmio_probe_tiles(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init_early(gt);
		if (err)
			return err;
	}

	for_each_tile(tile, xe, id) {
		err = xe_ggtt_init_early(tile->mem.ggtt);
		if (err)
			return err;
	}

	/*
	 * From here on, if a step fails, make sure a Driver-FLR is triggered
	 */
	err = devm_add_action_or_reset(xe->drm.dev, xe_driver_flr_fini, xe);
	if (err)
		return err;

	err = probe_has_flat_ccs(xe);
	if (err)
		return err;

	err = xe_vram_probe(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init_noalloc(tile);
		if (err)
			return err;
	}

	/*
	 * Allow allocations only now to ensure xe_display_init_early()
	 * is the first to allocate, always.
	 */
	err = xe_ttm_sys_mgr_init(xe);
	if (err)
		return err;

	/* Allocate and map stolen after potential VRAM resize */
	err = xe_ttm_stolen_mgr_init(xe);
	if (err)
		return err;

	/*
	 * Now that GT is initialized (TTM in particular),
	 * we can try to init display, and inherit the initial fb.
	 * This is the reason the first allocation needs to be done
	 * inside display.
	 */
	err = xe_display_init_early(xe);
	if (err)
		return err;

	for_each_tile(tile, xe, id) {
		err = xe_tile_init(tile);
		if (err)
			return err;
	}

	err = xe_irq_install(xe);
	if (err)
		return err;

	for_each_gt(gt, xe, id) {
		err = xe_gt_init(gt);
		if (err)
			return err;
	}

	err = xe_pagefault_init(xe);
	if (err)
		return err;

	if (xe->tiles->media_gt &&
	    XE_GT_WA(xe->tiles->media_gt, 15015404425_disable))
		XE_DEVICE_WA_DISABLE(xe, 15015404425);

	err = xe_devcoredump_init(xe);
	if (err)
		return err;

	xe_nvm_init(xe);

	err = xe_soc_remapper_init(xe);
	if (err)
		return err;

	err = xe_heci_gsc_init(xe);
	if (err)
		return err;

	err = xe_late_bind_init(&xe->late_bind);
	if (err)
		return err;

	err = xe_oa_init(xe);
	if (err)
		return err;

	err = xe_display_init(xe);
	if (err)
		return err;

	err = xe_pxp_init(xe);
	if (err)
		return err;

	err = xe_psmi_init(xe);
	if (err)
		return err;

	err = drm_dev_register(&xe->drm, 0);
	if (err)
		return err;

	xe_display_register(xe);

	err = xe_oa_register(xe);
	if (err)
		goto err_unregister_display;

	err = xe_pmu_register(&xe->pmu);
	if (err)
		goto err_unregister_display;

	err = xe_device_sysfs_init(xe);
	if (err)
		goto err_unregister_display;

	xe_debugfs_register(xe);

	err = xe_hwmon_register(xe);
	if (err)
		goto err_unregister_display;

	err = xe_i2c_probe(xe);
	if (err)
		goto err_unregister_display;

	for_each_gt(gt, xe, id)
		xe_gt_sanitize_freq(gt);

	xe_vsec_init(xe);

	err = xe_sriov_init_late(xe);
	if (err)
		goto err_unregister_display;

	detect_preproduction_hw(xe);

	err = drmm_add_action_or_reset(&xe->drm, xe_device_wedged_fini, xe);
	if (err)
		goto err_unregister_display;

	return devm_add_action_or_reset(xe->drm.dev, xe_device_sanitize, xe);

err_unregister_display:
	xe_display_unregister(xe);
	drm_dev_unregister(&xe->drm);

	return err;
}

void xe_device_remove(struct xe_device *xe)
{
	xe_display_unregister(xe);

	drm_dev_unplug(&xe->drm);

	xe_bo_pci_dev_remove_all(xe);
}

void xe_device_shutdown(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	drm_dbg(&xe->drm, "Shutting down device\n");

	xe_display_pm_shutdown(xe);

	xe_irq_suspend(xe);

	for_each_gt(gt, xe, id)
		xe_gt_shutdown(gt);

	xe_display_pm_shutdown_late(xe);

	if (!xe_driver_flr_disabled(xe)) {
		/* BOOM! */
		__xe_driver_flr(xe);
	}
}

/**
 * xe_device_wmb() - Device specific write memory barrier
 * @xe: the &xe_device
 *
 * While wmb() is sufficient for a barrier if we use system memory, on discrete
 * platforms with device memory we additionally need to issue a register write.
 * Since it doesn't matter which register we write to, use the read-only VF_CAP
 * register that is also marked as accessible by the VFs.
 */
void xe_device_wmb(struct xe_device *xe)
{
	wmb();
	if (IS_DGFX(xe))
		xe_mmio_write32(xe_root_tile_mmio(xe), VF_CAP_REG, 0);
}
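
/*
 * Usage sketch (illustrative only, names hypothetical): callers use this in
 * place of a plain wmb() when a CPU write to device-visible memory must land
 * before a follow-up trigger, e.g.:
 *
 *	writeq(value, vram_addr);
 *	xe_device_wmb(xe);
 *	ring_doorbell(xe);
 *
 * where ring_doorbell() stands in for whatever MMIO write kicks the GPU; the
 * VF_CAP_REG write above is what flushes posted writes on discrete parts.
 */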

/*
 * Issue a TRANSIENT_FLUSH_REQUEST and wait for completion on each gt.
 */
static void tdf_request_sync(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	for_each_gt_with_type(gt, xe, id, BIT(XE_GT_TYPE_MAIN)) {
		CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
		if (!fw_ref.domains)
			return;

		xe_mmio_write32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);

		/*
		 * FIXME: We can likely do better here with our choice of
		 * timeout. Currently we just assume the worst case, i.e. 150us,
		 * which is believed to be sufficient to cover the worst case
		 * scenario on current platforms if all cache entries are
		 * transient and need to be flushed.
		 */
		if (xe_mmio_wait32(&gt->mmio, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
				   300, NULL, false))
			xe_gt_err_once(gt, "TD flush timeout\n");
	}
}

/**
 * xe_device_is_l2_flush_optimized() - Check whether the L2 flush is optimized by HW
 * @xe: The device to check.
 *
 * Return: true if the HW optimizes the L2 flush, false otherwise.
 */
bool xe_device_is_l2_flush_optimized(struct xe_device *xe)
{
	/*
	 * XA is *always* flushed, e.g. at the end of submission (and maybe
	 * other places); it's just that internally, as an optimisation, the HW
	 * doesn't need to make that a full flush (which would also include XA)
	 * when Media is off/powergated, since it then doesn't need to worry
	 * about GT caches vs Media coherency, only CPU vs GPU coherency, and
	 * so can make that flush a targeted XA flush, since anything tagged
	 * with XA now means it's shared with the CPU. The main implication is
	 * that we now need to somehow flush non-XA before freeing system
	 * memory pages, otherwise dirty cachelines could be flushed after the
	 * free (like if Media suddenly turns on and does a full flush).
	 */
	if (GRAPHICS_VER(xe) >= 35 && !IS_DGFX(xe))
		return true;
	return false;
}

void xe_device_l2_flush(struct xe_device *xe)
{
	struct xe_gt *gt;

	gt = xe_root_mmio_gt(xe);
	if (!gt)
		return;

	if (!XE_GT_WA(gt, 16023588340))
		return;

	CLASS(xe_force_wake, fw_ref)(gt_to_fw(gt), XE_FW_GT);
	if (!fw_ref.domains)
		return;

	spin_lock(&gt->global_invl_lock);

	xe_mmio_write32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1);
	if (xe_mmio_wait32(&gt->mmio, XE2_GLOBAL_INVAL, 0x1, 0x0, 1000, NULL, true))
		xe_gt_err_once(gt, "Global invalidation timeout\n");

	spin_unlock(&gt->global_invl_lock);
}

/**
 * xe_device_td_flush() - Flush transient L3 cache entries
 * @xe: The device
 *
 * The display engine has direct access to memory and is never coherent with
 * L3/L4 caches (or CPU caches); however, KMD is responsible for specifically
 * flushing transient L3 GPU cache entries prior to the flip sequence to ensure
 * scanout can happen from such a surface without seeing corruption.
 *
 * Display surfaces can be tagged as transient by mapping them using one of the
 * various L3:XD PAT index modes on Xe2.
 *
 * Note: On non-discrete xe2 platforms, like LNL, the entire L3 cache is flushed
 * at the end of each submission via PIPE_CONTROL for compute/render, since SA
 * Media is not coherent with L3 and we want to support render-vs-media
 * use cases. For other engines like copy/blt the HW internally forces uncached
 * behaviour, hence why we can skip the TDF on such platforms.
 */
void xe_device_td_flush(struct xe_device *xe)
{
	struct xe_gt *root_gt;

	/*
	 * From Xe3p onward the HW takes care of flushing TD entries along
	 * with flushing XA entries, which happens at the usual sync points,
	 * like the end of submission, so no manual flush is needed here.
	 */
	if (GRAPHICS_VER(xe) >= 35)
		return;

	if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
		return;

	root_gt = xe_root_mmio_gt(xe);
	if (!root_gt)
		return;

	if (XE_GT_WA(root_gt, 16023588340)) {
		/* A transient flush is not sufficient: flush the L2 */
		xe_device_l2_flush(xe);
	} else {
		xe_guc_pc_apply_flush_freq_limit(&root_gt->uc.guc.pc);
		tdf_request_sync(xe);
		xe_guc_pc_remove_flush_freq_limit(&root_gt->uc.guc.pc);
	}
}

u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
{
	return xe_device_has_flat_ccs(xe) ?
		DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
}
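
/*
 * Worked example (illustrative only): assuming a hypothetical
 * NUM_BYTES_PER_CCS_BYTE() of 256, a 64 KiB buffer needs
 * DIV_ROUND_UP_ULL(65536, 256) = 256 bytes of CCS backing, while the helper
 * returns 0 on devices without flat CCS.
 */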

/**
 * xe_device_assert_mem_access() - Inspect the current runtime_pm state.
 * @xe: xe device instance
 *
 * To be used before any kind of memory access. It will splat a debug warning
 * if the device is currently sleeping. But it doesn't guarantee in any way
 * that the device is going to remain awake. Xe PM runtime get and put
 * functions might be added to the outer bound of the memory access, while
 * this check is intended for inner usage to splat some warning if the worst
 * case has just happened.
 */
void xe_device_assert_mem_access(struct xe_device *xe)
{
	xe_assert(xe, !xe_pm_runtime_suspended(xe));
}

void xe_device_snapshot_print(struct xe_device *xe, struct drm_printer *p)
{
	struct xe_gt *gt;
	u8 id;

	drm_printf(p, "PCI ID: 0x%04x\n", xe->info.devid);
	drm_printf(p, "PCI revision: 0x%02x\n", xe->info.revid);

	for_each_gt(gt, xe, id) {
		drm_printf(p, "GT id: %u\n", id);
		drm_printf(p, "\tTile: %u\n", gt->tile->id);
		drm_printf(p, "\tType: %s\n",
			   gt->info.type == XE_GT_TYPE_MAIN ? "main" : "media");
		drm_printf(p, "\tIP ver: %u.%u.%u\n",
			   REG_FIELD_GET(GMD_ID_ARCH_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_RELEASE_MASK, gt->info.gmdid),
			   REG_FIELD_GET(GMD_ID_REVID, gt->info.gmdid));
		drm_printf(p, "\tCS reference clock: %u\n", gt->info.reference_clock);
	}
}

u64 xe_device_canonicalize_addr(struct xe_device *xe, u64 address)
{
	return sign_extend64(address, xe->info.va_bits - 1);
}

u64 xe_device_uncanonicalize_addr(struct xe_device *xe, u64 address)
{
	return address & GENMASK_ULL(xe->info.va_bits - 1, 0);
}
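
/*
 * Worked example (illustrative only): with xe->info.va_bits == 48,
 * xe_device_canonicalize_addr() sign-extends bit 47, so 0x0000800000000000
 * becomes 0xffff800000000000, while xe_device_uncanonicalize_addr() masks the
 * canonical form back down to 0x0000800000000000.
 */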

/**
 * DOC: Xe Device Wedging
 *
 * The Xe driver uses the drm device wedged uevent as documented in
 * Documentation/gpu/drm-uapi.rst. When the device is in a wedged state, every
 * IOCTL will be blocked and the GT cannot be used. The conditions under which
 * the driver declares the device wedged depend on the wedged mode
 * configuration (see &enum xe_wedged_mode). The default recovery method for a
 * wedged state is rebind/bus-reset.
 *
 * Another recovery method is vendor-specific. Below are the cases that send
 * the ``WEDGED=vendor-specific`` recovery method in the drm device wedged
 * uevent.
 *
 * Case: Firmware Flash
 * --------------------
 *
 * Identification Hint
 * +++++++++++++++++++
 *
 * A ``WEDGED=vendor-specific`` drm device wedged uevent with
 * :ref:`Runtime Survivability mode <xe-survivability-mode>` is used to notify
 * the admin/userspace consumer about the need for a firmware flash.
 *
 * Recovery Procedure
 * ++++++++++++++++++
 *
 * Once the ``WEDGED=vendor-specific`` drm device wedged uevent is received,
 * follow the steps below:
 *
 * - Check the Runtime Survivability mode sysfs.
 *   If enabled, a firmware flash is required to recover the device.
 *
 *   /sys/bus/pci/devices/<device>/survivability_mode
 *
 * - The admin/userspace consumer can use firmware flashing tools like fwupd
 *   to flash the firmware and restore the device to normal operation.
 */

/**
 * xe_device_set_wedged_method() - Set wedged recovery method
 * @xe: xe device instance
 * @method: recovery method to set
 *
 * Set the wedged recovery method to be sent in the drm wedged uevent.
 */
void xe_device_set_wedged_method(struct xe_device *xe, unsigned long method)
{
	xe->wedged.method = method;
}

/**
 * xe_device_declare_wedged() - Declare device wedged
 * @xe: xe device instance
 *
 * This is a final state that can only be cleared with the recovery method
 * specified in the drm wedged uevent. The method can be set using
 * xe_device_set_wedged_method() before declaring the device as wedged. If no
 * method is set, reprobe (unbind/re-bind) will be sent by default.
 *
 * In this state every IOCTL will be blocked so the GT cannot be used.
 * In general it will be called upon any critical error such as gt reset
 * failure or guc loading failure. Userspace will be notified of this state
 * through a device wedged uevent.
 * If the xe.wedged module parameter is set to 2, this function will be called
 * on every single execution timeout (a.k.a. GPU hang) right after devcoredump
 * snapshot capture. In this mode, GT reset won't be attempted so the state of
 * the issue is preserved for further debugging.
 */
void xe_device_declare_wedged(struct xe_device *xe)
{
	struct xe_gt *gt;
	u8 id;

	if (xe->wedged.mode == XE_WEDGED_MODE_NEVER) {
		drm_dbg(&xe->drm, "Wedged mode is forcibly disabled\n");
		return;
	}

	if (!atomic_xchg(&xe->wedged.flag, 1)) {
		xe->needs_flr_on_fini = true;
		xe_pm_runtime_get_noresume(xe);
		drm_err(&xe->drm,
			"CRITICAL: Xe has declared device %s as wedged.\n"
			"IOCTLs and executions are blocked.\n"
			"For recovery procedure, refer to https://docs.kernel.org/gpu/drm-uapi.html#device-wedging\n"
			"Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/xe/kernel/issues/new\n",
			dev_name(xe->drm.dev));
	}

	for_each_gt(gt, xe, id)
		xe_gt_declare_wedged(gt);

	if (xe_device_wedged(xe)) {
		/*
		 * XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET is intended for debugging
		 * hangs, so wedge the device with the 'none' recovery method and
		 * have it available to the user for debugging.
		 */
		if (xe->wedged.mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET)
			xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_NONE);
		/* If no wedge recovery method is set, use default */
		else if (!xe->wedged.method)
			xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_REBIND |
						    DRM_WEDGE_RECOVERY_BUS_RESET);

		/* Notify userspace of wedged device */
		drm_dev_wedged_event(&xe->drm, xe->wedged.method, NULL);
	}
}
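
/*
 * Usage sketch (illustrative only): a caller that wants a non-default
 * recovery hint sets the method before wedging, e.g.:
 *
 *	xe_device_set_wedged_method(xe, DRM_WEDGE_RECOVERY_NONE);
 *	xe_device_declare_wedged(xe);
 *
 * Which recovery method is appropriate depends on the failure being reported;
 * without a prior call the rebind/bus-reset default above is used.
 */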

/**
 * xe_device_validate_wedged_mode() - Check if given mode is supported
 * @xe: the &xe_device
 * @mode: requested mode to validate
 *
 * Check whether the provided wedged mode is supported.
 *
 * Return: 0 if mode is supported, error code otherwise.
 */
int xe_device_validate_wedged_mode(struct xe_device *xe, unsigned int mode)
{
	if (mode > XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET) {
		drm_dbg(&xe->drm, "wedged_mode: invalid value (%u)\n", mode);
		return -EINVAL;
	} else if (mode == XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET && (IS_SRIOV_VF(xe) ||
		   (IS_SRIOV_PF(xe) && !IS_ENABLED(CONFIG_DRM_XE_DEBUG)))) {
		drm_dbg(&xe->drm, "wedged_mode: (%u) %s mode is not supported for %s\n",
			mode, xe_wedged_mode_to_string(mode),
			xe_sriov_mode_to_string(xe_device_sriov_mode(xe)));
		return -EPERM;
	}

	return 0;
}

/**
 * xe_wedged_mode_to_string() - Convert enum value to string.
 * @mode: the &xe_wedged_mode to convert
 *
 * Return: wedged mode as a user friendly string.
 */
const char *xe_wedged_mode_to_string(enum xe_wedged_mode mode)
{
	switch (mode) {
	case XE_WEDGED_MODE_NEVER:
		return "never";
	case XE_WEDGED_MODE_UPON_CRITICAL_ERROR:
		return "upon-critical-error";
	case XE_WEDGED_MODE_UPON_ANY_HANG_NO_RESET:
		return "upon-any-hang-no-reset";
	default:
		return "<invalid>";
	}
}

/**
 * xe_device_asid_to_vm() - Find VM from ASID
 * @xe: the &xe_device
 * @asid: Address space ID
 *
 * Find a VM from the ASID and take a reference to the VM, which the caller
 * must drop. Reclaim safe.
 *
 * Return: VM on success, ERR_PTR on failure
 */
struct xe_vm *xe_device_asid_to_vm(struct xe_device *xe, u32 asid)
{
	struct xe_vm *vm;

	down_read(&xe->usm.lock);
	vm = xa_load(&xe->usm.asid_to_vm, asid);
	if (vm)
		xe_vm_get(vm);
	else
		vm = ERR_PTR(-EINVAL);
	up_read(&xe->usm.lock);

	return vm;
}

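/*
 * Usage sketch (illustrative only): callers resolving an ASID must balance
 * the reference taken here, e.g.:
 *
 *	vm = xe_device_asid_to_vm(xe, asid);
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 *	...
 *	xe_vm_put(vm);
 */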