--- a/ivpu_drv.c (60a2f25de7b8b785baee2932db932ae9a5b8c86d)
+++ b/ivpu_drv.c (8a27ad81f7d3a4bc30e00e334a369b69c5f8da90)
 // SPDX-License-Identifier: GPL-2.0-only
 /*
  * Copyright (C) 2020-2024 Intel Corporation
  */
 
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/pci.h>
--- 12 unchanged lines hidden ---
 #include "ivpu_fw_log.h"
 #include "ivpu_gem.h"
 #include "ivpu_hw.h"
 #include "ivpu_ipc.h"
 #include "ivpu_job.h"
 #include "ivpu_jsm_msg.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
+#include "ivpu_ms.h"
 #include "ivpu_pm.h"
+#include "ivpu_sysfs.h"
 
 #ifndef DRIVER_VERSION_STR
 #define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
                            __stringify(DRM_IVPU_DRIVER_MINOR) "."
 #endif
 
 static struct lock_class_key submitted_jobs_xa_lock_class_key;
 
--- 8 unchanged lines hidden ---
 u8 ivpu_pll_min_ratio;
 module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
 MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");
 
 u8 ivpu_pll_max_ratio = U8_MAX;
 module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
 MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");
 
+int ivpu_sched_mode;
+module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
+MODULE_PARM_DESC(sched_mode, "Scheduler mode: 0 - Default scheduler, 1 - Force HW scheduler");
+
 bool ivpu_disable_mmu_cont_pages;
 module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
 MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");
 
+bool ivpu_force_snoop;
+module_param_named(force_snoop, ivpu_force_snoop, bool, 0644);
+MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
+
 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
 {
         struct ivpu_device *vdev = file_priv->vdev;
 
         kref_get(&file_priv->ref);
 
         ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
                  file_priv->ctx.id, kref_read(&file_priv->ref));
 
         return file_priv;
 }
 
 static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
 {
         mutex_lock(&file_priv->lock);
         if (file_priv->bound) {
                 ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
 
                 ivpu_cmdq_release_all_locked(file_priv);
-                ivpu_jsm_context_release(vdev, file_priv->ctx.id);
                 ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
                 ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
                 file_priv->bound = false;
                 drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
         }
         mutex_unlock(&file_priv->lock);
 }
 
--- 6 unchanged lines hidden ---
                  file_priv->ctx.id, (bool)file_priv->bound);
 
         pm_runtime_get_sync(vdev->drm.dev);
         mutex_lock(&vdev->context_list_lock);
         file_priv_unbind(vdev, file_priv);
         mutex_unlock(&vdev->context_list_lock);
         pm_runtime_put_autosuspend(vdev->drm.dev);
 
+        mutex_destroy(&file_priv->ms_lock);
         mutex_destroy(&file_priv->lock);
         kfree(file_priv);
 }
 
 void ivpu_file_priv_put(struct ivpu_file_priv **link)
 {
         struct ivpu_file_priv *file_priv = *link;
         struct ivpu_device *vdev = file_priv->vdev;
--- 6 unchanged lines hidden ---
         *link = NULL;
         kref_put(&file_priv->ref, file_priv_release);
 }
 
 static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
 {
         switch (args->index) {
         case DRM_IVPU_CAP_METRIC_STREAMER:
-                args->value = 0;
+                args->value = 1;
                 break;
         case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
                 args->value = 1;
                 break;
         default:
                 return -EINVAL;
         }
 
--- 92 unchanged lines hidden ---
                 return -ENODEV;
 
         file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
         if (!file_priv) {
                 ret = -ENOMEM;
                 goto err_dev_exit;
         }
 
+        INIT_LIST_HEAD(&file_priv->ms_instance_list);
+
         file_priv->vdev = vdev;
         file_priv->bound = true;
         kref_init(&file_priv->ref);
         mutex_init(&file_priv->lock);
+        mutex_init(&file_priv->ms_lock);
 
         mutex_lock(&vdev->context_list_lock);
 
         ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
                            vdev->context_xa_limit, GFP_KERNEL);
         if (ret) {
                 ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
                 goto err_unlock;
--- 12 unchanged lines hidden ---
                  ctx_id, current->comm, task_pid_nr(current));
 
         return 0;
 
 err_xa_erase:
         xa_erase_irq(&vdev->context_xa, ctx_id);
 err_unlock:
         mutex_unlock(&vdev->context_list_lock);
+        mutex_destroy(&file_priv->ms_lock);
         mutex_destroy(&file_priv->lock);
         kfree(file_priv);
 err_dev_exit:
         drm_dev_exit(idx);
         return ret;
 }
 
 static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
 {
         struct ivpu_file_priv *file_priv = file->driver_priv;
         struct ivpu_device *vdev = to_ivpu_device(dev);
 
         ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
                  file_priv->ctx.id, current->comm, task_pid_nr(current));
 
+        ivpu_ms_cleanup(file_priv);
         ivpu_file_priv_put(&file_priv);
 }
 
 static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
         DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
         DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
         DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
         DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
         DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
         DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
+        DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
+        DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
+        DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
+        DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
 };
 
 static int ivpu_wait_for_ready(struct ivpu_device *vdev)
 {
         struct ivpu_ipc_consumer cons;
         struct ivpu_ipc_hdr ipc_hdr;
         unsigned long timeout;
         int ret;
--- 22 unchanged lines hidden ---
         }
 
         if (!ret)
                 ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");
 
         return ret;
 }
 
+static int ivpu_hw_sched_init(struct ivpu_device *vdev)
+{
+        int ret = 0;
+
+        if (vdev->hw->sched_mode == VPU_SCHEDULING_MODE_HW) {
+                ret = ivpu_jsm_hws_setup_priority_bands(vdev);
+                if (ret) {
+                        ivpu_err(vdev, "Failed to enable hw scheduler: %d", ret);
+                        return ret;
+                }
+        }
+
+        return ret;
+}
+
 /**
  * ivpu_boot() - Start VPU firmware
  * @vdev: VPU device
  *
  * This function is paired with ivpu_shutdown() but it doesn't power up the
  * VPU because power up has to be called very early in ivpu_probe().
  */
 int ivpu_boot(struct ivpu_device *vdev)
--- 17 unchanged lines hidden ---
                 ivpu_fw_log_dump(vdev);
                 return ret;
         }
 
         ivpu_hw_irq_clear(vdev);
         enable_irq(vdev->irq);
         ivpu_hw_irq_enable(vdev);
         ivpu_ipc_enable(vdev);
+
+        if (ivpu_fw_is_cold_boot(vdev))
+                return ivpu_hw_sched_init(vdev);
+
         return 0;
 }
 
 void ivpu_prepare_for_reset(struct ivpu_device *vdev)
 {
         ivpu_hw_irq_disable(vdev);
         disable_irq(vdev->irq);
         ivpu_ipc_disable(vdev);
--- 54 unchanged lines hidden ---
         int ret;
 
         ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
         if (ret < 0) {
                 ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
                 return ret;
         }
 
+        ivpu_irq_handlers_init(vdev);
+
         vdev->irq = pci_irq_vector(pdev, 0);
 
-        ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
+        ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
                                         ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
         if (ret)
                 ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);
 
         return ret;
 }
 
 static int ivpu_pci_init(struct ivpu_device *vdev)
--- 60 unchanged lines hidden ---
         vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
         if (!vdev->ipc)
                 return -ENOMEM;
 
         vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
         if (!vdev->pm)
                 return -ENOMEM;
 
-        if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
-                vdev->hw->ops = &ivpu_hw_40xx_ops;
+        if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
                 vdev->hw->dma_bits = 48;
-        } else {
-                vdev->hw->ops = &ivpu_hw_37xx_ops;
+        else
                 vdev->hw->dma_bits = 38;
-        }
 
         vdev->platform = IVPU_PLATFORM_INVALID;
         vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
         vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
         atomic64_set(&vdev->unique_id_counter, 0);
         xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
         xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
         xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
--- 12 unchanged lines hidden ---
         if (ret)
                 goto err_xa_destroy;
 
         ret = ivpu_irq_init(vdev);
         if (ret)
                 goto err_xa_destroy;
 
         /* Init basic HW info based on buttress registers which are accessible before power up */
-        ret = ivpu_hw_info_init(vdev);
+        ret = ivpu_hw_init(vdev);
         if (ret)
                 goto err_xa_destroy;
 
         /* Power up early so the rest of init code can access VPU registers */
         ret = ivpu_hw_power_up(vdev);
         if (ret)
                 goto err_shutdown;
 
--- 59 unchanged lines hidden ---
 }
 
 static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
         ivpu_pm_disable(vdev);
         ivpu_prepare_for_reset(vdev);
         ivpu_shutdown(vdev);
 
+        ivpu_ms_cleanup_all(vdev);
         ivpu_jobs_abort_all(vdev);
         ivpu_job_done_consumer_fini(vdev);
         ivpu_pm_cancel_recovery(vdev);
         ivpu_bo_unbind_all_user_contexts(vdev);
 
         ivpu_ipc_fini(vdev);
         ivpu_fw_fini(vdev);
         ivpu_mmu_reserved_context_fini(vdev);
--- 26 unchanged lines hidden ---
 
         pci_set_drvdata(pdev, vdev);
 
         ret = ivpu_dev_init(vdev);
         if (ret)
                 return ret;
 
         ivpu_debugfs_init(vdev);
+        ivpu_sysfs_init(vdev);
 
         ret = drm_dev_register(&vdev->drm, 0);
         if (ret) {
                 dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
                 ivpu_dev_fini(vdev);
         }
 
         return ret;
--- 37 unchanged lines hidden ---