// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "vpu_boot_api.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_pm.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR __stringify(DRM_IVPU_DRIVER_MAJOR) "." \
			   __stringify(DRM_IVPU_DRIVER_MINOR)
#endif

static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set VPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set VPU frequency");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0644);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

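/* Take an extra reference on @file_priv; the caller must already hold one. */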
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

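/*
 * Look up a file_priv by context ID and take a reference.
 * Returns NULL if the context does not exist or is already being released.
 */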
struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
{
	struct ivpu_file_priv *file_priv;

	xa_lock_irq(&vdev->context_xa);
	file_priv = xa_load(&vdev->context_xa, id);
	/* file_priv may still be in context_xa during file_priv_release() */
	if (file_priv && !kref_get_unless_zero(&file_priv->ref))
		file_priv = NULL;
	xa_unlock_irq(&vdev->context_xa);

	if (file_priv)
		ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
			 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);

	ivpu_cmdq_release_all(file_priv);
	ivpu_jsm_context_release(vdev, file_priv->ctx.id);
	ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
	drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

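/* Drop a reference taken with ivpu_file_priv_get*() and clear the caller's pointer. */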
void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	drm_WARN_ON(&vdev->drm, !file_priv);

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

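/* Report optional device capabilities queried via DRM_IVPU_PARAM_CAPABILITIES. */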
static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 0;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

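/*
 * Read the current PLL frequency without waking the device.
 * Reports 0 when the VPU is runtime suspended.
 */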
static int ivpu_get_core_clock_rate(struct ivpu_device *vdev, u64 *clk_rate)
{
	int ret;

	ret = ivpu_rpm_get_if_active(vdev);
	if (ret < 0)
		return ret;

	*clk_rate = ret ? ivpu_hw_reg_pll_freq_get(vdev) : 0;

	if (ret)
		ivpu_rpm_put(vdev);

	return 0;
}

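/* Handler for DRM_IOCTL_IVPU_GET_PARAM: report device, context and firmware properties. */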
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		ret = ivpu_get_core_clock_rate(vdev, &args->value);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
		args->value = file_priv->priority;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

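/* Handler for DRM_IOCTL_IVPU_SET_PARAM: only the context priority is writable. */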
static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
		if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
			file_priv->priority = args->value;
		else
			ret = -EINVAL;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

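/*
 * Per-file open: allocate a context ID (SSID), create the file_priv and its
 * MMU user context, then publish it in context_xa.
 */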
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	void *old;
	int ret;

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		return ret;
	}

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_xa_erase;
	}

	file_priv->vdev = vdev;
	file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);

	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
	if (ret)
		goto err_mutex_destroy;

	old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
	if (xa_is_err(old)) {
		ret = xa_err(old);
		ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
		goto err_ctx_fini;
	}

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	file->driver_priv = file_priv;
	return 0;

err_ctx_fini:
	ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
err_mutex_destroy:
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_xa_erase:
	xa_erase_irq(&vdev->context_xa, ctx_id);
	return ret;
}

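/* Per-file close: drop the file reference; the final put tears down the context. */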
static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_file_priv_put(&file_priv);
}

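/*
 * Userspace reaches the handlers below through the accel char device
 * (typically /dev/accel/accelN). A minimal sketch, assuming the uapi header
 * <drm/ivpu_accel.h> and an already opened fd:
 *
 *	struct drm_ivpu_param args = { .param = DRM_IVPU_PARAM_DEVICE_ID };
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
 *		printf("device id: 0x%llx\n", (unsigned long long)args.value);
 */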
static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
};

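/*
 * Poll the IPC channel for the firmware boot message until vdev->timeout.boot
 * expires. Returns 0 on a valid ready message, -EIO on an unexpected one, or
 * the last ivpu_ipc_receive() error (typically -ETIMEDOUT).
 */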
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev, NULL);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid VPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
	else
		ivpu_hw_diagnose_failure(vdev);

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located at first 4KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		return ret;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);
	return 0;
}

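/* Quiesce the VPU before a reset: mask IRQs, stop IPC processing and disable the MMU. */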
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

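/* Counterpart of ivpu_boot(): quiesce the device and power the VPU down. */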
int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	ivpu_prepare_for_reset(vdev);

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner		= THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,
	.date = DRIVER_DATE,
	.major = DRM_IVPU_DRIVER_MAJOR,
	.minor = DRM_IVPU_DRIVER_MINOR,
};

static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;

	return ivpu_ipc_irq_thread_handler(vdev);
}

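/*
 * Allocate a single MSI/MSI-X vector and request a threaded IRQ. The line is
 * requested with IRQF_NO_AUTOEN and only enabled from ivpu_boot().
 */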
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, vdev->hw->ops->irq_handler,
					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

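/*
 * Map BAR0 (VPU registers) and BAR4 (buttress registers), configure DMA
 * according to the HW generation and enable bus mastering.
 */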
static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* VPU 37XX does not require 10ms D3hot delay */
	if (ivpu_hw_gen(vdev) == IVPU_HW_37XX)
		pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

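/*
 * One-time device initialization: allocate driver state, map PCI resources,
 * power up the VPU, bring up the MMU, load the firmware and boot it.
 * Called from ivpu_probe() before the DRM device is registered.
 */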
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_gen(vdev) >= IVPU_HW_40XX) {
		vdev->hw->ops = &ivpu_hw_40xx_ops;
		vdev->hw->dma_bits = 48;
	} else {
		vdev->hw->ops = &ivpu_hw_37xx_ops;
		vdev->hw->dma_bits = 38;
	}

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
	INIT_LIST_HEAD(&vdev->bo_list);

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_info_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_power_down;

	ret = ivpu_mmu_global_context_init(vdev);
	if (ret)
		goto err_power_down;

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_power_down:
	ivpu_hw_power_down(vdev);
	if (IVPU_WA(d3hot_after_power_off))
		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
err_xa_destroy:
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

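/* Tear down everything set up by ivpu_dev_init(). */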
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_pm_disable(vdev);
	ivpu_shutdown(vdev);
	if (IVPU_WA(d3hot_after_power_off))
		pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_pm_cancel_recovery(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

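/* PCI probe: create the DRM/accel device, initialize the VPU and register the device. */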
static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);