// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <generated/utsrelease.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
#include "vpu_boot_api.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif

static struct lock_class_key submitted_jobs_xa_lock_class_key;

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

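/*
 * Detach a file context from the device: release its command queues, unbind
 * its buffer objects, tear down its MMU context and drop it from context_xa.
 */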
static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

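/*
 * Final kref release callback: unbind the context while the device is kept
 * runtime-resumed, then free the file_priv.
 */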
static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

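/* Report which optional driver features are available to user space */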
static int ivpu_get_capabilities(struct ivpu_device *vdev, struct drm_ivpu_param *args)
{
	switch (args->index) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		args->value = 1;
		break;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		args->value = 1;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_ratio_to_freq(vdev, vdev->hw->pll.max_ratio);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		ret = ivpu_get_capabilities(vdev, args);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}

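/*
 * DRM open callback: allocate a file_priv, assign it a context ID from
 * context_xa and set up a per-process MMU context and job ID range.
 */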
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
	if (ret)
		goto err_xa_erase;

	file_priv->default_job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK,
						      (file_priv->ctx.id - 1));
	file_priv->default_job_limit.max = file_priv->default_job_limit.min | IVPU_JOB_ID_JOB_MASK;
	file_priv->job_limit = file_priv->default_job_limit;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_xa_erase:
	xa_erase_irq(&vdev->context_xa, ctx_id);
err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
};

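/*
 * Poll the boot IPC channel until the firmware posts its ready message or the
 * boot timeout expires. Interrupts are not enabled yet at this point, so the
 * IPC IRQ handler is called directly from the polling loop.
 */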
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}

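/* Configure firmware priority bands when the HW scheduler is in use */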
static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable HW scheduler: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown(), but it doesn't power up the
 * VPU because the power-up call must happen very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	/* Update boot params located in the first 4 KB of FW memory */
	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		goto err_diagnose_failure;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);

	if (ivpu_fw_is_cold_boot(vdev)) {
		ret = ivpu_pm_dct_init(vdev);
		if (ret)
			goto err_diagnose_failure;

		ret = ivpu_hw_sched_init(vdev);
		if (ret)
			goto err_diagnose_failure;
	}

	return 0;

err_diagnose_failure:
	ivpu_hw_diagnose_failure(vdev);
	ivpu_mmu_evtq_dump(vdev);
	ivpu_dev_coredump(vdev);
	return ret;
}

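/* Quiesce the device before a reset: mask IRQs and stop IPC and MMU processing */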
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

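/* Power the NPU down and put it in D3hot; counterpart of ivpu_boot() */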
int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if the NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner		= THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,

#ifdef DRIVER_DATE
	.date = DRIVER_DATE,
	.major = DRIVER_MAJOR,
	.minor = DRIVER_MINOR,
	.patchlevel = DRIVER_PATCHLEVEL,
#else
	.date = UTS_RELEASE,
	.major = 1,
#endif
};

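/* Abort every context that triggered an MMU fault and was not aborted yet */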
static void ivpu_context_abort_invalid(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
		if (!file_priv->has_mmu_faults || file_priv->aborted)
			continue;

		mutex_lock(&file_priv->lock);
		ivpu_context_abort_locked(file_priv);
		file_priv->aborted = true;
		mutex_unlock(&file_priv->lock);
	}

	mutex_unlock(&vdev->context_list_lock);
}

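/*
 * Threaded IRQ bottom half: drain the IRQ source fifo filled by the hard IRQ
 * handler and dispatch each source to its handler.
 */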
static irqreturn_t ivpu_irq_thread_handler(int irq, void *arg)
{
	struct ivpu_device *vdev = arg;
	u8 irq_src;

	if (kfifo_is_empty(&vdev->hw->irq.fifo))
		return IRQ_NONE;

	while (kfifo_get(&vdev->hw->irq.fifo, &irq_src)) {
		switch (irq_src) {
		case IVPU_HW_IRQ_SRC_IPC:
			ivpu_ipc_irq_thread_handler(vdev);
			break;
		case IVPU_HW_IRQ_SRC_MMU_EVTQ:
			ivpu_context_abort_invalid(vdev);
			break;
		case IVPU_HW_IRQ_SRC_DCT:
			ivpu_pm_dct_irq_thread_handler(vdev);
			break;
		default:
			ivpu_err_ratelimited(vdev, "Unknown IRQ source: %u\n", irq_src);
			break;
		}
	}

	return IRQ_HANDLED;
}

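/*
 * Allocate a single MSI/MSI-X vector and request a threaded IRQ for it.
 * IRQF_NO_AUTOEN keeps the line disabled until ivpu_boot() enables it.
 */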
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate an MSI IRQ: %d\n", ret);
		return ret;
	}

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_threaded_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
					ivpu_irq_thread_handler, IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ: %d\n", ret);

	return ret;
}

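/* Map BAR0 (RegV) and BAR4 (RegB), set the DMA mask and enable bus mastering */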
static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* The NPU does not require the standard 10 ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

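/*
 * One-shot device bring-up: allocate driver state, map PCI resources, power
 * the NPU up, initialize the MMU, load the firmware and boot it. On success
 * the device is fully operational and runtime PM is enabled.
 */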
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
	INIT_LIST_HEAD(&vdev->bo_list);

	vdev->default_db_limit.min = IVPU_MIN_DB;
	vdev->default_db_limit.max = IVPU_MAX_DB;
	vdev->db_limit = vdev->default_db_limit;

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers, which are accessible before power-up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of the init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ret = ivpu_mmu_global_context_init(vdev);
	if (ret)
		goto err_shutdown;

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

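/* Tear down the device in reverse order of ivpu_dev_init() and ivpu_boot() */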
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_cancel_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);