// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <generated/utsrelease.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
#include "vpu_boot_api.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
#endif

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler, 1 - Use HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
	xa_destroy(&file_priv->cmdq_xa);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}
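
/*
 * Reference-counting sketch (illustrative call site, not taken from this
 * file): code that stores file_priv beyond a single ioctl takes a reference
 * and later drops it through the storing pointer; ivpu_file_priv_put()
 * clears that pointer before the kref_put() so no stale pointer survives
 * the release:
 *
 *	job->file_priv = ivpu_file_priv_get(file_priv);
 *	...
 *	ivpu_file_priv_put(&job->file_priv);
 */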

bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
	switch (capability) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		return true;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		return true;
	case DRM_IVPU_CAP_MANAGE_CMDQ:
		return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
	default:
		return false;
	}
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_dpu_max_freq_get(vdev);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		args->value = ivpu_is_capable(vdev, args->index);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}
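
/*
 * Userspace usage sketch (illustrative, not part of the driver): parameters
 * are queried through DRM_IOCTL_IVPU_GET_PARAM on an open accel node; "fd"
 * below is assumed to be a descriptor for /dev/accel/accel*:
 *
 *	struct drm_ivpu_param args = { .param = DRM_IVPU_PARAM_DEVICE_ID };
 *
 *	if (ioctl(fd, DRM_IOCTL_IVPU_GET_PARAM, &args) == 0)
 *		printf("PCI device id: 0x%llx\n", args.value);
 */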

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

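	/* No settable parameters are defined yet, so every request falls
	 * through to -EINVAL.
	 */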
	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);

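	/* Illustrative note (assuming the IVPU_JOB_ID_* masks partition a
	 * job ID into a context field and a per-context counter): the range
	 * below reserves every job ID whose context field encodes this file
	 * handle, e.g. ctx.id 5 stores 4 in the upper field and spans the
	 * full lower field.
	 */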
	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;

	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
};

static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
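	/* The device IRQ is still disabled here (ivpu_boot() enables it only
	 * after this wait succeeds), so poll the IPC IRQ handler by hand
	 * until the ready message arrives or the boot timeout expires.
	 */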
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}

static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable hw scheduler: %d\n", ret);
			return ret;
		}
	}

	return ret;
}
365 
366 /**
367  * ivpu_boot() - Start VPU firmware
368  * @vdev: VPU device
369  *
370  * This function is paired with ivpu_shutdown() but it doesn't power up the
371  * VPU because power up has to be called very early in ivpu_probe().
372  */
ivpu_boot(struct ivpu_device * vdev)373 int ivpu_boot(struct ivpu_device *vdev)
374 {
375 	int ret;
376 
377 	/* Update boot params located at first 4KB of FW memory */
378 	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem));
379 
380 	ret = ivpu_hw_boot_fw(vdev);
381 	if (ret) {
382 		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
383 		return ret;
384 	}
385 
386 	ret = ivpu_wait_for_ready(vdev);
387 	if (ret) {
388 		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
389 		goto err_diagnose_failure;
390 	}
391 
392 	ivpu_hw_irq_clear(vdev);
393 	enable_irq(vdev->irq);
394 	ivpu_hw_irq_enable(vdev);
395 	ivpu_ipc_enable(vdev);
396 
397 	if (ivpu_fw_is_cold_boot(vdev)) {
398 		ret = ivpu_pm_dct_init(vdev);
399 		if (ret)
400 			goto err_disable_ipc;
401 
402 		ret = ivpu_hw_sched_init(vdev);
403 		if (ret)
404 			goto err_disable_ipc;
405 	}
406 
407 	return 0;
408 
409 err_disable_ipc:
410 	ivpu_ipc_disable(vdev);
411 	ivpu_hw_irq_disable(vdev);
412 	disable_irq(vdev->irq);
413 err_diagnose_failure:
414 	ivpu_hw_diagnose_failure(vdev);
415 	ivpu_mmu_evtq_dump(vdev);
416 	ivpu_dev_coredump(vdev);
417 	return ret;
418 }

void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	flush_work(&vdev->irq_ipc_work);
	flush_work(&vdev->irq_dct_work);
	flush_work(&vdev->context_abort_work);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner		= THIS_MODULE,
	DRM_ACCEL_FOPS,
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import = ivpu_gem_prime_import,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,

	.major = 1,
};

static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate an MSI IRQ: %d\n", ret);
		return ret;
	}

	INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
	INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

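	/* Request the IRQ disabled (IRQF_NO_AUTOEN); ivpu_boot() calls
	 * enable_irq() only once the firmware is ready to raise interrupts.
	 */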
	ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ: %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require the standard 10 ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

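	/* The DMA mask mirrors the NPU address width: 48 bits on IP
	 * generation 40xx and later, 38 bits on older generations.
	 */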
	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	INIT_LIST_HEAD(&vdev->bo_list);

	vdev->db_limit.min = IVPU_MIN_DB;
	vdev->db_limit.max = IVPU_MAX_DB;

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ivpu_mmu_global_context_init(vdev);

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_cancel_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}
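
/* Supported platforms: Meteor Lake, Arrow Lake, Lunar Lake and Panther Lake
 * NPUs; the PCI_DEVICE_ID_* values are driver-local defines from ivpu_drv.h.
 */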
static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);