// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <generated/utsrelease.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
#include "vpu_boot_api.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
#endif

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 37XX - 50XX), 1 - Use HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");

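/* Take an additional reference on @file_priv; paired with ivpu_file_priv_put() */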
struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

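/*
 * Detach a bound file context from the device: release its command queues,
 * unbind all BOs and tear down the MMU context. Does nothing if the context
 * has already been unbound.
 */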
static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

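/* Final kref release callback - frees @file_priv once the last reference is dropped */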
static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
	xa_destroy(&file_priv->cmdq_xa);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
	switch (capability) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
		return true;
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
		return true;
	case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR:
		return true;
	case DRM_IVPU_CAP_MANAGE_CMDQ:
		return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
	default:
		return false;
	}
}

static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_dpu_max_freq_get(vdev);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = ivpu_get_context_count(vdev);
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		args->value = ivpu_is_capable(vdev, args->index);
		break;
	case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
		args->value = ivpu_fw_preempt_buf_size(vdev);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_dev_exit;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);

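	/*
	 * Partition the global job ID space per context: the context ID is
	 * encoded in the upper bits of every job ID, so IDs from different
	 * file contexts can never collide.
	 */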
	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;

	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0),
};

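/*
 * Poll the IPC boot channel until the firmware posts its ready message or the
 * boot timeout expires. Interrupts may still be disabled here (they are only
 * enabled later in ivpu_boot()), so the IPC IRQ handler is invoked by hand on
 * every iteration.
 */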
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n",
			 ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}

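/* Configure HW scheduler priority bands on boot when the firmware uses HW scheduling mode */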
static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable hw scheduler: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));

	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));
	vdev->fw->last_boot_mode = vdev->fw->next_boot_mode;

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		goto err_diagnose_failure;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);

	if (!ivpu_fw_is_warm_boot(vdev)) {
		ret = ivpu_pm_dct_init(vdev);
		if (ret)
			goto err_disable_ipc;

		ret = ivpu_hw_sched_init(vdev);
		if (ret)
			goto err_disable_ipc;
	}

	return 0;

err_disable_ipc:
	ivpu_ipc_disable(vdev);
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
err_diagnose_failure:
	ivpu_hw_diagnose_failure(vdev);
	ivpu_mmu_evtq_dump(vdev);
	ivpu_dev_coredump(vdev);
	return ret;
}

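/* Quiesce the device before reset: mask IRQs, flush pending work, disable IPC and MMU */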
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	flush_work(&vdev->irq_ipc_work);
	flush_work(&vdev->irq_dct_work);
	flush_work(&vdev->context_abort_work);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
};

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import = ivpu_gem_prime_import,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_memory_stats,
#endif

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,

	.major = 1,
};

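/*
 * Allocate a single MSI/MSI-X vector and install the top-half IRQ handler.
 * IRQF_NO_AUTOEN keeps the IRQ line disabled until ivpu_boot() explicitly
 * enables it once the firmware is ready.
 */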
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate a MSI IRQ: %d\n", ret);
		return ret;
	}

	INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
	INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require 10ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

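/*
 * One-time device bring-up: allocate sub-structures, map PCI BARs, power up
 * the NPU early so the rest of init can access its registers, then bring up
 * the MMU, firmware and IPC layers and boot the firmware.
 */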
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	atomic_set(&vdev->job_timeout_counter, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	INIT_LIST_HEAD(&vdev->bo_list);

	vdev->db_limit.min = IVPU_MIN_DB;
	vdev->db_limit.max = IVPU_MAX_DB;

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ivpu_mmu_global_context_init(vdev);

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

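/*
 * Teardown counterpart of ivpu_dev_init(): abort pending jobs, power the NPU
 * down and release all resources.
 */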
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_disable_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
	{ }
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);