// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2025 Intel Corporation
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
#include <generated/utsrelease.h>

#include <drm/drm_accel.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_prime.h>

#include "ivpu_coredump.h"
#include "ivpu_debugfs.h"
#include "ivpu_drv.h"
#include "ivpu_fw.h"
#include "ivpu_fw_log.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_mmu.h"
#include "ivpu_mmu_context.h"
#include "ivpu_ms.h"
#include "ivpu_pm.h"
#include "ivpu_sysfs.h"
#include "vpu_boot_api.h"

#ifndef DRIVER_VERSION_STR
#define DRIVER_VERSION_STR "1.0.0 " UTS_RELEASE
#endif

int ivpu_dbg_mask;
module_param_named(dbg_mask, ivpu_dbg_mask, int, 0644);
MODULE_PARM_DESC(dbg_mask, "Driver debug mask. See IVPU_DBG_* macros.");

int ivpu_test_mode;
#if IS_ENABLED(CONFIG_DRM_ACCEL_IVPU_DEBUG)
module_param_named_unsafe(test_mode, ivpu_test_mode, int, 0644);
MODULE_PARM_DESC(test_mode, "Test mode mask. See IVPU_TEST_MODE_* macros.");
#endif

u8 ivpu_pll_min_ratio;
module_param_named(pll_min_ratio, ivpu_pll_min_ratio, byte, 0644);
MODULE_PARM_DESC(pll_min_ratio, "Minimum PLL ratio used to set NPU frequency");

u8 ivpu_pll_max_ratio = U8_MAX;
module_param_named(pll_max_ratio, ivpu_pll_max_ratio, byte, 0644);
MODULE_PARM_DESC(pll_max_ratio, "Maximum PLL ratio used to set NPU frequency");

int ivpu_sched_mode = IVPU_SCHED_MODE_AUTO;
module_param_named(sched_mode, ivpu_sched_mode, int, 0444);
MODULE_PARM_DESC(sched_mode, "Scheduler mode: -1 - Use default scheduler, 0 - Use OS scheduler (supported on 37XX - 50XX), 1 - Use HW scheduler");

bool ivpu_disable_mmu_cont_pages;
module_param_named(disable_mmu_cont_pages, ivpu_disable_mmu_cont_pages, bool, 0444);
MODULE_PARM_DESC(disable_mmu_cont_pages, "Disable MMU contiguous pages optimization");

bool ivpu_force_snoop;
module_param_named(force_snoop, ivpu_force_snoop, bool, 0444);
MODULE_PARM_DESC(force_snoop, "Force snooping for NPU host memory access");

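/*
 * Per-UID resource limits. Entries live in the vdev->user_limits hash keyed
 * by UID; the root user may consume all contexts and doorbells, other users
 * are capped at half of each. Must be called with user_limits_lock held
 * (ivpu_user_limits_get() holds it via guard(mutex)).
 */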
static struct ivpu_user_limits *ivpu_user_limits_alloc(struct ivpu_device *vdev, uid_t uid)
{
	struct ivpu_user_limits *limits;

	limits = kzalloc(sizeof(*limits), GFP_KERNEL);
	if (!limits)
		return ERR_PTR(-ENOMEM);

	kref_init(&limits->ref);
	atomic_set(&limits->db_count, 0);
	limits->vdev = vdev;
	limits->uid = uid;

	/* Allow the root user to allocate all contexts and doorbells */
	if (uid == 0) {
		limits->max_ctx_count = ivpu_get_context_count(vdev);
		limits->max_db_count = ivpu_get_doorbell_count(vdev);
	} else {
		limits->max_ctx_count = ivpu_get_context_count(vdev) / 2;
		limits->max_db_count = ivpu_get_doorbell_count(vdev) / 2;
	}

	hash_add(vdev->user_limits, &limits->hash_node, uid);

	return limits;
}

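/*
 * Look up (or lazily create) the limits entry for the current UID and take a
 * reference. The kref also serves as the per-user context count, so opening
 * another context is refused with -EMFILE once it reaches max_ctx_count.
 */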
static struct ivpu_user_limits *ivpu_user_limits_get(struct ivpu_device *vdev)
{
	struct ivpu_user_limits *limits;
	uid_t uid = current_uid().val;

	guard(mutex)(&vdev->user_limits_lock);

	hash_for_each_possible(vdev->user_limits, limits, hash_node, uid) {
		if (limits->uid == uid) {
			if (kref_read(&limits->ref) >= limits->max_ctx_count) {
				ivpu_dbg(vdev, IOCTL, "User %u exceeded max ctx count %u\n", uid,
					 limits->max_ctx_count);
				return ERR_PTR(-EMFILE);
			}

			kref_get(&limits->ref);
			return limits;
		}
	}

	return ivpu_user_limits_alloc(vdev, uid);
}

static void ivpu_user_limits_release(struct kref *ref)
{
	struct ivpu_user_limits *limits = container_of(ref, struct ivpu_user_limits, ref);
	struct ivpu_device *vdev = limits->vdev;

	lockdep_assert_held(&vdev->user_limits_lock);
	drm_WARN_ON(&vdev->drm, atomic_read(&limits->db_count));
	hash_del(&limits->hash_node);
	kfree(limits);
}

static void ivpu_user_limits_put(struct ivpu_device *vdev, struct ivpu_user_limits *limits)
{
	guard(mutex)(&vdev->user_limits_lock);
	kref_put(&limits->ref, ivpu_user_limits_release);
}

struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
{
	struct ivpu_device *vdev = file_priv->vdev;

	kref_get(&file_priv->ref);

	ivpu_dbg(vdev, KREF, "file_priv get: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	return file_priv;
}

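/*
 * Detach a file context from the device: release its command queues, unbind
 * its buffers and tear down its MMU context. The bound flag makes the unbind
 * idempotent, so this is safe to call from both postclose and device fini.
 */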
static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
{
	mutex_lock(&file_priv->lock);
	if (file_priv->bound) {
		ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);

		ivpu_cmdq_release_all_locked(file_priv);
		ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
		ivpu_mmu_context_fini(vdev, &file_priv->ctx);
		file_priv->bound = false;
		drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
	}
	mutex_unlock(&file_priv->lock);
}

static void file_priv_release(struct kref *ref)
{
	struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
		 file_priv->ctx.id, (bool)file_priv->bound);

	pm_runtime_get_sync(vdev->drm.dev);
	mutex_lock(&vdev->context_list_lock);
	file_priv_unbind(vdev, file_priv);
	drm_WARN_ON(&vdev->drm, !xa_empty(&file_priv->cmdq_xa));
	xa_destroy(&file_priv->cmdq_xa);
	mutex_unlock(&vdev->context_list_lock);
	pm_runtime_put_autosuspend(vdev->drm.dev);

	ivpu_user_limits_put(vdev, file_priv->user_limits);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
}

void ivpu_file_priv_put(struct ivpu_file_priv **link)
{
	struct ivpu_file_priv *file_priv = *link;
	struct ivpu_device *vdev = file_priv->vdev;

	ivpu_dbg(vdev, KREF, "file_priv put: ctx %u refcount %u\n",
		 file_priv->ctx.id, kref_read(&file_priv->ref));

	*link = NULL;
	kref_put(&file_priv->ref, file_priv_release);
}

bool ivpu_is_capable(struct ivpu_device *vdev, u32 capability)
{
	switch (capability) {
	case DRM_IVPU_CAP_METRIC_STREAMER:
	case DRM_IVPU_CAP_DMA_MEMORY_RANGE:
	case DRM_IVPU_CAP_BO_CREATE_FROM_USERPTR:
		return true;
	case DRM_IVPU_CAP_MANAGE_CMDQ:
		return vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW;
	default:
		return false;
	}
}

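/*
 * GET_PARAM queries run under drm_dev_enter() so that a concurrent
 * drm_dev_unplug() on device removal cannot race with firmware or
 * register access performed while servicing the ioctl.
 */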
static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct drm_ivpu_param *args = data;
	int ret = 0;
	int idx;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	switch (args->param) {
	case DRM_IVPU_PARAM_DEVICE_ID:
		args->value = pdev->device;
		break;
	case DRM_IVPU_PARAM_DEVICE_REVISION:
		args->value = pdev->revision;
		break;
	case DRM_IVPU_PARAM_PLATFORM_TYPE:
		args->value = vdev->platform;
		break;
	case DRM_IVPU_PARAM_CORE_CLOCK_RATE:
		args->value = ivpu_hw_dpu_max_freq_get(vdev);
		break;
	case DRM_IVPU_PARAM_NUM_CONTEXTS:
		args->value = file_priv->user_limits->max_ctx_count;
		break;
	case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
		args->value = vdev->hw->ranges.user.start;
		break;
	case DRM_IVPU_PARAM_CONTEXT_ID:
		args->value = file_priv->ctx.id;
		break;
	case DRM_IVPU_PARAM_FW_API_VERSION:
		if (args->index < VPU_FW_API_VER_NUM) {
			struct vpu_firmware_header *fw_hdr;

			fw_hdr = (struct vpu_firmware_header *)vdev->fw->file->data;
			args->value = fw_hdr->api_version[args->index];
		} else {
			ret = -EINVAL;
		}
		break;
	case DRM_IVPU_PARAM_ENGINE_HEARTBEAT:
		ret = ivpu_jsm_get_heartbeat(vdev, args->index, &args->value);
		break;
	case DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID:
		args->value = (u64)atomic64_inc_return(&vdev->unique_id_counter);
		break;
	case DRM_IVPU_PARAM_TILE_CONFIG:
		args->value = vdev->hw->tile_fuse;
		break;
	case DRM_IVPU_PARAM_SKU:
		args->value = vdev->hw->sku;
		break;
	case DRM_IVPU_PARAM_CAPABILITIES:
		args->value = ivpu_is_capable(vdev, args->index);
		break;
	case DRM_IVPU_PARAM_PREEMPT_BUFFER_SIZE:
		args->value = ivpu_fw_preempt_buf_size(vdev);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	drm_dev_exit(idx);
	return ret;
}

static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct drm_ivpu_param *args = data;
	int ret = 0;

	switch (args->param) {
	default:
		ret = -EINVAL;
	}

	return ret;
}

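/*
 * On open: charge the new context against the per-UID limits, allocate a
 * context ID from context_xa and set up a per-process MMU context. Job IDs
 * encode the context ID in their upper bits, hence the per-file job_limit
 * range derived from ctx.id below.
 */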
static int ivpu_open(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_device *vdev = to_ivpu_device(dev);
	struct ivpu_file_priv *file_priv;
	struct ivpu_user_limits *limits;
	u32 ctx_id;
	int idx, ret;

	if (!drm_dev_enter(dev, &idx))
		return -ENODEV;

	limits = ivpu_user_limits_get(vdev);
	if (IS_ERR(limits)) {
		ret = PTR_ERR(limits);
		goto err_dev_exit;
	}

	file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
	if (!file_priv) {
		ret = -ENOMEM;
		goto err_user_limits_put;
	}

	INIT_LIST_HEAD(&file_priv->ms_instance_list);

	file_priv->vdev = vdev;
	file_priv->bound = true;
	file_priv->user_limits = limits;
	kref_init(&file_priv->ref);
	mutex_init(&file_priv->lock);
	mutex_init(&file_priv->ms_lock);

	mutex_lock(&vdev->context_list_lock);

	ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
			   vdev->context_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
		goto err_unlock;
	}

	ivpu_mmu_context_init(vdev, &file_priv->ctx, ctx_id);

	file_priv->job_limit.min = FIELD_PREP(IVPU_JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	file_priv->job_limit.max = file_priv->job_limit.min | IVPU_JOB_ID_JOB_MASK;

	xa_init_flags(&file_priv->cmdq_xa, XA_FLAGS_ALLOC1);
	file_priv->cmdq_limit.min = IVPU_CMDQ_MIN_ID;
	file_priv->cmdq_limit.max = IVPU_CMDQ_MAX_ID;

	mutex_unlock(&vdev->context_list_lock);
	drm_dev_exit(idx);

	file->driver_priv = file_priv;

	ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
		 ctx_id, current->comm, task_pid_nr(current));

	return 0;

err_unlock:
	mutex_unlock(&vdev->context_list_lock);
	mutex_destroy(&file_priv->ms_lock);
	mutex_destroy(&file_priv->lock);
	kfree(file_priv);
err_user_limits_put:
	ivpu_user_limits_put(vdev, limits);
err_dev_exit:
	drm_dev_exit(idx);
	return ret;
}

static void ivpu_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = to_ivpu_device(dev);

	ivpu_dbg(vdev, FILE, "file_priv close: ctx %u process %s pid %d\n",
		 file_priv->ctx.id, current->comm, task_pid_nr(current));

	ivpu_ms_cleanup(file_priv);
	ivpu_file_priv_put(&file_priv);
}

static const struct drm_ioctl_desc ivpu_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(IVPU_GET_PARAM, ivpu_get_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SET_PARAM, ivpu_set_param_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE, ivpu_bo_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_INFO, ivpu_bo_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_SUBMIT, ivpu_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_WAIT, ivpu_bo_wait_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_START, ivpu_ms_start_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_DATA, ivpu_ms_get_data_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_STOP, ivpu_ms_stop_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_METRIC_STREAMER_GET_INFO, ivpu_ms_get_info_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_CREATE, ivpu_cmdq_create_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_DESTROY, ivpu_cmdq_destroy_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_CMDQ_SUBMIT, ivpu_cmdq_submit_ioctl, 0),
	DRM_IOCTL_DEF_DRV(IVPU_BO_CREATE_FROM_USERPTR, ivpu_bo_create_from_userptr_ioctl, 0),
};

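/*
 * Poll the BOOT_MSG IPC channel until the firmware posts its ready message
 * or the boot timeout expires. The device IRQ is still disabled at this
 * point, so the IPC IRQ handler is invoked by hand on every iteration.
 */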
static int ivpu_wait_for_ready(struct ivpu_device *vdev)
{
	struct ivpu_ipc_consumer cons;
	struct ivpu_ipc_hdr ipc_hdr;
	unsigned long timeout;
	int ret;

	if (ivpu_test_mode & IVPU_TEST_MODE_FW_TEST)
		return 0;

	ivpu_ipc_consumer_add(vdev, &cons, IVPU_IPC_CHAN_BOOT_MSG, NULL);

	timeout = jiffies + msecs_to_jiffies(vdev->timeout.boot);
	while (1) {
		ivpu_ipc_irq_handler(vdev);
		ret = ivpu_ipc_receive(vdev, &cons, &ipc_hdr, NULL, 0);
		if (ret != -ETIMEDOUT || time_after_eq(jiffies, timeout))
			break;

		cond_resched();
	}

	ivpu_ipc_consumer_del(vdev, &cons);

	if (!ret && ipc_hdr.data_addr != IVPU_IPC_BOOT_MSG_DATA_ADDR) {
		ivpu_err(vdev, "Invalid NPU ready message: 0x%x\n", ipc_hdr.data_addr);
		return -EIO;
	}

	if (!ret)
		ivpu_dbg(vdev, PM, "NPU ready message received successfully\n");

	return ret;
}

static int ivpu_hw_sched_init(struct ivpu_device *vdev)
{
	int ret = 0;

	if (vdev->fw->sched_mode == VPU_SCHEDULING_MODE_HW) {
		ret = ivpu_jsm_hws_setup_priority_bands(vdev);
		if (ret) {
			ivpu_err(vdev, "Failed to enable HW scheduler: %d\n", ret);
			return ret;
		}
	}

	return ret;
}

/**
 * ivpu_boot() - Start VPU firmware
 * @vdev: VPU device
 *
 * This function is paired with ivpu_shutdown() but it doesn't power up the
 * VPU because power up has to be called very early in ivpu_probe().
 */
int ivpu_boot(struct ivpu_device *vdev)
{
	int ret;

	drm_WARN_ON(&vdev->drm, atomic_read(&vdev->job_timeout_counter));
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));

	ivpu_fw_boot_params_setup(vdev, ivpu_bo_vaddr(vdev->fw->mem_bp));
	vdev->fw->last_boot_mode = vdev->fw->next_boot_mode;

	ret = ivpu_hw_boot_fw(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to start the firmware: %d\n", ret);
		return ret;
	}

	ret = ivpu_wait_for_ready(vdev);
	if (ret) {
		ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
		goto err_diagnose_failure;
	}

	ivpu_hw_irq_clear(vdev);
	enable_irq(vdev->irq);
	ivpu_hw_irq_enable(vdev);
	ivpu_ipc_enable(vdev);

	if (!ivpu_fw_is_warm_boot(vdev)) {
		ret = ivpu_pm_dct_init(vdev);
		if (ret)
			goto err_disable_ipc;

		ret = ivpu_hw_sched_init(vdev);
		if (ret)
			goto err_disable_ipc;
	}

	return 0;

err_disable_ipc:
	ivpu_ipc_disable(vdev);
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
err_diagnose_failure:
	ivpu_hw_diagnose_failure(vdev);
	ivpu_mmu_evtq_dump(vdev);
	ivpu_dev_coredump(vdev);
	return ret;
}

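/*
 * Quiesce the device before a reset: mask and disable the IRQ, flush the
 * queued IRQ bottom halves, then shut down IPC and the MMU so that nothing
 * touches the hardware while it goes down.
 */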
void ivpu_prepare_for_reset(struct ivpu_device *vdev)
{
	ivpu_hw_irq_disable(vdev);
	disable_irq(vdev->irq);
	flush_work(&vdev->irq_ipc_work);
	flush_work(&vdev->irq_dct_work);
	flush_work(&vdev->context_abort_work);
	ivpu_ipc_disable(vdev);
	ivpu_mmu_disable(vdev);
}

int ivpu_shutdown(struct ivpu_device *vdev)
{
	int ret;

	/* Save PCI state before powering down as it sometimes gets corrupted if NPU hangs */
	pci_save_state(to_pci_dev(vdev->drm.dev));

	ret = ivpu_hw_power_down(vdev);
	if (ret)
		ivpu_warn(vdev, "Failed to power down HW: %d\n", ret);

	pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);

	return ret;
}

static const struct file_operations ivpu_fops = {
	.owner = THIS_MODULE,
	DRM_ACCEL_FOPS,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_fdinfo,
#endif
};

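/*
 * Wrapper around drm_gem_prime_handle_to_fd() that refuses to re-export a
 * BO that was itself imported through PRIME.
 */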
static int ivpu_gem_prime_handle_to_fd(struct drm_device *dev, struct drm_file *file_priv,
				       u32 handle, u32 flags, int *prime_fd)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(file_priv, handle);
	if (!obj)
		return -ENOENT;

	if (drm_gem_is_imported(obj)) {
		/* Do not allow re-exporting imported buffers */
		drm_gem_object_put(obj);
		return -EOPNOTSUPP;
	}

	drm_gem_object_put(obj);

	return drm_gem_prime_handle_to_fd(dev, file_priv, handle, flags, prime_fd);
}

static const struct drm_driver driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.open = ivpu_open,
	.postclose = ivpu_postclose,

	.gem_create_object = ivpu_gem_create_object,
	.gem_prime_import = ivpu_gem_prime_import,
	.prime_handle_to_fd = ivpu_gem_prime_handle_to_fd,

	.ioctls = ivpu_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(ivpu_drm_ioctls),
	.fops = &ivpu_fops,
#ifdef CONFIG_PROC_FS
	.show_fdinfo = drm_show_memory_stats,
#endif

	.name = DRIVER_NAME,
	.desc = DRIVER_DESC,

	.major = 1,
};

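/*
 * The IRQ is requested with IRQF_NO_AUTOEN, i.e. it stays disabled until
 * ivpu_boot() calls enable_irq() once the firmware is up.
 */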
static int ivpu_irq_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	int ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_MSIX);
	if (ret < 0) {
		ivpu_err(vdev, "Failed to allocate an MSI IRQ: %d\n", ret);
		return ret;
	}

	INIT_WORK(&vdev->irq_ipc_work, ivpu_ipc_irq_work_fn);
	INIT_WORK(&vdev->irq_dct_work, ivpu_pm_irq_dct_work_fn);
	INIT_WORK(&vdev->context_abort_work, ivpu_context_abort_work_fn);

	ivpu_irq_handlers_init(vdev);

	vdev->irq = pci_irq_vector(pdev, 0);

	ret = devm_request_irq(vdev->drm.dev, vdev->irq, ivpu_hw_irq_handler,
			       IRQF_NO_AUTOEN, DRIVER_NAME, vdev);
	if (ret)
		ivpu_err(vdev, "Failed to request an IRQ: %d\n", ret);

	return ret;
}

static int ivpu_pci_init(struct ivpu_device *vdev)
{
	struct pci_dev *pdev = to_pci_dev(vdev->drm.dev);
	struct resource *bar0 = &pdev->resource[0];
	struct resource *bar4 = &pdev->resource[4];
	int ret;

	ivpu_dbg(vdev, MISC, "Mapping BAR0 (RegV) %pR\n", bar0);
	vdev->regv = devm_ioremap_resource(vdev->drm.dev, bar0);
	if (IS_ERR(vdev->regv)) {
		ivpu_err(vdev, "Failed to map bar 0: %pe\n", vdev->regv);
		return PTR_ERR(vdev->regv);
	}

	ivpu_dbg(vdev, MISC, "Mapping BAR4 (RegB) %pR\n", bar4);
	vdev->regb = devm_ioremap_resource(vdev->drm.dev, bar4);
	if (IS_ERR(vdev->regb)) {
		ivpu_err(vdev, "Failed to map bar 4: %pe\n", vdev->regb);
		return PTR_ERR(vdev->regb);
	}

	ret = dma_set_mask_and_coherent(vdev->drm.dev, DMA_BIT_MASK(vdev->hw->dma_bits));
	if (ret) {
		ivpu_err(vdev, "Failed to set DMA mask: %d\n", ret);
		return ret;
	}
	dma_set_max_seg_size(vdev->drm.dev, UINT_MAX);

	/* Clear any pending errors */
	pcie_capability_clear_word(pdev, PCI_EXP_DEVSTA, 0x3f);

	/* NPU does not require the 10 ms D3hot delay */
	pdev->d3hot_delay = 0;

	ret = pcim_enable_device(pdev);
	if (ret) {
		ivpu_err(vdev, "Failed to enable PCI device: %d\n", ret);
		return ret;
	}

	pci_set_master(pdev);

	return 0;
}

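/*
 * One-time device init: map the BARs, request the IRQ and power the NPU up
 * early (the rest of init needs register access), then bring up the MMU,
 * firmware and IPC before the first boot.
 */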
static int ivpu_dev_init(struct ivpu_device *vdev)
{
	int ret;

	vdev->hw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->hw), GFP_KERNEL);
	if (!vdev->hw)
		return -ENOMEM;

	vdev->mmu = drmm_kzalloc(&vdev->drm, sizeof(*vdev->mmu), GFP_KERNEL);
	if (!vdev->mmu)
		return -ENOMEM;

	vdev->fw = drmm_kzalloc(&vdev->drm, sizeof(*vdev->fw), GFP_KERNEL);
	if (!vdev->fw)
		return -ENOMEM;

	vdev->ipc = drmm_kzalloc(&vdev->drm, sizeof(*vdev->ipc), GFP_KERNEL);
	if (!vdev->ipc)
		return -ENOMEM;

	vdev->pm = drmm_kzalloc(&vdev->drm, sizeof(*vdev->pm), GFP_KERNEL);
	if (!vdev->pm)
		return -ENOMEM;

	if (ivpu_hw_ip_gen(vdev) >= IVPU_HW_IP_40XX)
		vdev->hw->dma_bits = 48;
	else
		vdev->hw->dma_bits = 38;

	vdev->platform = IVPU_PLATFORM_INVALID;
	vdev->context_xa_limit.min = IVPU_USER_CONTEXT_MIN_SSID;
	vdev->context_xa_limit.max = IVPU_USER_CONTEXT_MAX_SSID;
	atomic64_set(&vdev->unique_id_counter, 0);
	atomic_set(&vdev->job_timeout_counter, 0);
	atomic_set(&vdev->faults_detected, 0);
	xa_init_flags(&vdev->context_xa, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
	xa_init_flags(&vdev->submitted_jobs_xa, XA_FLAGS_ALLOC1);
	xa_init_flags(&vdev->db_xa, XA_FLAGS_ALLOC1);
	INIT_LIST_HEAD(&vdev->bo_list);
	hash_init(vdev->user_limits);

	vdev->db_limit.min = IVPU_MIN_DB;
	vdev->db_limit.max = IVPU_MAX_DB;

	ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->user_limits_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->submitted_jobs_lock);
	if (ret)
		goto err_xa_destroy;

	ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_pci_init(vdev);
	if (ret)
		goto err_xa_destroy;

	ret = ivpu_irq_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Init basic HW info based on buttress registers which are accessible before power up */
	ret = ivpu_hw_init(vdev);
	if (ret)
		goto err_xa_destroy;

	/* Power up early so the rest of init code can access VPU registers */
	ret = ivpu_hw_power_up(vdev);
	if (ret)
		goto err_shutdown;

	ivpu_mmu_global_context_init(vdev);

	ret = ivpu_mmu_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_mmu_reserved_context_init(vdev);
	if (ret)
		goto err_mmu_gctx_fini;

	ret = ivpu_fw_init(vdev);
	if (ret)
		goto err_mmu_rctx_fini;

	ret = ivpu_ipc_init(vdev);
	if (ret)
		goto err_fw_fini;

	ivpu_pm_init(vdev);

	ret = ivpu_boot(vdev);
	if (ret)
		goto err_ipc_fini;

	ivpu_job_done_consumer_init(vdev);
	ivpu_pm_enable(vdev);

	return 0;

err_ipc_fini:
	ivpu_ipc_fini(vdev);
err_fw_fini:
	ivpu_fw_fini(vdev);
err_mmu_rctx_fini:
	ivpu_mmu_reserved_context_fini(vdev);
err_mmu_gctx_fini:
	ivpu_mmu_global_context_fini(vdev);
err_shutdown:
	ivpu_shutdown(vdev);
err_xa_destroy:
	xa_destroy(&vdev->db_xa);
	xa_destroy(&vdev->submitted_jobs_xa);
	xa_destroy(&vdev->context_xa);
	return ret;
}

static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		file_priv_unbind(vdev, file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

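/*
 * Tear-down mirrors ivpu_dev_init(): abort outstanding jobs, stop recovery
 * and runtime PM, power the device down, then release the contexts and the
 * IPC/FW/MMU state in reverse order of creation.
 */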
static void ivpu_dev_fini(struct ivpu_device *vdev)
{
	ivpu_jobs_abort_all(vdev);
	ivpu_pm_disable_recovery(vdev);
	ivpu_pm_disable(vdev);
	ivpu_prepare_for_reset(vdev);
	ivpu_shutdown(vdev);

	ivpu_ms_cleanup_all(vdev);
	ivpu_job_done_consumer_fini(vdev);
	ivpu_bo_unbind_all_user_contexts(vdev);

	ivpu_ipc_fini(vdev);
	ivpu_fw_fini(vdev);
	ivpu_mmu_reserved_context_fini(vdev);
	ivpu_mmu_global_context_fini(vdev);

	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->db_xa));
	xa_destroy(&vdev->db_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
	xa_destroy(&vdev->submitted_jobs_xa);
	drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->context_xa));
	xa_destroy(&vdev->context_xa);
}

static struct pci_device_id ivpu_pci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_MTL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_ARL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_LNL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PTL_P) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_WCL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_NVL) },
	{}
};
MODULE_DEVICE_TABLE(pci, ivpu_pci_ids);

static int ivpu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ivpu_device *vdev;
	int ret;

	vdev = devm_drm_dev_alloc(&pdev->dev, &driver, struct ivpu_device, drm);
	if (IS_ERR(vdev))
		return PTR_ERR(vdev);

	pci_set_drvdata(pdev, vdev);

	ret = ivpu_dev_init(vdev);
	if (ret)
		return ret;

	ivpu_debugfs_init(vdev);
	ivpu_sysfs_init(vdev);

	ret = drm_dev_register(&vdev->drm, 0);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DRM device: %d\n", ret);
		ivpu_dev_fini(vdev);
	}

	return ret;
}

static void ivpu_remove(struct pci_dev *pdev)
{
	struct ivpu_device *vdev = pci_get_drvdata(pdev);

	drm_dev_unplug(&vdev->drm);
	ivpu_dev_fini(vdev);
}

static const struct dev_pm_ops ivpu_drv_pci_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(ivpu_pm_suspend_cb, ivpu_pm_resume_cb)
	SET_RUNTIME_PM_OPS(ivpu_pm_runtime_suspend_cb, ivpu_pm_runtime_resume_cb, NULL)
};

static const struct pci_error_handlers ivpu_drv_pci_err = {
	.reset_prepare = ivpu_pm_reset_prepare_cb,
	.reset_done = ivpu_pm_reset_done_cb,
};

static struct pci_driver ivpu_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ivpu_pci_ids,
	.probe = ivpu_probe,
	.remove = ivpu_remove,
	.driver = {
		.pm = &ivpu_drv_pci_pm,
	},
	.err_handler = &ivpu_drv_pci_err,
};

module_pci_driver(ivpu_pci_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL and additional rights");
MODULE_VERSION(DRIVER_VERSION_STR);