// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <uapi/drm/qaic_accel.h>

#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
#include "qaic_timesync.h"
#include "sahara.h"

MODULE_IMPORT_NS("DMA_BUF");

#define PCI_DEV_AIC080 0xa080
#define PCI_DEV_AIC100 0xa100
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR 5
#define CNTL_MINOR 0

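/*
 * Module-load-time knob (0400 perms, so it cannot be changed at runtime),
 * e.g.: modprobe qaic datapath_polling=1
 */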
bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);
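
/*
 * The helpers below tie their resources to the drm_device lifetime with
 * drmm_add_action_or_reset(), so the workqueues, SRCU structs, and PCI
 * drvdata pointer are released automatically when the drm_device goes away
 * and the error paths need no explicit unwinding.
 */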
49
static void qaicm_wq_release(struct drm_device *dev, void *res)
{
	struct workqueue_struct *wq = res;

	destroy_workqueue(wq);
}

static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *name)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_workqueue("%s", WQ_UNBOUND, 0, name);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
	if (ret)
		return ERR_PTR(ret);

	return wq;
}

static void qaicm_srcu_release(struct drm_device *dev, void *res)
{
	struct srcu_struct *lock = res;

	cleanup_srcu_struct(lock);
}

static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
{
	int ret;

	ret = init_srcu_struct(lock);
	if (ret)
		return ret;

	return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
}

static void qaicm_pci_release(struct drm_device *dev, void *res)
{
	struct qaic_device *qdev = to_qaic_device(dev);

	pci_set_drvdata(qdev->pdev, NULL);
}

static void free_usr(struct kref *kref)
{
	struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);

	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
	kfree(usr);
}
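
/*
 * DRM file open: allocate a qaic_user for this file, give it a unique
 * handle from the qaic_usrs IDA, and add it to the device's user list so
 * the association can be severed if the device goes away while the FD is
 * still open.
 */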
static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
	struct qaic_device *qdev = qddev->qdev;
	struct qaic_user *usr;
	int rcu_id;
	int ret;

	rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto dev_unlock;
	}

	usr = kmalloc(sizeof(*usr), GFP_KERNEL);
	if (!usr) {
		ret = -ENOMEM;
		goto dev_unlock;
	}

	usr->handle = ida_alloc(&qaic_usrs, GFP_KERNEL);
	if (usr->handle < 0) {
		ret = usr->handle;
		goto free_usr;
	}
	usr->qddev = qddev;
	atomic_set(&usr->chunk_id, 0);
	init_srcu_struct(&usr->qddev_lock);
	kref_init(&usr->ref_count);

	ret = mutex_lock_interruptible(&qddev->users_mutex);
	if (ret)
		goto cleanup_usr;

	list_add(&usr->node, &qddev->users);
	mutex_unlock(&qddev->users_mutex);

	file->driver_priv = usr;

	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return 0;

cleanup_usr:
	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
free_usr:
	kfree(usr);
dev_unlock:
	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return ret;
}
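
/*
 * DRM file close: if the device is still online, release this user's
 * control resources and any DBCs it owns, then remove it from the user
 * list and drop the reference taken at open.
 */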
static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_user *usr = file->driver_priv;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	int qdev_rcu_id;
	int usr_rcu_id;
	int i;

	qddev = usr->qddev;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (qddev) {
		qdev = qddev->qdev;
		qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
		if (qdev->dev_state == QAIC_ONLINE) {
			qaic_release_usr(qdev, usr);
			for (i = 0; i < qdev->num_dbc; ++i)
				if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
					release_dbc(qdev, i);
		}
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);

		mutex_lock(&qddev->users_mutex);
		if (!list_empty(&usr->node))
			list_del_init(&usr->node);
		mutex_unlock(&qddev->users_mutex);
	}

	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	kref_put(&usr->ref_count, free_usr);

	file->driver_priv = NULL;
}

DEFINE_DRM_ACCEL_FOPS(qaic_accel_fops);

static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QAIC_MANAGE, qaic_manage_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_MMAP_BO, qaic_mmap_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_ATTACH_SLICE_BO, qaic_attach_slice_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_EXECUTE_BO, qaic_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
};

static const struct drm_driver qaic_accel_driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.name = QAIC_NAME,
	.desc = QAIC_DESC,
	.date = "20190618",

	.fops = &qaic_accel_fops,
	.open = qaic_open,
	.postclose = qaic_postclose,

	.ioctls = qaic_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(qaic_drm_ioctls),
	.gem_prime_import = qaic_gem_prime_import,
};

static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	int ret;

	/* Hold off implementing partitions until the uapi is determined */
	if (partition_id != QAIC_NO_PARTITION)
		return -EINVAL;

	qddev->partition_id = partition_id;

	ret = drm_dev_register(drm, 0);
	if (ret) {
		pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
		return ret;
	}

	qaic_debugfs_init(qddev);

	return ret;
}

static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	struct qaic_user *usr;

	drm_dev_unregister(drm);
	qddev->partition_id = 0;
	/*
	 * Existing users get unresolvable errors until they close their FDs.
	 * We need to sync carefully with users calling close(). The list of
	 * users can be modified elsewhere when the lock isn't held here, but
	 * synchronizing the srcu with the mutex held could deadlock. Grab the
	 * mutex so that the list will be unmodified. The user we get will
	 * exist as long as the lock is held. Signal that the qddev is going
	 * away, and grab a reference to the user so they don't go away for
	 * synchronize_srcu(). Then release the mutex to avoid deadlock and
	 * make sure the user has observed the signal. With the lock released,
	 * we cannot maintain any state of the user list.
	 */
	mutex_lock(&qddev->users_mutex);
	while (!list_empty(&qddev->users)) {
		usr = list_first_entry(&qddev->users, struct qaic_user, node);
		list_del_init(&usr->node);
		kref_get(&usr->ref_count);
		usr->qddev = NULL;
		mutex_unlock(&qddev->users_mutex);
		synchronize_srcu(&usr->qddev_lock);
		kref_put(&usr->ref_count, free_usr);
		mutex_lock(&qddev->users_mutex);
	}
	mutex_unlock(&qddev->users_mutex);
}

static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	u16 major = -1, minor = -1;
	struct qaic_device *qdev;
	int ret;

	/*
	 * Invoking this function indicates that the control channel to the
	 * device is available. We use that as a signal that the device-side
	 * firmware has booted. The device-side firmware manages the device
	 * resources, so we need to communicate with it via the control channel
	 * in order to utilize the device. Therefore we wait for this signal
	 * before bringing up the drm device that userspace will use to control
	 * the device, because without the device-side firmware, userspace
	 * can't do anything useful.
	 */

	qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));

	dev_set_drvdata(&mhi_dev->dev, qdev);
	qdev->cntl_ch = mhi_dev;

	ret = qaic_control_open(qdev);
	if (ret) {
		pci_dbg(qdev->pdev, "%s: control_open failed %d\n", __func__, ret);
		return ret;
	}

	qdev->dev_state = QAIC_BOOT;
	ret = get_cntl_version(qdev, NULL, &major, &minor);
	if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
		pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
			__func__, major, minor, CNTL_MAJOR, CNTL_MINOR, ret);
		ret = -EINVAL;
		goto close_control;
	}
	qdev->dev_state = QAIC_ONLINE;
	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);

	return ret;

close_control:
	qaic_control_close(qdev);
	return ret;
}

static void qaic_mhi_remove(struct mhi_device *mhi_dev)
{
	/* This is redundant since we have already observed the device crash */
}
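
/*
 * Mark the device offline and wake every control and DBC waiter so that
 * in-flight operations fail fast rather than waiting out their timeouts,
 * then wait for all dev_lock readers to observe the state change.
 */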
static void qaic_notify_reset(struct qaic_device *qdev)
{
	int i;

	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
	qdev->dev_state = QAIC_OFFLINE;
	/* wake up any waiters to avoid waiting for timeouts at sync */
	wake_all_cntl(qdev);
	for (i = 0; i < qdev->num_dbc; ++i)
		wakeup_dbc(qdev, i);
	synchronize_srcu(&qdev->dev_lock);
}

void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
	int i;

	qaic_notify_reset(qdev);

	/* start tearing things down */
	for (i = 0; i < qdev->num_dbc; ++i)
		release_dbc(qdev, i);
}
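
/*
 * Allocate the qaic_device and its embedded drm_device. Everything here is
 * devm/drmm managed, so each error path can simply return NULL and rely on
 * the managed-release actions for cleanup.
 */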
static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	struct drm_device *drm;
	int i, ret;

	qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return NULL;

	qdev->dev_state = QAIC_OFFLINE;
	if (id->device == PCI_DEV_AIC080 || id->device == PCI_DEV_AIC100) {
		qdev->num_dbc = 16;
		qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
		if (!qdev->dbc)
			return NULL;
	}

	qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
	if (IS_ERR(qddev))
		return NULL;

	drm = to_drm(qddev);
	pci_set_drvdata(pdev, qdev);

	ret = drmm_mutex_init(drm, &qddev->users_mutex);
	if (ret)
		return NULL;
	ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->bootlog_mutex);
	if (ret)
		return NULL;

	qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
	if (IS_ERR(qdev->cntl_wq))
		return NULL;
	qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
	if (IS_ERR(qdev->qts_wq))
		return NULL;

	ret = qaicm_srcu_init(drm, &qdev->dev_lock);
	if (ret)
		return NULL;

	qdev->qddev = qddev;
	qdev->pdev = pdev;
	qddev->qdev = qdev;

	INIT_LIST_HEAD(&qdev->cntl_xfer_list);
	INIT_LIST_HEAD(&qdev->bootlog);
	INIT_LIST_HEAD(&qddev->users);

	for (i = 0; i < qdev->num_dbc; ++i) {
		spin_lock_init(&qdev->dbc[i].xfer_lock);
		qdev->dbc[i].qdev = qdev;
		qdev->dbc[i].id = i;
		INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
		ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
		if (ret)
			return NULL;
		init_waitqueue_head(&qdev->dbc[i].dbc_release);
		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
	}

	return qdev;
}
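
/*
 * Map the PCI resources. BAR 0 backs the MHI controller registers and BAR 2
 * backs the DBC registers; BAR 4's presence is validated but it is not
 * mapped here.
 */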
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int bars;
	int ret;

	bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* make sure the device has the expected BARs */
	if (bars != (BIT(0) | BIT(2) | BIT(4))) {
		pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
			__func__, bars);
		return -EINVAL;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);

	qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
	if (IS_ERR(qdev->bar_0))
		return PTR_ERR(qdev->bar_0);

	qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
	if (IS_ERR(qdev->bar_2))
		return PTR_ERR(qdev->bar_2);

	/* Managed release since we use pcim_enable_device above */
	pci_set_master(pdev);

	return 0;
}
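
/*
 * Interrupt setup: ideally 32 MSI vectors, with vector 0 driving MHI and
 * vectors 1-16 driving the DBCs. If 32 vectors are not available, fall back
 * to a single vector shared by everything.
 */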
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int mhi_irq;
	int ret;
	int i;

	/* Managed release since we use pcim_enable_device */
	ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
	if (ret == -ENOSPC) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (ret < 0)
			return ret;

		/*
		 * Operate in single-MSI mode. All interrupts are directed to
		 * MSI0; every interrupt wakes up all of the interrupt handlers
		 * (MHI and DBC[0-15]). Since the interrupt is now shared, it
		 * is not disabled while the DBC threaded handler runs, but
		 * only one thread is allowed to run per DBC, so while the
		 * handler can be interrupted, it shouldn't race with itself.
		 */
		qdev->single_msi = true;
		pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
	} else if (ret < 0) {
		return ret;
	}

	mhi_irq = pci_irq_vector(pdev, 0);
	if (mhi_irq < 0)
		return mhi_irq;

	for (i = 0; i < qdev->num_dbc; ++i) {
		ret = devm_request_threaded_irq(&pdev->dev,
						pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1),
						dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
						"qaic_dbc", &qdev->dbc[i]);
		if (ret)
			return ret;

		if (datapath_polling) {
			qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
			if (!qdev->single_msi)
				disable_irq_nosync(qdev->dbc[i].irq);
			INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
		}
	}

	return mhi_irq;
}
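
/*
 * Device bring-up order: allocate the qaic_device, map PCI resources, wire
 * up interrupts, register the drm device, and finally hand the control path
 * to MHI. The device stays QAIC_OFFLINE until the QAIC_CONTROL channel
 * probes and the control protocol version is validated.
 */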
static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qaic_device *qdev;
	int mhi_irq;
	int ret;
	int i;

	qdev = create_qdev(pdev, id);
	if (!qdev)
		return -ENOMEM;

	ret = init_pci(qdev, pdev);
	if (ret)
		return ret;

	for (i = 0; i < qdev->num_dbc; ++i)
		qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);

	mhi_irq = init_msi(qdev, pdev);
	if (mhi_irq < 0)
		return mhi_irq;

	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
	if (ret)
		return ret;

	qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
						       qdev->single_msi);
	if (IS_ERR(qdev->mhi_cntrl)) {
		ret = PTR_ERR(qdev->mhi_cntrl);
		qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
		return ret;
	}

	return 0;
}

static void qaic_pci_remove(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	if (!qdev)
		return;

	qaic_dev_reset_clean_local_state(qdev);
	qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
}

static void qaic_pci_shutdown(struct pci_dev *pdev)
{
	/* see qaic_exit for what link_up is doing */
	link_up = true;
	qaic_pci_remove(pdev);
}
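
/*
 * PCI error handling: any detected error requests a reset. reset_prepare
 * quiesces MHI and tears down local state; reset_done lets the MHI
 * controller recover once the device has been reset.
 */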
static pci_ers_result_t qaic_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

static void qaic_pci_reset_prepare(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_notify_reset(qdev);
	qaic_mhi_start_reset(qdev->mhi_cntrl);
	qaic_dev_reset_clean_local_state(qdev);
}

static void qaic_pci_reset_done(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_mhi_reset_done(qdev->mhi_cntrl);
}

static const struct mhi_device_id qaic_mhi_match_table[] = {
	{ .chan = "QAIC_CONTROL", },
	{},
};

static struct mhi_driver qaic_mhi_driver = {
	.id_table = qaic_mhi_match_table,
	.remove = qaic_mhi_remove,
	.probe = qaic_mhi_probe,
	.ul_xfer_cb = qaic_mhi_ul_xfer_cb,
	.dl_xfer_cb = qaic_mhi_dl_xfer_cb,
	.driver = {
		.name = "qaic_mhi",
	},
};

static const struct pci_device_id qaic_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC080), },
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
	{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);

static const struct pci_error_handlers qaic_pci_err_handler = {
	.error_detected = qaic_pci_error_detected,
	.reset_prepare = qaic_pci_reset_prepare,
	.reset_done = qaic_pci_reset_done,
};

static struct pci_driver qaic_pci_driver = {
	.name = QAIC_NAME,
	.id_table = qaic_ids,
	.probe = qaic_pci_probe,
	.remove = qaic_pci_remove,
	.shutdown = qaic_pci_shutdown,
	.err_handler = &qaic_pci_err_handler,
};
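
/*
 * Module init order matters: the PCI driver must register before the MHI
 * driver so devices can be claimed first. Failures from the PCI, MHI, and
 * sahara registrations are fatal; timesync and bootlog failures are only
 * logged, as the driver can function without them.
 */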
static int __init qaic_init(void)
{
	int ret;

	ret = pci_register_driver(&qaic_pci_driver);
	if (ret) {
		pr_debug("qaic: pci_register_driver failed %d\n", ret);
		return ret;
	}

	ret = mhi_driver_register(&qaic_mhi_driver);
	if (ret) {
		pr_debug("qaic: mhi_driver_register failed %d\n", ret);
		goto free_pci;
	}

	ret = sahara_register();
	if (ret) {
		pr_debug("qaic: sahara_register failed %d\n", ret);
		goto free_mhi;
	}

	ret = qaic_timesync_init();
	if (ret)
		pr_debug("qaic: qaic_timesync_init failed %d\n", ret);

	ret = qaic_bootlog_register();
	if (ret)
		pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);

	return 0;

free_mhi:
	mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
	pci_unregister_driver(&qaic_pci_driver);
	return ret;
}

static void __exit qaic_exit(void)
{
	/*
	 * We assume that qaic_pci_remove() is called due to a hotplug event
	 * which would mean that the link is down, and thus
	 * qaic_mhi_free_controller() should not try to access the device
	 * during cleanup.
	 * We call pci_unregister_driver() below, which also triggers
	 * qaic_pci_remove(), but since this is module exit, we expect the link
	 * to the device to be up, in which case qaic_mhi_free_controller()
	 * should try to access the device during cleanup to put the device in
	 * a sane state.
	 * For that reason, we set link_up here to let qaic_mhi_free_controller
	 * know the expected link state. Since the module is going to be
	 * removed at the end of this, we don't need to worry about
	 * reinitializing the link_up state after the cleanup is done.
	 */
	link_up = true;
	qaic_bootlog_unregister();
	qaic_timesync_deinit();
	sahara_unregister();
	mhi_driver_unregister(&qaic_mhi_driver);
	pci_unregister_driver(&qaic_pci_driver);
}

module_init(qaic_init);
module_exit(qaic_exit);

MODULE_AUTHOR(QAIC_DESC " Kernel Driver Team");
MODULE_DESCRIPTION(QAIC_DESC " Accel Driver");
MODULE_LICENSE("GPL");