// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/mhi.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <drm/drm_accel.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <uapi/drm/qaic_accel.h>

#include "mhi_controller.h"
#include "qaic.h"
#include "qaic_debugfs.h"
#include "qaic_timesync.h"
#include "sahara.h"

MODULE_IMPORT_NS(DMA_BUF);

#define PCI_DEV_AIC100 0xa100
#define QAIC_NAME "qaic"
#define QAIC_DESC "Qualcomm Cloud AI Accelerators"
#define CNTL_MAJOR 5
#define CNTL_MINOR 0

bool datapath_polling;
module_param(datapath_polling, bool, 0400);
MODULE_PARM_DESC(datapath_polling, "Operate the datapath in polling mode");
static bool link_up;
static DEFINE_IDA(qaic_usrs);

static void qaicm_wq_release(struct drm_device *dev, void *res)
{
	struct workqueue_struct *wq = res;

	destroy_workqueue(wq);
}

static struct workqueue_struct *qaicm_wq_init(struct drm_device *dev, const char *fmt)
{
	struct workqueue_struct *wq;
	int ret;

	wq = alloc_workqueue(fmt, WQ_UNBOUND, 0);
	if (!wq)
		return ERR_PTR(-ENOMEM);
	ret = drmm_add_action_or_reset(dev, qaicm_wq_release, wq);
	if (ret)
		return ERR_PTR(ret);

	return wq;
}

static void qaicm_srcu_release(struct drm_device *dev, void *res)
{
	struct srcu_struct *lock = res;

	cleanup_srcu_struct(lock);
}

static int qaicm_srcu_init(struct drm_device *dev, struct srcu_struct *lock)
{
	int ret;

	ret = init_srcu_struct(lock);
	if (ret)
		return ret;

	return drmm_add_action_or_reset(dev, qaicm_srcu_release, lock);
}

static void qaicm_pci_release(struct drm_device *dev, void *res)
{
	struct qaic_device *qdev = to_qaic_device(dev);

	pci_set_drvdata(qdev->pdev, NULL);
}

static void free_usr(struct kref *kref)
{
	struct qaic_user *usr = container_of(kref, struct qaic_user, ref_count);

	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
	kfree(usr);
}

static int qaic_open(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_drm_device *qddev = to_qaic_drm_device(dev);
	struct qaic_device *qdev = qddev->qdev;
	struct qaic_user *usr;
	int rcu_id;
	int ret;

	rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto dev_unlock;
	}

	usr = kmalloc(sizeof(*usr), GFP_KERNEL);
	if (!usr) {
		ret = -ENOMEM;
		goto dev_unlock;
	}

	usr->handle = ida_alloc(&qaic_usrs, GFP_KERNEL);
	if (usr->handle < 0) {
		ret = usr->handle;
		goto free_usr;
	}
	usr->qddev = qddev;
	atomic_set(&usr->chunk_id, 0);
	init_srcu_struct(&usr->qddev_lock);
	kref_init(&usr->ref_count);

	ret = mutex_lock_interruptible(&qddev->users_mutex);
	if (ret)
		goto cleanup_usr;

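	/*
	 * Publish the new user on the device's user list; users_mutex also
	 * serializes against qaic_destroy_drm_device() draining this list.
	 */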
	list_add(&usr->node, &qddev->users);
	mutex_unlock(&qddev->users_mutex);

	file->driver_priv = usr;

	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return 0;

cleanup_usr:
	cleanup_srcu_struct(&usr->qddev_lock);
	ida_free(&qaic_usrs, usr->handle);
free_usr:
	kfree(usr);
dev_unlock:
	srcu_read_unlock(&qdev->dev_lock, rcu_id);
	return ret;
}

static void qaic_postclose(struct drm_device *dev, struct drm_file *file)
{
	struct qaic_user *usr = file->driver_priv;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	int qdev_rcu_id;
	int usr_rcu_id;
	int i;

	qddev = usr->qddev;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (qddev) {
		qdev = qddev->qdev;
		qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
		if (qdev->dev_state == QAIC_ONLINE) {
			qaic_release_usr(qdev, usr);
			for (i = 0; i < qdev->num_dbc; ++i)
				if (qdev->dbc[i].usr && qdev->dbc[i].usr->handle == usr->handle)
					release_dbc(qdev, i);
		}
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);

		mutex_lock(&qddev->users_mutex);
		if (!list_empty(&usr->node))
			list_del_init(&usr->node);
		mutex_unlock(&qddev->users_mutex);
	}

	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	kref_put(&usr->ref_count, free_usr);

	file->driver_priv = NULL;
}

DEFINE_DRM_ACCEL_FOPS(qaic_accel_fops);

static const struct drm_ioctl_desc qaic_drm_ioctls[] = {
	DRM_IOCTL_DEF_DRV(QAIC_MANAGE, qaic_manage_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_CREATE_BO, qaic_create_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_MMAP_BO, qaic_mmap_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_ATTACH_SLICE_BO, qaic_attach_slice_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_EXECUTE_BO, qaic_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PARTIAL_EXECUTE_BO, qaic_partial_execute_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_WAIT_BO, qaic_wait_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_PERF_STATS_BO, qaic_perf_stats_bo_ioctl, 0),
	DRM_IOCTL_DEF_DRV(QAIC_DETACH_SLICE_BO, qaic_detach_slice_bo_ioctl, 0),
};

static const struct drm_driver qaic_accel_driver = {
	.driver_features = DRIVER_GEM | DRIVER_COMPUTE_ACCEL,

	.name = QAIC_NAME,
	.desc = QAIC_DESC,
	.date = "20190618",

	.fops = &qaic_accel_fops,
	.open = qaic_open,
	.postclose = qaic_postclose,

	.ioctls = qaic_drm_ioctls,
	.num_ioctls = ARRAY_SIZE(qaic_drm_ioctls),
	.gem_prime_import = qaic_gem_prime_import,
};

static int qaic_create_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	int ret;

	/* Hold off implementing partitions until the uapi is determined */
	if (partition_id != QAIC_NO_PARTITION)
		return -EINVAL;

	qddev->partition_id = partition_id;

	ret = drm_dev_register(drm, 0);
	if (ret) {
		pci_dbg(qdev->pdev, "drm_dev_register failed %d\n", ret);
		return ret;
	}

	qaic_debugfs_init(qddev);

	return ret;
}

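/* Unregister the DRM device and detach any open users from it */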
static void qaic_destroy_drm_device(struct qaic_device *qdev, s32 partition_id)
{
	struct qaic_drm_device *qddev = qdev->qddev;
	struct drm_device *drm = to_drm(qddev);
	struct qaic_user *usr;

	drm_dev_unregister(drm);
	qddev->partition_id = 0;
	/*
	 * Existing users get unresolvable errors till they close FDs.
	 * Need to sync carefully with users calling close(). The list
	 * of users can be modified elsewhere when the lock isn't held
	 * here, but synchronizing the SRCU with the mutex held could
	 * deadlock. Grab the mutex so that the list will be unmodified.
	 * The user we get will exist as long as the lock is held. Signal
	 * that the qddev is going away, and grab a reference to the user
	 * so they don't go away for synchronize_srcu(). Then release the
	 * mutex to avoid deadlock and make sure the user has observed
	 * the signal. With the lock released, we cannot maintain any
	 * state of the user list.
	 */
	mutex_lock(&qddev->users_mutex);
	while (!list_empty(&qddev->users)) {
		usr = list_first_entry(&qddev->users, struct qaic_user, node);
		list_del_init(&usr->node);
		kref_get(&usr->ref_count);
		usr->qddev = NULL;
		mutex_unlock(&qddev->users_mutex);
		synchronize_srcu(&usr->qddev_lock);
		kref_put(&usr->ref_count, free_usr);
		mutex_lock(&qddev->users_mutex);
	}
	mutex_unlock(&qddev->users_mutex);
}

static int qaic_mhi_probe(struct mhi_device *mhi_dev, const struct mhi_device_id *id)
{
	u16 major = -1, minor = -1;
	struct qaic_device *qdev;
	int ret;

	/*
	 * Invoking this function indicates that the control channel to the
	 * device is available. We use that as a signal to indicate that
	 * the device side firmware has booted. The device side firmware
	 * manages the device resources, so we need to communicate with it
	 * via the control channel in order to utilize the device. Therefore
	 * we wait until this signal to create the drm dev that userspace will
	 * use to control the device, because without the device side firmware,
	 * userspace can't do anything useful.
	 */

	qdev = pci_get_drvdata(to_pci_dev(mhi_dev->mhi_cntrl->cntrl_dev));

	dev_set_drvdata(&mhi_dev->dev, qdev);
	qdev->cntl_ch = mhi_dev;

	ret = qaic_control_open(qdev);
	if (ret) {
		pci_dbg(qdev->pdev, "%s: control_open failed %d\n", __func__, ret);
		return ret;
	}

	qdev->dev_state = QAIC_BOOT;
	ret = get_cntl_version(qdev, NULL, &major, &minor);
	if (ret || major != CNTL_MAJOR || minor > CNTL_MINOR) {
		pci_err(qdev->pdev, "%s: Control protocol version (%d.%d) not supported. Supported version is (%d.%d). Ret: %d\n",
			__func__, major, minor, CNTL_MAJOR, CNTL_MINOR, ret);
		ret = -EINVAL;
		goto close_control;
	}
	qdev->dev_state = QAIC_ONLINE;
	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_ONLINE);

	return ret;

close_control:
	qaic_control_close(qdev);
	return ret;
}

static void qaic_mhi_remove(struct mhi_device *mhi_dev)
{
	/* This is redundant since we have already observed the device crash */
}

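/*
 * Take the device offline: emit the OFFLINE uevent, wake all control and DBC
 * waiters so they don't wait out their timeouts, and wait for in-flight
 * dev_lock readers to drain.
 */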
static void qaic_notify_reset(struct qaic_device *qdev)
{
	int i;

	kobject_uevent(&(to_accel_kdev(qdev->qddev))->kobj, KOBJ_OFFLINE);
	qdev->dev_state = QAIC_OFFLINE;
	/* wake up any waiters to avoid waiting for timeouts at sync */
	wake_all_cntl(qdev);
	for (i = 0; i < qdev->num_dbc; ++i)
		wakeup_dbc(qdev, i);
	synchronize_srcu(&qdev->dev_lock);
}

void qaic_dev_reset_clean_local_state(struct qaic_device *qdev)
{
	int i;

	qaic_notify_reset(qdev);

	/* start tearing things down */
	for (i = 0; i < qdev->num_dbc; ++i)
		release_dbc(qdev, i);
}

static struct qaic_device *create_qdev(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct device *dev = &pdev->dev;
	struct qaic_drm_device *qddev;
	struct qaic_device *qdev;
	struct drm_device *drm;
	int i, ret;

	qdev = devm_kzalloc(dev, sizeof(*qdev), GFP_KERNEL);
	if (!qdev)
		return NULL;

	qdev->dev_state = QAIC_OFFLINE;
	if (id->device == PCI_DEV_AIC100) {
		qdev->num_dbc = 16;
		qdev->dbc = devm_kcalloc(dev, qdev->num_dbc, sizeof(*qdev->dbc), GFP_KERNEL);
		if (!qdev->dbc)
			return NULL;
	}

	qddev = devm_drm_dev_alloc(&pdev->dev, &qaic_accel_driver, struct qaic_drm_device, drm);
	if (IS_ERR(qddev))
		return NULL;

	drm = to_drm(qddev);
	pci_set_drvdata(pdev, qdev);

	ret = drmm_mutex_init(drm, &qddev->users_mutex);
	if (ret)
		return NULL;
	ret = drmm_add_action_or_reset(drm, qaicm_pci_release, NULL);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->cntl_mutex);
	if (ret)
		return NULL;
	ret = drmm_mutex_init(drm, &qdev->bootlog_mutex);
	if (ret)
		return NULL;

	qdev->cntl_wq = qaicm_wq_init(drm, "qaic_cntl");
	if (IS_ERR(qdev->cntl_wq))
		return NULL;
	qdev->qts_wq = qaicm_wq_init(drm, "qaic_ts");
	if (IS_ERR(qdev->qts_wq))
		return NULL;

	ret = qaicm_srcu_init(drm, &qdev->dev_lock);
	if (ret)
		return NULL;

	qdev->qddev = qddev;
	qdev->pdev = pdev;
	qddev->qdev = qdev;

	INIT_LIST_HEAD(&qdev->cntl_xfer_list);
	INIT_LIST_HEAD(&qdev->bootlog);
	INIT_LIST_HEAD(&qddev->users);

	for (i = 0; i < qdev->num_dbc; ++i) {
		spin_lock_init(&qdev->dbc[i].xfer_lock);
		qdev->dbc[i].qdev = qdev;
		qdev->dbc[i].id = i;
		INIT_LIST_HEAD(&qdev->dbc[i].xfer_list);
		ret = qaicm_srcu_init(drm, &qdev->dbc[i].ch_lock);
		if (ret)
			return NULL;
		init_waitqueue_head(&qdev->dbc[i].dbc_release);
		INIT_LIST_HEAD(&qdev->dbc[i].bo_lists);
	}

	return qdev;
}

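/* Validate that the device exposes the expected BARs, map them, and configure DMA */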
static int init_pci(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int bars;
	int ret;

	bars = pci_select_bars(pdev, IORESOURCE_MEM);

	/* make sure the device has the expected BARs */
	if (bars != (BIT(0) | BIT(2) | BIT(4))) {
		pci_dbg(pdev, "%s: expected BARs 0, 2, and 4 not found in device. Found 0x%x\n",
			__func__, bars);
		return -EINVAL;
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;
	ret = dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	if (ret)
		return ret;

	qdev->bar_0 = devm_ioremap_resource(&pdev->dev, &pdev->resource[0]);
	if (IS_ERR(qdev->bar_0))
		return PTR_ERR(qdev->bar_0);

	qdev->bar_2 = devm_ioremap_resource(&pdev->dev, &pdev->resource[2]);
	if (IS_ERR(qdev->bar_2))
		return PTR_ERR(qdev->bar_2);

	/* Managed release since we use pcim_enable_device above */
	pci_set_master(pdev);

	return 0;
}

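/*
 * Request 32 MSI vectors: vector 0 drives MHI and vectors 1..num_dbc the
 * DBCs. If 32 are unavailable, fall back to a single shared vector for
 * everything. Returns the MHI IRQ number on success.
 */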
static int init_msi(struct qaic_device *qdev, struct pci_dev *pdev)
{
	int mhi_irq;
	int ret;
	int i;

	/* Managed release since we use pcim_enable_device */
	ret = pci_alloc_irq_vectors(pdev, 32, 32, PCI_IRQ_MSI);
	if (ret == -ENOSPC) {
		ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
		if (ret < 0)
			return ret;

		/*
		 * Operate in one MSI mode. All interrupts will be directed to
		 * MSI0; every interrupt will wake up all the interrupt handlers
		 * (MHI and DBC[0-15]). Since the interrupt is now shared, it is
		 * not disabled during DBC threaded handler, but only one thread
		 * will be allowed to run per DBC, so while it can be
		 * interrupted, it shouldn't race with itself.
		 */
		qdev->single_msi = true;
		pci_info(pdev, "Allocating 32 MSIs failed, operating in 1 MSI mode. Performance may be impacted.\n");
	} else if (ret < 0) {
		return ret;
	}

	mhi_irq = pci_irq_vector(pdev, 0);
	if (mhi_irq < 0)
		return mhi_irq;

	for (i = 0; i < qdev->num_dbc; ++i) {
		ret = devm_request_threaded_irq(&pdev->dev,
						pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1),
						dbc_irq_handler, dbc_irq_threaded_fn, IRQF_SHARED,
						"qaic_dbc", &qdev->dbc[i]);
		if (ret)
			return ret;

		if (datapath_polling) {
			qdev->dbc[i].irq = pci_irq_vector(pdev, qdev->single_msi ? 0 : i + 1);
			if (!qdev->single_msi)
				disable_irq_nosync(qdev->dbc[i].irq);
			INIT_WORK(&qdev->dbc[i].poll_work, irq_polling_work);
		}
	}

	return mhi_irq;
}

static int qaic_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qaic_device *qdev;
	int mhi_irq;
	int ret;
	int i;

	qdev = create_qdev(pdev, id);
	if (!qdev)
		return -ENOMEM;

	ret = init_pci(qdev, pdev);
	if (ret)
		return ret;

	for (i = 0; i < qdev->num_dbc; ++i)
		qdev->dbc[i].dbc_base = qdev->bar_2 + QAIC_DBC_OFF(i);

	mhi_irq = init_msi(qdev, pdev);
	if (mhi_irq < 0)
		return mhi_irq;

	ret = qaic_create_drm_device(qdev, QAIC_NO_PARTITION);
	if (ret)
		return ret;

	qdev->mhi_cntrl = qaic_mhi_register_controller(pdev, qdev->bar_0, mhi_irq,
						       qdev->single_msi);
	if (IS_ERR(qdev->mhi_cntrl)) {
		ret = PTR_ERR(qdev->mhi_cntrl);
		qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
		return ret;
	}

	return 0;
}

static void qaic_pci_remove(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	if (!qdev)
		return;

	qaic_dev_reset_clean_local_state(qdev);
	qaic_mhi_free_controller(qdev->mhi_cntrl, link_up);
	qaic_destroy_drm_device(qdev, QAIC_NO_PARTITION);
}

static void qaic_pci_shutdown(struct pci_dev *pdev)
{
	/* see qaic_exit for what link_up is doing */
	link_up = true;
	qaic_pci_remove(pdev);
}

static pci_ers_result_t qaic_pci_error_detected(struct pci_dev *pdev, pci_channel_state_t error)
{
	return PCI_ERS_RESULT_NEED_RESET;
}

static void qaic_pci_reset_prepare(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_notify_reset(qdev);
	qaic_mhi_start_reset(qdev->mhi_cntrl);
	qaic_dev_reset_clean_local_state(qdev);
}

static void qaic_pci_reset_done(struct pci_dev *pdev)
{
	struct qaic_device *qdev = pci_get_drvdata(pdev);

	qaic_mhi_reset_done(qdev->mhi_cntrl);
}

static const struct mhi_device_id qaic_mhi_match_table[] = {
	{ .chan = "QAIC_CONTROL", },
	{},
};

static struct mhi_driver qaic_mhi_driver = {
	.id_table = qaic_mhi_match_table,
	.remove = qaic_mhi_remove,
	.probe = qaic_mhi_probe,
	.ul_xfer_cb = qaic_mhi_ul_xfer_cb,
	.dl_xfer_cb = qaic_mhi_dl_xfer_cb,
	.driver = {
		.name = "qaic_mhi",
	},
};

static const struct pci_device_id qaic_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_QCOM, PCI_DEV_AIC100), },
	{ }
};
MODULE_DEVICE_TABLE(pci, qaic_ids);

static const struct pci_error_handlers qaic_pci_err_handler = {
	.error_detected = qaic_pci_error_detected,
	.reset_prepare = qaic_pci_reset_prepare,
	.reset_done = qaic_pci_reset_done,
};

static struct pci_driver qaic_pci_driver = {
	.name = QAIC_NAME,
	.id_table = qaic_ids,
	.probe = qaic_pci_probe,
	.remove = qaic_pci_remove,
	.shutdown = qaic_pci_shutdown,
	.err_handler = &qaic_pci_err_handler,
};

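/*
 * Note that timesync and bootlog are treated as optional below; if either
 * fails to register, the failure is logged but module init still succeeds.
 */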
static int __init qaic_init(void)
{
	int ret;

	ret = pci_register_driver(&qaic_pci_driver);
	if (ret) {
		pr_debug("qaic: pci_register_driver failed %d\n", ret);
		return ret;
	}

	ret = mhi_driver_register(&qaic_mhi_driver);
	if (ret) {
		pr_debug("qaic: mhi_driver_register failed %d\n", ret);
		goto free_pci;
	}

	ret = sahara_register();
	if (ret) {
		pr_debug("qaic: sahara_register failed %d\n", ret);
		goto free_mhi;
	}

	ret = qaic_timesync_init();
	if (ret)
		pr_debug("qaic: qaic_timesync_init failed %d\n", ret);

	ret = qaic_bootlog_register();
	if (ret)
		pr_debug("qaic: qaic_bootlog_register failed %d\n", ret);

	return 0;

free_mhi:
	mhi_driver_unregister(&qaic_mhi_driver);
free_pci:
	pci_unregister_driver(&qaic_pci_driver);
	return ret;
}

static void __exit qaic_exit(void)
{
	/*
	 * We assume that qaic_pci_remove() is called due to a hotplug event
	 * which would mean that the link is down, and thus
	 * qaic_mhi_free_controller() should not try to access the device during
	 * cleanup.
	 * We call pci_unregister_driver() below, which also triggers
	 * qaic_pci_remove(), but since this is module exit, we expect the link
	 * to the device to be up, in which case qaic_mhi_free_controller()
	 * should try to access the device during cleanup to put the device in
	 * a sane state.
	 * For that reason, we set link_up here to let qaic_mhi_free_controller
	 * know the expected link state. Since the module is going to be
	 * removed at the end of this, we don't need to worry about
	 * reinitializing the link_up state after the cleanup is done.
	 */
	link_up = true;
	qaic_bootlog_unregister();
	qaic_timesync_deinit();
	sahara_unregister();
	mhi_driver_unregister(&qaic_mhi_driver);
	pci_unregister_driver(&qaic_pci_driver);
}

module_init(qaic_init);
module_exit(qaic_exit);

MODULE_AUTHOR(QAIC_DESC " Kernel Driver Team");
MODULE_DESCRIPTION(QAIC_DESC " Accel Driver");
MODULE_LICENSE("GPL");