// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
 *
 */

#include <linux/bitfield.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mhi.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include "internal.h"

static DEFINE_IDA(mhi_controller_ida);

const char * const mhi_ee_str[MHI_EE_MAX] = {
	[MHI_EE_PBL] = "PRIMARY BOOTLOADER",
	[MHI_EE_SBL] = "SECONDARY BOOTLOADER",
	[MHI_EE_AMSS] = "MISSION MODE",
	[MHI_EE_RDDM] = "RAMDUMP DOWNLOAD MODE",
	[MHI_EE_WFW] = "WLAN FIRMWARE",
	[MHI_EE_PTHRU] = "PASS THROUGH",
	[MHI_EE_EDL] = "EMERGENCY DOWNLOAD",
	[MHI_EE_FP] = "FLASH PROGRAMMER",
	[MHI_EE_DISABLE_TRANSITION] = "DISABLE",
	[MHI_EE_NOT_SUPPORTED] = "NOT SUPPORTED",
};

const char * const dev_state_tran_str[DEV_ST_TRANSITION_MAX] = {
	[DEV_ST_TRANSITION_PBL] = "PBL",
	[DEV_ST_TRANSITION_READY] = "READY",
	[DEV_ST_TRANSITION_SBL] = "SBL",
	[DEV_ST_TRANSITION_MISSION_MODE] = "MISSION MODE",
	[DEV_ST_TRANSITION_FP] = "FLASH PROGRAMMER",
	[DEV_ST_TRANSITION_SYS_ERR] = "SYS ERROR",
	[DEV_ST_TRANSITION_DISABLE] = "DISABLE",
};

const char * const mhi_ch_state_type_str[MHI_CH_STATE_TYPE_MAX] = {
	[MHI_CH_STATE_TYPE_RESET] = "RESET",
	[MHI_CH_STATE_TYPE_STOP] = "STOP",
	[MHI_CH_STATE_TYPE_START] = "START",
};

static const char * const mhi_pm_state_str[] = {
	[MHI_PM_STATE_DISABLE] = "DISABLE",
	[MHI_PM_STATE_POR] = "POWER ON RESET",
	[MHI_PM_STATE_M0] = "M0",
	[MHI_PM_STATE_M2] = "M2",
	[MHI_PM_STATE_M3_ENTER] = "M?->M3",
	[MHI_PM_STATE_M3] = "M3",
	[MHI_PM_STATE_M3_EXIT] = "M3->M0",
	[MHI_PM_STATE_FW_DL_ERR] = "Firmware Download Error",
	[MHI_PM_STATE_SYS_ERR_DETECT] = "SYS ERROR Detect",
	[MHI_PM_STATE_SYS_ERR_PROCESS] = "SYS ERROR Process",
	[MHI_PM_STATE_SHUTDOWN_PROCESS] = "SHUTDOWN Process",
	[MHI_PM_STATE_LD_ERR_FATAL_DETECT] = "Linkdown or Error Fatal Detect",
};

const char *to_mhi_pm_state_str(u32 state)
{
	int index;

	if (state)
		index = __fls(state);

	if (!state || index >= ARRAY_SIZE(mhi_pm_state_str))
		return "Invalid State";

	return mhi_pm_state_str[index];
}

static ssize_t serial_number_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return snprintf(buf, PAGE_SIZE, "Serial Number: %u\n",
			mhi_cntrl->serial_number);
}
static DEVICE_ATTR_RO(serial_number);

static ssize_t oem_pk_hash_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	int i, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(mhi_cntrl->oem_pk_hash); i++)
		cnt += snprintf(buf + cnt, PAGE_SIZE - cnt,
				"OEMPKHASH[%d]: 0x%x\n", i,
				mhi_cntrl->oem_pk_hash[i]);

	return cnt;
}
static DEVICE_ATTR_RO(oem_pk_hash);

static struct attribute *mhi_dev_attrs[] = {
	&dev_attr_serial_number.attr,
	&dev_attr_oem_pk_hash.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mhi_dev);

/* MHI protocol requires the transfer ring to be aligned with ring length */
static int mhi_alloc_aligned_ring(struct mhi_controller *mhi_cntrl,
				  struct mhi_ring *ring,
				  u64 len)
{
	ring->alloc_size = len + (len - 1);
	ring->pre_aligned = dma_alloc_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
					       &ring->dma_handle, GFP_KERNEL);
	if (!ring->pre_aligned)
		return -ENOMEM;

	ring->iommu_base = (ring->dma_handle + (len - 1)) & ~(len - 1);
	ring->base = ring->pre_aligned + (ring->iommu_base - ring->dma_handle);

	return 0;
}
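
/*
 * Illustrative example of the alignment math above, with hypothetical
 * numbers: for len = 0x1000, alloc_size = 0x1FFF, so a len-aligned address
 * is guaranteed to fall somewhere inside the allocation. If
 * dma_alloc_coherent() happened to return dma_handle = 0x80000440, then
 *
 *	iommu_base = (0x80000440 + 0xFFF) & ~0xFFF = 0x80001000
 *	base       = pre_aligned + (0x80001000 - 0x80000440)
 *
 * Note that the mask arithmetic rounds to a multiple of len only when len
 * is a power of two.
 */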

void mhi_deinit_free_irq(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}

	free_irq(mhi_cntrl->irq[0], mhi_cntrl);
}

int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
	struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
	int i, ret;

	/* if controller driver has set irq_flags, use it */
	if (mhi_cntrl->irq_flags)
		irq_flags = mhi_cntrl->irq_flags;

	/* Setup BHI_INTVEC IRQ */
	ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
				   mhi_intvec_threaded_handler,
				   irq_flags,
				   "bhi", mhi_cntrl);
	if (ret)
		return ret;

	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		if (mhi_event->irq >= mhi_cntrl->nr_irqs) {
			dev_err(dev, "irq %d not available for event ring\n",
				mhi_event->irq);
			ret = -EINVAL;
			goto error_request;
		}

		ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
				  mhi_irq_handler,
				  irq_flags,
				  "mhi", mhi_event);
		if (ret) {
			dev_err(dev, "Error requesting irq:%d for ev:%d\n",
				mhi_cntrl->irq[mhi_event->irq], i);
			goto error_request;
		}
	}

	return 0;

error_request:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		if (mhi_event->offload_ev)
			continue;

		free_irq(mhi_cntrl->irq[mhi_event->irq], mhi_event);
	}
	free_irq(mhi_cntrl->irq[0], mhi_cntrl);

	return ret;
}

void mhi_deinit_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	int i;
	struct mhi_ctxt *mhi_ctxt = mhi_cntrl->mhi_ctxt;
	struct mhi_cmd *mhi_cmd;
	struct mhi_event *mhi_event;
	struct mhi_ring *ring;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++) {
		ring = &mhi_cmd->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		ring = &mhi_event->ring;
		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
		ring->base = NULL;
		ring->iommu_base = 0;
	}

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

	kfree(mhi_ctxt);
	mhi_cntrl->mhi_ctxt = NULL;
}

int mhi_init_dev_ctxt(struct mhi_controller *mhi_cntrl)
{
	struct mhi_ctxt *mhi_ctxt;
	struct mhi_chan_ctxt *chan_ctxt;
	struct mhi_event_ctxt *er_ctxt;
	struct mhi_cmd_ctxt *cmd_ctxt;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	struct mhi_cmd *mhi_cmd;
	u32 tmp;
	int ret = -ENOMEM, i;

	atomic_set(&mhi_cntrl->dev_wake, 0);
	atomic_set(&mhi_cntrl->pending_pkts, 0);

	mhi_ctxt = kzalloc(sizeof(*mhi_ctxt), GFP_KERNEL);
	if (!mhi_ctxt)
		return -ENOMEM;

	/* Setup channel ctxt */
	mhi_ctxt->chan_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						 sizeof(*mhi_ctxt->chan_ctxt) *
						 mhi_cntrl->max_chan,
						 &mhi_ctxt->chan_ctxt_addr,
						 GFP_KERNEL);
	if (!mhi_ctxt->chan_ctxt)
		goto error_alloc_chan_ctxt;

	mhi_chan = mhi_cntrl->mhi_chan;
	chan_ctxt = mhi_ctxt->chan_ctxt;
	for (i = 0; i < mhi_cntrl->max_chan; i++, chan_ctxt++, mhi_chan++) {
		/* Skip if it is an offload channel */
		if (mhi_chan->offload_ch)
			continue;

		tmp = le32_to_cpu(chan_ctxt->chcfg);
		tmp &= ~CHAN_CTX_CHSTATE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
		tmp &= ~CHAN_CTX_BRSTMODE_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_BRSTMODE_MASK, mhi_chan->db_cfg.brstmode);
		tmp &= ~CHAN_CTX_POLLCFG_MASK;
		tmp |= FIELD_PREP(CHAN_CTX_POLLCFG_MASK, mhi_chan->db_cfg.pollcfg);
		chan_ctxt->chcfg = cpu_to_le32(tmp);

		chan_ctxt->chtype = cpu_to_le32(mhi_chan->type);
		chan_ctxt->erindex = cpu_to_le32(mhi_chan->er_index);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;
		mhi_chan->tre_ring.db_addr = (void __iomem *)&chan_ctxt->wp;
	}

	/* Setup event context */
	mhi_ctxt->er_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
					       sizeof(*mhi_ctxt->er_ctxt) *
					       mhi_cntrl->total_ev_rings,
					       &mhi_ctxt->er_ctxt_addr,
					       GFP_KERNEL);
	if (!mhi_ctxt->er_ctxt)
		goto error_alloc_er_ctxt;

	er_ctxt = mhi_ctxt->er_ctxt;
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, er_ctxt++,
		     mhi_event++) {
		struct mhi_ring *ring = &mhi_event->ring;

		/* Skip if it is an offload event */
		if (mhi_event->offload_ev)
			continue;

		tmp = le32_to_cpu(er_ctxt->intmod);
		tmp &= ~EV_CTX_INTMODC_MASK;
		tmp &= ~EV_CTX_INTMODT_MASK;
		tmp |= FIELD_PREP(EV_CTX_INTMODT_MASK, mhi_event->intmod);
		er_ctxt->intmod = cpu_to_le32(tmp);

		er_ctxt->ertype = cpu_to_le32(MHI_ER_TYPE_VALID);
		er_ctxt->msivec = cpu_to_le32(mhi_event->irq);
		mhi_event->db_cfg.db_mode = true;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_er;

		/*
		 * If the read pointer equals the write pointer, then the
		 * ring is empty
		 */
		ring->rp = ring->wp = ring->base;
		er_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		er_ctxt->rp = er_ctxt->wp = er_ctxt->rbase;
		er_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &er_ctxt->wp;
	}

	/* Setup cmd context */
	ret = -ENOMEM;
	mhi_ctxt->cmd_ctxt = dma_alloc_coherent(mhi_cntrl->cntrl_dev,
						sizeof(*mhi_ctxt->cmd_ctxt) *
						NR_OF_CMD_RINGS,
						&mhi_ctxt->cmd_ctxt_addr,
						GFP_KERNEL);
	if (!mhi_ctxt->cmd_ctxt)
		goto error_alloc_er;

	mhi_cmd = mhi_cntrl->mhi_cmd;
	cmd_ctxt = mhi_ctxt->cmd_ctxt;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++, cmd_ctxt++) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		ring->el_size = sizeof(struct mhi_ring_element);
		ring->elements = CMD_EL_PER_RING;
		ring->len = ring->el_size * ring->elements;
		ret = mhi_alloc_aligned_ring(mhi_cntrl, ring, ring->len);
		if (ret)
			goto error_alloc_cmd;

		ring->rp = ring->wp = ring->base;
		cmd_ctxt->rbase = cpu_to_le64(ring->iommu_base);
		cmd_ctxt->rp = cmd_ctxt->wp = cmd_ctxt->rbase;
		cmd_ctxt->rlen = cpu_to_le64(ring->len);
		ring->ctxt_wp = &cmd_ctxt->wp;
	}

	mhi_cntrl->mhi_ctxt = mhi_ctxt;

	return 0;

error_alloc_cmd:
	for (--i, --mhi_cmd; i >= 0; i--, mhi_cmd--) {
		struct mhi_ring *ring = &mhi_cmd->ring;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev,
			  sizeof(*mhi_ctxt->cmd_ctxt) * NR_OF_CMD_RINGS,
			  mhi_ctxt->cmd_ctxt, mhi_ctxt->cmd_ctxt_addr);
	i = mhi_cntrl->total_ev_rings;
	mhi_event = mhi_cntrl->mhi_event + i;

error_alloc_er:
	for (--i, --mhi_event; i >= 0; i--, mhi_event--) {
		struct mhi_ring *ring = &mhi_event->ring;

		if (mhi_event->offload_ev)
			continue;

		dma_free_coherent(mhi_cntrl->cntrl_dev, ring->alloc_size,
				  ring->pre_aligned, ring->dma_handle);
	}
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->er_ctxt) *
			  mhi_cntrl->total_ev_rings, mhi_ctxt->er_ctxt,
			  mhi_ctxt->er_ctxt_addr);

error_alloc_er_ctxt:
	dma_free_coherent(mhi_cntrl->cntrl_dev, sizeof(*mhi_ctxt->chan_ctxt) *
			  mhi_cntrl->max_chan, mhi_ctxt->chan_ctxt,
			  mhi_ctxt->chan_ctxt_addr);

error_alloc_chan_ctxt:
	kfree(mhi_ctxt);

	return ret;
}

int mhi_init_mmio(struct mhi_controller *mhi_cntrl)
{
	u32 val;
	int i, ret;
	struct mhi_chan *mhi_chan;
	struct mhi_event *mhi_event;
	void __iomem *base = mhi_cntrl->regs;
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	struct {
		u32 offset;
		u32 mask;
		u32 val;
	} reg_info[] = {
		{
			CCABAP_HIGHER, U32_MAX,
			upper_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			CCABAP_LOWER, U32_MAX,
			lower_32_bits(mhi_cntrl->mhi_ctxt->chan_ctxt_addr),
		},
		{
			ECABAP_HIGHER, U32_MAX,
			upper_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			ECABAP_LOWER, U32_MAX,
			lower_32_bits(mhi_cntrl->mhi_ctxt->er_ctxt_addr),
		},
		{
			CRCBAP_HIGHER, U32_MAX,
			upper_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			CRCBAP_LOWER, U32_MAX,
			lower_32_bits(mhi_cntrl->mhi_ctxt->cmd_ctxt_addr),
		},
		{
			MHICFG, MHICFG_NER_MASK,
			mhi_cntrl->total_ev_rings,
		},
		{
			MHICFG, MHICFG_NHWER_MASK,
			mhi_cntrl->hw_ev_rings,
		},
		{
			MHICTRLBASE_HIGHER, U32_MAX,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLBASE_LOWER, U32_MAX,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_HIGHER, U32_MAX,
			upper_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHIDATABASE_LOWER, U32_MAX,
			lower_32_bits(mhi_cntrl->iova_start),
		},
		{
			MHICTRLLIMIT_HIGHER, U32_MAX,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHICTRLLIMIT_LOWER, U32_MAX,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_HIGHER, U32_MAX,
			upper_32_bits(mhi_cntrl->iova_stop),
		},
		{
			MHIDATALIMIT_LOWER, U32_MAX,
			lower_32_bits(mhi_cntrl->iova_stop),
		},
		{ 0, 0, 0 }
	};

	dev_dbg(dev, "Initializing MHI registers\n");

	/* Read channel db offset */
	ret = mhi_read_reg(mhi_cntrl, base, CHDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read CHDBOFF register\n");
		return -EIO;
	}

	/* Setup wake db */
	mhi_cntrl->wake_db = base + val + (8 * MHI_DEV_WAKE_DB);
	mhi_cntrl->wake_set = false;

	/* Setup channel db address for each channel in tre_ring */
	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, val += 8, mhi_chan++)
		mhi_chan->tre_ring.db_addr = base + val;

	/* Read event ring db offset */
	ret = mhi_read_reg(mhi_cntrl, base, ERDBOFF, &val);
	if (ret) {
		dev_err(dev, "Unable to read ERDBOFF register\n");
		return -EIO;
	}

	/* Setup event db address for each ev_ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, val += 8, mhi_event++) {
		if (mhi_event->offload_ev)
			continue;

		mhi_event->ring.db_addr = base + val;
	}

	/* Setup DB register for primary CMD rings */
	mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING].ring.db_addr = base + CRDB_LOWER;

	/* Write to MMIO registers */
	for (i = 0; reg_info[i].offset; i++)
		mhi_write_reg_field(mhi_cntrl, base, reg_info[i].offset,
				    reg_info[i].mask, reg_info[i].val);

	return 0;
}

void mhi_deinit_chan_ctxt(struct mhi_controller *mhi_cntrl,
			  struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];

	if (!chan_ctxt->rbase) /* Already uninitialized */
		return;

	dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
			  tre_ring->pre_aligned, tre_ring->dma_handle);
	vfree(buf_ring->base);

	buf_ring->base = tre_ring->base = NULL;
	tre_ring->ctxt_wp = NULL;
	chan_ctxt->rbase = 0;
	chan_ctxt->rlen = 0;
	chan_ctxt->rp = 0;
	chan_ctxt->wp = 0;

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_DISABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	/* Update to all cores */
	smp_wmb();
}

int mhi_init_chan_ctxt(struct mhi_controller *mhi_cntrl,
		       struct mhi_chan *mhi_chan)
{
	struct mhi_ring *buf_ring;
	struct mhi_ring *tre_ring;
	struct mhi_chan_ctxt *chan_ctxt;
	u32 tmp;
	int ret;

	buf_ring = &mhi_chan->buf_ring;
	tre_ring = &mhi_chan->tre_ring;
	tre_ring->el_size = sizeof(struct mhi_ring_element);
	tre_ring->len = tre_ring->el_size * tre_ring->elements;
	chan_ctxt = &mhi_cntrl->mhi_ctxt->chan_ctxt[mhi_chan->chan];
	ret = mhi_alloc_aligned_ring(mhi_cntrl, tre_ring, tre_ring->len);
	if (ret)
		return -ENOMEM;

	buf_ring->el_size = sizeof(struct mhi_buf_info);
	buf_ring->len = buf_ring->el_size * buf_ring->elements;
	buf_ring->base = vzalloc(buf_ring->len);

	if (!buf_ring->base) {
		dma_free_coherent(mhi_cntrl->cntrl_dev, tre_ring->alloc_size,
				  tre_ring->pre_aligned, tre_ring->dma_handle);
		return -ENOMEM;
	}

	tmp = le32_to_cpu(chan_ctxt->chcfg);
	tmp &= ~CHAN_CTX_CHSTATE_MASK;
	tmp |= FIELD_PREP(CHAN_CTX_CHSTATE_MASK, MHI_CH_STATE_ENABLED);
	chan_ctxt->chcfg = cpu_to_le32(tmp);

	chan_ctxt->rbase = cpu_to_le64(tre_ring->iommu_base);
	chan_ctxt->rp = chan_ctxt->wp = chan_ctxt->rbase;
	chan_ctxt->rlen = cpu_to_le64(tre_ring->len);
	tre_ring->ctxt_wp = &chan_ctxt->wp;

	tre_ring->rp = tre_ring->wp = tre_ring->base;
	buf_ring->rp = buf_ring->wp = buf_ring->base;
	mhi_chan->db_cfg.db_mode = 1;

	/* Update to all cores */
	smp_wmb();

	return 0;
}

static int parse_ev_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	const struct mhi_event_config *event_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i, num;

	num = config->num_events;
	mhi_cntrl->total_ev_rings = num;
	mhi_cntrl->mhi_event = kcalloc(num, sizeof(*mhi_cntrl->mhi_event),
				       GFP_KERNEL);
	if (!mhi_cntrl->mhi_event)
		return -ENOMEM;

	/* Populate event ring */
	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < num; i++) {
		event_cfg = &config->event_cfg[i];

		mhi_event->er_index = i;
		mhi_event->ring.elements = event_cfg->num_elements;
		mhi_event->intmod = event_cfg->irq_moderation_ms;
		mhi_event->irq = event_cfg->irq;

		if (event_cfg->channel != U32_MAX) {
			/* This event ring has a dedicated channel */
			mhi_event->chan = event_cfg->channel;
			if (mhi_event->chan >= mhi_cntrl->max_chan) {
				dev_err(dev,
					"Event Ring channel not available\n");
				goto error_ev_cfg;
			}

			mhi_event->mhi_chan =
				&mhi_cntrl->mhi_chan[mhi_event->chan];
		}

		/* Priority is fixed to 1 for now */
		mhi_event->priority = 1;

		mhi_event->db_cfg.brstmode = event_cfg->mode;
		if (MHI_INVALID_BRSTMODE(mhi_event->db_cfg.brstmode))
			goto error_ev_cfg;

		if (mhi_event->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_event->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_event->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_event->data_type = event_cfg->data_type;

		switch (mhi_event->data_type) {
		case MHI_ER_DATA:
			mhi_event->process_event = mhi_process_data_event_ring;
			break;
		case MHI_ER_CTRL:
			mhi_event->process_event = mhi_process_ctrl_ev_ring;
			break;
		default:
			dev_err(dev, "Event Ring type not supported\n");
			goto error_ev_cfg;
		}

		mhi_event->hw_ring = event_cfg->hardware_event;
		if (mhi_event->hw_ring)
			mhi_cntrl->hw_ev_rings++;
		else
			mhi_cntrl->sw_ev_rings++;

		mhi_event->cl_manage = event_cfg->client_managed;
		mhi_event->offload_ev = event_cfg->offload_channel;
		mhi_event++;
	}

	return 0;

error_ev_cfg:

	kfree(mhi_cntrl->mhi_event);
	return -EINVAL;
}

static int parse_ch_cfg(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	const struct mhi_channel_config *ch_cfg;
	struct device *dev = mhi_cntrl->cntrl_dev;
	int i;
	u32 chan;

	mhi_cntrl->max_chan = config->max_channels;

	/*
	 * The allocation of MHI channels can exceed 32KB in some scenarios,
	 * so to avoid any possible memory allocation failures, vzalloc is
	 * used here
	 */
	mhi_cntrl->mhi_chan = vzalloc(mhi_cntrl->max_chan *
				      sizeof(*mhi_cntrl->mhi_chan));
	if (!mhi_cntrl->mhi_chan)
		return -ENOMEM;

	INIT_LIST_HEAD(&mhi_cntrl->lpm_chans);

	/* Populate channel configurations */
	for (i = 0; i < config->num_channels; i++) {
		struct mhi_chan *mhi_chan;

		ch_cfg = &config->ch_cfg[i];

		chan = ch_cfg->num;
		if (chan >= mhi_cntrl->max_chan) {
			dev_err(dev, "Channel %d not available\n", chan);
			goto error_chan_cfg;
		}

		mhi_chan = &mhi_cntrl->mhi_chan[chan];
		mhi_chan->name = ch_cfg->name;
		mhi_chan->chan = chan;

		mhi_chan->tre_ring.elements = ch_cfg->num_elements;
		if (!mhi_chan->tre_ring.elements)
			goto error_chan_cfg;

		/*
		 * For some channels, the local ring length should be bigger
		 * than the transfer ring length due to internal logical
		 * channels in the device, so the host can queue many more
		 * buffers than the transfer ring length allows. For example,
		 * RSC channels should have a larger local channel length than
		 * the transfer ring length.
		 */
		mhi_chan->buf_ring.elements = ch_cfg->local_elements;
		if (!mhi_chan->buf_ring.elements)
			mhi_chan->buf_ring.elements = mhi_chan->tre_ring.elements;
		mhi_chan->er_index = ch_cfg->event_ring;
		mhi_chan->dir = ch_cfg->dir;

		/*
		 * For most channels, chtype is identical to the channel
		 * direction. So, if it is not defined, assign the channel
		 * direction to chtype.
		 */
		mhi_chan->type = ch_cfg->type;
		if (!mhi_chan->type)
			mhi_chan->type = (enum mhi_ch_type)mhi_chan->dir;

		mhi_chan->ee_mask = ch_cfg->ee_mask;
		mhi_chan->db_cfg.pollcfg = ch_cfg->pollcfg;
		mhi_chan->lpm_notify = ch_cfg->lpm_notify;
		mhi_chan->offload_ch = ch_cfg->offload_channel;
		mhi_chan->db_cfg.reset_req = ch_cfg->doorbell_mode_switch;
		mhi_chan->pre_alloc = ch_cfg->auto_queue;
		mhi_chan->wake_capable = ch_cfg->wake_capable;

		/*
		 * If the MHI host allocates buffers, then the channel
		 * direction should be DMA_FROM_DEVICE
		 */
		if (mhi_chan->pre_alloc && mhi_chan->dir != DMA_FROM_DEVICE) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		/*
		 * Bi-directional and directionless channels must be offload
		 * channels
		 */
		if ((mhi_chan->dir == DMA_BIDIRECTIONAL ||
		     mhi_chan->dir == DMA_NONE) && !mhi_chan->offload_ch) {
			dev_err(dev, "Invalid channel configuration\n");
			goto error_chan_cfg;
		}

		if (!mhi_chan->offload_ch) {
			mhi_chan->db_cfg.brstmode = ch_cfg->doorbell;
			if (MHI_INVALID_BRSTMODE(mhi_chan->db_cfg.brstmode)) {
				dev_err(dev, "Invalid Door bell mode\n");
				goto error_chan_cfg;
			}
		}

		if (mhi_chan->db_cfg.brstmode == MHI_DB_BRST_ENABLE)
			mhi_chan->db_cfg.process_db = mhi_db_brstmode;
		else
			mhi_chan->db_cfg.process_db = mhi_db_brstmode_disable;

		mhi_chan->configured = true;

		if (mhi_chan->lpm_notify)
			list_add_tail(&mhi_chan->node, &mhi_cntrl->lpm_chans);
	}

	return 0;

error_chan_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return -EINVAL;
}

static int parse_config(struct mhi_controller *mhi_cntrl,
			const struct mhi_controller_config *config)
{
	int ret;

	/* Parse MHI channel configuration */
	ret = parse_ch_cfg(mhi_cntrl, config);
	if (ret)
		return ret;

	/* Parse MHI event configuration */
	ret = parse_ev_cfg(mhi_cntrl, config);
	if (ret)
		goto error_ev_cfg;

	mhi_cntrl->timeout_ms = config->timeout_ms;
	if (!mhi_cntrl->timeout_ms)
		mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS;

	mhi_cntrl->bounce_buf = config->use_bounce_buf;
	mhi_cntrl->buffer_len = config->buf_len;
	if (!mhi_cntrl->buffer_len)
		mhi_cntrl->buffer_len = MHI_MAX_MTU;

	/* By default, host is allowed to ring DB in both M0 and M2 states */
	mhi_cntrl->db_access = MHI_PM_M0 | MHI_PM_M2;
	if (config->m2_no_db)
		mhi_cntrl->db_access &= ~MHI_PM_M2;

	return 0;

error_ev_cfg:
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}

int mhi_register_controller(struct mhi_controller *mhi_cntrl,
			    const struct mhi_controller_config *config)
{
	struct mhi_event *mhi_event;
	struct mhi_chan *mhi_chan;
	struct mhi_cmd *mhi_cmd;
	struct mhi_device *mhi_dev;
	u32 soc_info;
	int ret, i;

	if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->regs ||
	    !mhi_cntrl->runtime_get || !mhi_cntrl->runtime_put ||
	    !mhi_cntrl->status_cb || !mhi_cntrl->read_reg ||
	    !mhi_cntrl->write_reg || !mhi_cntrl->nr_irqs ||
	    !mhi_cntrl->irq || !mhi_cntrl->reg_len)
		return -EINVAL;

	ret = parse_config(mhi_cntrl, config);
	if (ret)
		return -EINVAL;

	mhi_cntrl->mhi_cmd = kcalloc(NR_OF_CMD_RINGS,
				     sizeof(*mhi_cntrl->mhi_cmd), GFP_KERNEL);
	if (!mhi_cntrl->mhi_cmd) {
		ret = -ENOMEM;
		goto err_free_event;
	}

	INIT_LIST_HEAD(&mhi_cntrl->transition_list);
	mutex_init(&mhi_cntrl->pm_mutex);
	rwlock_init(&mhi_cntrl->pm_lock);
	spin_lock_init(&mhi_cntrl->transition_lock);
	spin_lock_init(&mhi_cntrl->wlock);
	INIT_WORK(&mhi_cntrl->st_worker, mhi_pm_st_worker);
	init_waitqueue_head(&mhi_cntrl->state_event);

	mhi_cntrl->hiprio_wq = alloc_ordered_workqueue("mhi_hiprio_wq", WQ_HIGHPRI);
	if (!mhi_cntrl->hiprio_wq) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate workqueue\n");
		ret = -ENOMEM;
		goto err_free_cmd;
	}

	mhi_cmd = mhi_cntrl->mhi_cmd;
	for (i = 0; i < NR_OF_CMD_RINGS; i++, mhi_cmd++)
		spin_lock_init(&mhi_cmd->lock);

	mhi_event = mhi_cntrl->mhi_event;
	for (i = 0; i < mhi_cntrl->total_ev_rings; i++, mhi_event++) {
		/* Skip for offload events */
		if (mhi_event->offload_ev)
			continue;

		mhi_event->mhi_cntrl = mhi_cntrl;
		spin_lock_init(&mhi_event->lock);
		if (mhi_event->data_type == MHI_ER_CTRL)
			tasklet_init(&mhi_event->task, mhi_ctrl_ev_task,
				     (ulong)mhi_event);
		else
			tasklet_init(&mhi_event->task, mhi_ev_task,
				     (ulong)mhi_event);
	}

	mhi_chan = mhi_cntrl->mhi_chan;
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		mutex_init(&mhi_chan->mutex);
		init_completion(&mhi_chan->completion);
		rwlock_init(&mhi_chan->lock);

		/* used in setting bei field of TRE */
		mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index];
		mhi_chan->intmod = mhi_event->intmod;
	}

	if (mhi_cntrl->bounce_buf) {
		mhi_cntrl->map_single = mhi_map_single_use_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_use_bb;
	} else {
		mhi_cntrl->map_single = mhi_map_single_no_bb;
		mhi_cntrl->unmap_single = mhi_unmap_single_no_bb;
	}

	/* Read the MHI device info */
	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs,
			   SOC_HW_VERSION_OFFS, &soc_info);
	if (ret)
		goto err_destroy_wq;

	mhi_cntrl->family_number = FIELD_GET(SOC_HW_VERSION_FAM_NUM_BMSK, soc_info);
	mhi_cntrl->device_number = FIELD_GET(SOC_HW_VERSION_DEV_NUM_BMSK, soc_info);
	mhi_cntrl->major_version = FIELD_GET(SOC_HW_VERSION_MAJOR_VER_BMSK, soc_info);
	mhi_cntrl->minor_version = FIELD_GET(SOC_HW_VERSION_MINOR_VER_BMSK, soc_info);

	mhi_cntrl->index = ida_alloc(&mhi_controller_ida, GFP_KERNEL);
	if (mhi_cntrl->index < 0) {
		ret = mhi_cntrl->index;
		goto err_destroy_wq;
	}

	/* Register controller with MHI bus */
	mhi_dev = mhi_alloc_device(mhi_cntrl);
	if (IS_ERR(mhi_dev)) {
		dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate MHI device\n");
		ret = PTR_ERR(mhi_dev);
		goto err_ida_free;
	}

	mhi_dev->dev_type = MHI_DEVICE_CONTROLLER;
	mhi_dev->mhi_cntrl = mhi_cntrl;
	dev_set_name(&mhi_dev->dev, "mhi%d", mhi_cntrl->index);
	mhi_dev->name = dev_name(&mhi_dev->dev);

	/* Init wakeup source */
	device_init_wakeup(&mhi_dev->dev, true);

	ret = device_add(&mhi_dev->dev);
	if (ret)
		goto err_release_dev;

	mhi_cntrl->mhi_dev = mhi_dev;

	mhi_create_debugfs(mhi_cntrl);

	return 0;

err_release_dev:
	put_device(&mhi_dev->dev);
err_ida_free:
	ida_free(&mhi_controller_ida, mhi_cntrl->index);
err_destroy_wq:
	destroy_workqueue(mhi_cntrl->hiprio_wq);
err_free_cmd:
	kfree(mhi_cntrl->mhi_cmd);
err_free_event:
	kfree(mhi_cntrl->mhi_event);
	vfree(mhi_cntrl->mhi_chan);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_register_controller);
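
/*
 * Typical call order from a controller (bus glue) driver, as an illustrative
 * sketch only; the exact callback wiring and error handling depend on the
 * individual glue driver:
 *
 *	mhi_cntrl = mhi_alloc_controller();
 *	// fill in cntrl_dev, regs, reg_len, irq/nr_irqs, read_reg/write_reg,
 *	// runtime_get/runtime_put, status_cb, ...
 *	ret = mhi_register_controller(mhi_cntrl, config);
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	ret = mhi_sync_power_up(mhi_cntrl);
 *
 * Teardown mirrors this: mhi_power_down(), mhi_unprepare_after_power_down(),
 * mhi_unregister_controller() and mhi_free_controller().
 */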

void mhi_unregister_controller(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev = mhi_cntrl->mhi_dev;
	struct mhi_chan *mhi_chan = mhi_cntrl->mhi_chan;
	unsigned int i;

	mhi_destroy_debugfs(mhi_cntrl);

	destroy_workqueue(mhi_cntrl->hiprio_wq);
	kfree(mhi_cntrl->mhi_cmd);
	kfree(mhi_cntrl->mhi_event);

	/* Drop the references to MHI devices created for channels */
	for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) {
		if (!mhi_chan->mhi_dev)
			continue;

		put_device(&mhi_chan->mhi_dev->dev);
	}
	vfree(mhi_cntrl->mhi_chan);

	device_del(&mhi_dev->dev);
	put_device(&mhi_dev->dev);

	ida_free(&mhi_controller_ida, mhi_cntrl->index);
}
EXPORT_SYMBOL_GPL(mhi_unregister_controller);

struct mhi_controller *mhi_alloc_controller(void)
{
	struct mhi_controller *mhi_cntrl;

	mhi_cntrl = kzalloc(sizeof(*mhi_cntrl), GFP_KERNEL);

	return mhi_cntrl;
}
EXPORT_SYMBOL_GPL(mhi_alloc_controller);

void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_free_controller);

int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl)
{
	struct device *dev = &mhi_cntrl->mhi_dev->dev;
	u32 bhi_off, bhie_off;
	int ret;

	mutex_lock(&mhi_cntrl->pm_mutex);

	ret = mhi_init_dev_ctxt(mhi_cntrl);
	if (ret)
		goto error_dev_ctxt;

	ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIOFF, &bhi_off);
	if (ret) {
		dev_err(dev, "Error getting BHI offset\n");
		goto error_reg_offset;
	}

	if (bhi_off >= mhi_cntrl->reg_len) {
		dev_err(dev, "BHI offset: 0x%x is out of range: 0x%zx\n",
			bhi_off, mhi_cntrl->reg_len);
		ret = -EINVAL;
		goto error_reg_offset;
	}
	mhi_cntrl->bhi = mhi_cntrl->regs + bhi_off;

	if (mhi_cntrl->fbc_download || mhi_cntrl->rddm_size) {
		ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->regs, BHIEOFF,
				   &bhie_off);
		if (ret) {
			dev_err(dev, "Error getting BHIE offset\n");
			goto error_reg_offset;
		}

		if (bhie_off >= mhi_cntrl->reg_len) {
			dev_err(dev,
				"BHIe offset: 0x%x is out of range: 0x%zx\n",
				bhie_off, mhi_cntrl->reg_len);
			ret = -EINVAL;
			goto error_reg_offset;
		}
		mhi_cntrl->bhie = mhi_cntrl->regs + bhie_off;
	}
	if (mhi_cntrl->rddm_size) {
		/*
		 * This controller supports RDDM, so we need to manually clear
		 * BHIE RX registers since POR values are undefined.
		 */
		memset_io(mhi_cntrl->bhie + BHIE_RXVECADDR_LOW_OFFS,
			  0, BHIE_RXVECSTATUS_OFFS - BHIE_RXVECADDR_LOW_OFFS +
			  4);
		/*
		 * Allocate RDDM table for debugging purpose if specified
		 */
		mhi_alloc_bhie_table(mhi_cntrl, &mhi_cntrl->rddm_image,
				     mhi_cntrl->rddm_size);
		if (mhi_cntrl->rddm_image)
			mhi_rddm_prepare(mhi_cntrl, mhi_cntrl->rddm_image);
	}

	mutex_unlock(&mhi_cntrl->pm_mutex);

	return 0;

error_reg_offset:
	mhi_deinit_dev_ctxt(mhi_cntrl);

error_dev_ctxt:
	mutex_unlock(&mhi_cntrl->pm_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(mhi_prepare_for_power_up);

void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl)
{
	if (mhi_cntrl->fbc_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->fbc_image);
		mhi_cntrl->fbc_image = NULL;
	}

	if (mhi_cntrl->rddm_image) {
		mhi_free_bhie_table(mhi_cntrl, mhi_cntrl->rddm_image);
		mhi_cntrl->rddm_image = NULL;
	}

	mhi_cntrl->bhi = NULL;
	mhi_cntrl->bhie = NULL;

	mhi_deinit_dev_ctxt(mhi_cntrl);
}
EXPORT_SYMBOL_GPL(mhi_unprepare_after_power_down);

static void mhi_release_device(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	/*
	 * We need to set the mhi_chan->mhi_dev to NULL here since the MHI
	 * devices for the channels will only get created if the mhi_dev
	 * associated with them is NULL. This scenario will happen during the
	 * controller suspend and resume.
	 */
	if (mhi_dev->ul_chan)
		mhi_dev->ul_chan->mhi_dev = NULL;

	if (mhi_dev->dl_chan)
		mhi_dev->dl_chan->mhi_dev = NULL;

	kfree(mhi_dev);
}

struct mhi_device *mhi_alloc_device(struct mhi_controller *mhi_cntrl)
{
	struct mhi_device *mhi_dev;
	struct device *dev;

	mhi_dev = kzalloc(sizeof(*mhi_dev), GFP_KERNEL);
	if (!mhi_dev)
		return ERR_PTR(-ENOMEM);

	dev = &mhi_dev->dev;
	device_initialize(dev);
	dev->bus = &mhi_bus_type;
	dev->release = mhi_release_device;

	if (mhi_cntrl->mhi_dev) {
		/* for MHI client devices, parent is the MHI controller device */
		dev->parent = &mhi_cntrl->mhi_dev->dev;
	} else {
		/* for MHI controller device, parent is the bus device (e.g. pci device) */
		dev->parent = mhi_cntrl->cntrl_dev;
	}

	mhi_dev->mhi_cntrl = mhi_cntrl;
	mhi_dev->dev_wake = 0;

	return mhi_dev;
}

static int mhi_driver_probe(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct device_driver *drv = dev->driver;
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	struct mhi_event *mhi_event;
	struct mhi_chan *ul_chan = mhi_dev->ul_chan;
	struct mhi_chan *dl_chan = mhi_dev->dl_chan;
	int ret;

	/* Bring device out of LPM */
	ret = mhi_device_get_sync(mhi_dev);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (ul_chan) {
		/*
		 * If the channel supports LPM notifications, status_cb
		 * should be provided
		 */
		if (ul_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!ul_chan->offload_ch && !mhi_drv->ul_xfer_cb)
			goto exit_probe;

		ul_chan->xfer_cb = mhi_drv->ul_xfer_cb;
	}

	ret = -EINVAL;
	if (dl_chan) {
		/*
		 * If the channel supports LPM notifications, status_cb
		 * should be provided
		 */
		if (dl_chan->lpm_notify && !mhi_drv->status_cb)
			goto exit_probe;

		/* For non-offload channels, xfer_cb should be provided */
		if (!dl_chan->offload_ch && !mhi_drv->dl_xfer_cb)
			goto exit_probe;

		mhi_event = &mhi_cntrl->mhi_event[dl_chan->er_index];

		/*
		 * If the channel event ring is managed by client, then
		 * status_cb must be provided so that the framework can
		 * notify pending data
		 */
		if (mhi_event->cl_manage && !mhi_drv->status_cb)
			goto exit_probe;

		dl_chan->xfer_cb = mhi_drv->dl_xfer_cb;
	}

	/* Call the user provided probe function */
	ret = mhi_drv->probe(mhi_dev, mhi_dev->id);
	if (ret)
		goto exit_probe;

	mhi_device_put(mhi_dev);

	return ret;

exit_probe:
	mhi_unprepare_from_transfer(mhi_dev);

	mhi_device_put(mhi_dev);

	return ret;
}

static int mhi_driver_remove(struct device *dev)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(dev->driver);
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
	struct mhi_chan *mhi_chan;
	enum mhi_ch_state ch_state[] = {
		MHI_CH_STATE_DISABLED,
		MHI_CH_STATE_DISABLED
	};
	int dir;

	/* Skip if it is a controller device */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	/* Reset both channels */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		/* Wake all threads waiting for completion */
		write_lock_irq(&mhi_chan->lock);
		mhi_chan->ccs = MHI_EV_CC_INVALID;
		complete_all(&mhi_chan->completion);
		write_unlock_irq(&mhi_chan->lock);

		/* Save the state and mark the channel as suspended */
		mutex_lock(&mhi_chan->mutex);
		write_lock_irq(&mhi_chan->lock);
		ch_state[dir] = mhi_chan->ch_state;
		mhi_chan->ch_state = MHI_CH_STATE_SUSPENDED;
		write_unlock_irq(&mhi_chan->lock);

		/* Reset the non-offload channel */
		if (!mhi_chan->offload_ch)
			mhi_reset_chan(mhi_cntrl, mhi_chan);

		mutex_unlock(&mhi_chan->mutex);
	}

	mhi_drv->remove(mhi_dev);

	/* De-init channel if it was enabled */
	for (dir = 0; dir < 2; dir++) {
		mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan;

		if (!mhi_chan)
			continue;

		mutex_lock(&mhi_chan->mutex);

		if ((ch_state[dir] == MHI_CH_STATE_ENABLED ||
		     ch_state[dir] == MHI_CH_STATE_STOP) &&
		    !mhi_chan->offload_ch)
			mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan);

		mhi_chan->ch_state = MHI_CH_STATE_DISABLED;

		mutex_unlock(&mhi_chan->mutex);
	}

	while (mhi_dev->dev_wake)
		mhi_device_put(mhi_dev);

	return 0;
}

int __mhi_driver_register(struct mhi_driver *mhi_drv, struct module *owner)
{
	struct device_driver *driver = &mhi_drv->driver;

	if (!mhi_drv->probe || !mhi_drv->remove)
		return -EINVAL;

	driver->bus = &mhi_bus_type;
	driver->owner = owner;
	driver->probe = mhi_driver_probe;
	driver->remove = mhi_driver_remove;

	return driver_register(driver);
}
EXPORT_SYMBOL_GPL(__mhi_driver_register);
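
/*
 * Minimal client driver sketch, illustrative only; the "foo" names are
 * hypothetical and the channel name must match one exposed by the controller
 * configuration:
 *
 *	static const struct mhi_device_id foo_id_table[] = {
 *		{ .chan = "FOO", .driver_data = 0 },
 *		{}
 *	};
 *	MODULE_DEVICE_TABLE(mhi, foo_id_table);
 *
 *	static struct mhi_driver foo_driver = {
 *		.id_table = foo_id_table,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *		.ul_xfer_cb = foo_ul_cb,
 *		.dl_xfer_cb = foo_dl_cb,
 *		.driver = { .name = "foo" },
 *	};
 *	module_mhi_driver(foo_driver);
 */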

void mhi_driver_unregister(struct mhi_driver *mhi_drv)
{
	driver_unregister(&mhi_drv->driver);
}
EXPORT_SYMBOL_GPL(mhi_driver_unregister);

static int mhi_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);

	return add_uevent_var(env, "MODALIAS=" MHI_DEVICE_MODALIAS_FMT,
			      mhi_dev->name);
}

static int mhi_match(struct device *dev, struct device_driver *drv)
{
	struct mhi_device *mhi_dev = to_mhi_device(dev);
	struct mhi_driver *mhi_drv = to_mhi_driver(drv);
	const struct mhi_device_id *id;

	/*
	 * If the device is a controller type then there is no client driver
	 * associated with it
	 */
	if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER)
		return 0;

	for (id = mhi_drv->id_table; id->chan[0]; id++)
		if (!strcmp(mhi_dev->name, id->chan)) {
			mhi_dev->id = id;
			return 1;
		}

	return 0;
}

struct bus_type mhi_bus_type = {
	.name = "mhi",
	.dev_name = "mhi",
	.match = mhi_match,
	.uevent = mhi_uevent,
	.dev_groups = mhi_dev_groups,
};

static int __init mhi_init(void)
{
	mhi_debugfs_init();
	return bus_register(&mhi_bus_type);
}

static void __exit mhi_exit(void)
{
	mhi_debugfs_exit();
	bus_unregister(&mhi_bus_type);
}

postcore_initcall(mhi_init);
module_exit(mhi_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MHI Host Interface");