1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2018-2020, The Linux Foundation. All rights reserved. 4 * 5 */ 6 7 #include <linux/delay.h> 8 #include <linux/device.h> 9 #include <linux/dma-direction.h> 10 #include <linux/dma-mapping.h> 11 #include <linux/interrupt.h> 12 #include <linux/list.h> 13 #include <linux/mhi.h> 14 #include <linux/module.h> 15 #include <linux/skbuff.h> 16 #include <linux/slab.h> 17 #include "internal.h" 18 19 int __must_check mhi_read_reg(struct mhi_controller *mhi_cntrl, 20 void __iomem *base, u32 offset, u32 *out) 21 { 22 return mhi_cntrl->read_reg(mhi_cntrl, base + offset, out); 23 } 24 25 int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, 26 void __iomem *base, u32 offset, 27 u32 mask, u32 *out) 28 { 29 u32 tmp; 30 int ret; 31 32 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); 33 if (ret) 34 return ret; 35 36 *out = (tmp & mask) >> __ffs(mask); 37 38 return 0; 39 } 40 41 int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, 42 void __iomem *base, u32 offset, 43 u32 mask, u32 val, u32 delayus) 44 { 45 int ret; 46 u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; 47 48 while (retry--) { 49 ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); 50 if (ret) 51 return ret; 52 53 if (out == val) 54 return 0; 55 56 fsleep(delayus); 57 } 58 59 return -ETIMEDOUT; 60 } 61 62 void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, 63 u32 offset, u32 val) 64 { 65 mhi_cntrl->write_reg(mhi_cntrl, base + offset, val); 66 } 67 68 int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, 69 void __iomem *base, u32 offset, u32 mask, 70 u32 val) 71 { 72 int ret; 73 u32 tmp; 74 75 ret = mhi_read_reg(mhi_cntrl, base, offset, &tmp); 76 if (ret) 77 return ret; 78 79 tmp &= ~mask; 80 tmp |= (val << __ffs(mask)); 81 mhi_write_reg(mhi_cntrl, base, offset, tmp); 82 83 return 0; 84 } 85 86 void mhi_write_db(struct mhi_controller *mhi_cntrl, void __iomem *db_addr, 87 dma_addr_t db_val) 88 { 89 mhi_write_reg(mhi_cntrl, db_addr, 4, upper_32_bits(db_val)); 90 mhi_write_reg(mhi_cntrl, db_addr, 0, lower_32_bits(db_val)); 91 } 92 93 void mhi_db_brstmode(struct mhi_controller *mhi_cntrl, 94 struct db_cfg *db_cfg, 95 void __iomem *db_addr, 96 dma_addr_t db_val) 97 { 98 if (db_cfg->db_mode) { 99 db_cfg->db_val = db_val; 100 mhi_write_db(mhi_cntrl, db_addr, db_val); 101 db_cfg->db_mode = 0; 102 } 103 } 104 105 void mhi_db_brstmode_disable(struct mhi_controller *mhi_cntrl, 106 struct db_cfg *db_cfg, 107 void __iomem *db_addr, 108 dma_addr_t db_val) 109 { 110 db_cfg->db_val = db_val; 111 mhi_write_db(mhi_cntrl, db_addr, db_val); 112 } 113 114 void mhi_ring_er_db(struct mhi_event *mhi_event) 115 { 116 struct mhi_ring *ring = &mhi_event->ring; 117 118 mhi_event->db_cfg.process_db(mhi_event->mhi_cntrl, &mhi_event->db_cfg, 119 ring->db_addr, le64_to_cpu(*ring->ctxt_wp)); 120 } 121 122 void mhi_ring_cmd_db(struct mhi_controller *mhi_cntrl, struct mhi_cmd *mhi_cmd) 123 { 124 dma_addr_t db; 125 struct mhi_ring *ring = &mhi_cmd->ring; 126 127 db = ring->iommu_base + (ring->wp - ring->base); 128 *ring->ctxt_wp = cpu_to_le64(db); 129 mhi_write_db(mhi_cntrl, ring->db_addr, db); 130 } 131 132 void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl, 133 struct mhi_chan *mhi_chan) 134 { 135 struct mhi_ring *ring = &mhi_chan->tre_ring; 136 dma_addr_t db; 137 138 db = ring->iommu_base + (ring->wp - ring->base); 139 140 /* 141 * Writes to the new ring element must be visible to the hardware 142 * before letting h/w know there is new element to fetch. 143 */ 144 dma_wmb(); 145 *ring->ctxt_wp = cpu_to_le64(db); 146 147 mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg, 148 ring->db_addr, db); 149 } 150 151 enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl) 152 { 153 u32 exec; 154 int ret = mhi_read_reg(mhi_cntrl, mhi_cntrl->bhi, BHI_EXECENV, &exec); 155 156 return (ret) ? MHI_EE_MAX : exec; 157 } 158 EXPORT_SYMBOL_GPL(mhi_get_exec_env); 159 160 enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl) 161 { 162 u32 state; 163 int ret = mhi_read_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, 164 MHISTATUS_MHISTATE_MASK, &state); 165 return ret ? MHI_STATE_MAX : state; 166 } 167 EXPORT_SYMBOL_GPL(mhi_get_mhi_state); 168 169 void mhi_soc_reset(struct mhi_controller *mhi_cntrl) 170 { 171 if (mhi_cntrl->reset) { 172 mhi_cntrl->reset(mhi_cntrl); 173 return; 174 } 175 176 /* Generic MHI SoC reset */ 177 mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET, 178 MHI_SOC_RESET_REQ); 179 } 180 EXPORT_SYMBOL_GPL(mhi_soc_reset); 181 182 int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl, 183 struct mhi_buf_info *buf_info) 184 { 185 buf_info->p_addr = dma_map_single(mhi_cntrl->cntrl_dev, 186 buf_info->v_addr, buf_info->len, 187 buf_info->dir); 188 if (dma_mapping_error(mhi_cntrl->cntrl_dev, buf_info->p_addr)) 189 return -ENOMEM; 190 191 return 0; 192 } 193 194 int mhi_map_single_use_bb(struct mhi_controller *mhi_cntrl, 195 struct mhi_buf_info *buf_info) 196 { 197 void *buf = dma_alloc_coherent(mhi_cntrl->cntrl_dev, buf_info->len, 198 &buf_info->p_addr, GFP_ATOMIC); 199 200 if (!buf) 201 return -ENOMEM; 202 203 if (buf_info->dir == DMA_TO_DEVICE) 204 memcpy(buf, buf_info->v_addr, buf_info->len); 205 206 buf_info->bb_addr = buf; 207 208 return 0; 209 } 210 211 void mhi_unmap_single_no_bb(struct mhi_controller *mhi_cntrl, 212 struct mhi_buf_info *buf_info) 213 { 214 dma_unmap_single(mhi_cntrl->cntrl_dev, buf_info->p_addr, buf_info->len, 215 buf_info->dir); 216 } 217 218 void mhi_unmap_single_use_bb(struct mhi_controller *mhi_cntrl, 219 struct mhi_buf_info *buf_info) 220 { 221 if (buf_info->dir == DMA_FROM_DEVICE) 222 memcpy(buf_info->v_addr, buf_info->bb_addr, buf_info->len); 223 224 dma_free_coherent(mhi_cntrl->cntrl_dev, buf_info->len, 225 buf_info->bb_addr, buf_info->p_addr); 226 } 227 228 static int get_nr_avail_ring_elements(struct mhi_controller *mhi_cntrl, 229 struct mhi_ring *ring) 230 { 231 int nr_el; 232 233 if (ring->wp < ring->rp) { 234 nr_el = ((ring->rp - ring->wp) / ring->el_size) - 1; 235 } else { 236 nr_el = (ring->rp - ring->base) / ring->el_size; 237 nr_el += ((ring->base + ring->len - ring->wp) / 238 ring->el_size) - 1; 239 } 240 241 return nr_el; 242 } 243 244 static void *mhi_to_virtual(struct mhi_ring *ring, dma_addr_t addr) 245 { 246 return (addr - ring->iommu_base) + ring->base; 247 } 248 249 static void mhi_add_ring_element(struct mhi_controller *mhi_cntrl, 250 struct mhi_ring *ring) 251 { 252 ring->wp += ring->el_size; 253 if (ring->wp >= (ring->base + ring->len)) 254 ring->wp = ring->base; 255 /* smp update */ 256 smp_wmb(); 257 } 258 259 static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, 260 struct mhi_ring *ring) 261 { 262 ring->rp += ring->el_size; 263 if (ring->rp >= (ring->base + ring->len)) 264 ring->rp = ring->base; 265 /* smp update */ 266 smp_wmb(); 267 } 268 269 static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) 270 { 271 return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; 272 } 273 274 int mhi_destroy_device(struct device *dev, void *data) 275 { 276 struct mhi_chan *ul_chan, *dl_chan; 277 struct mhi_device *mhi_dev; 278 struct mhi_controller *mhi_cntrl; 279 enum mhi_ee_type ee = MHI_EE_MAX; 280 281 if (dev->bus != &mhi_bus_type) 282 return 0; 283 284 mhi_dev = to_mhi_device(dev); 285 mhi_cntrl = mhi_dev->mhi_cntrl; 286 287 /* Only destroy virtual devices thats attached to bus */ 288 if (mhi_dev->dev_type == MHI_DEVICE_CONTROLLER) 289 return 0; 290 291 ul_chan = mhi_dev->ul_chan; 292 dl_chan = mhi_dev->dl_chan; 293 294 /* 295 * If execution environment is specified, remove only those devices that 296 * started in them based on ee_mask for the channels as we move on to a 297 * different execution environment 298 */ 299 if (data) 300 ee = *(enum mhi_ee_type *)data; 301 302 /* 303 * For the suspend and resume case, this function will get called 304 * without mhi_unregister_controller(). Hence, we need to drop the 305 * references to mhi_dev created for ul and dl channels. We can 306 * be sure that there will be no instances of mhi_dev left after 307 * this. 308 */ 309 if (ul_chan) { 310 if (ee != MHI_EE_MAX && !(ul_chan->ee_mask & BIT(ee))) 311 return 0; 312 313 put_device(&ul_chan->mhi_dev->dev); 314 } 315 316 if (dl_chan) { 317 if (ee != MHI_EE_MAX && !(dl_chan->ee_mask & BIT(ee))) 318 return 0; 319 320 put_device(&dl_chan->mhi_dev->dev); 321 } 322 323 dev_dbg(&mhi_cntrl->mhi_dev->dev, "destroy device for chan:%s\n", 324 mhi_dev->name); 325 326 /* Notify the client and remove the device from MHI bus */ 327 device_del(dev); 328 put_device(dev); 329 330 return 0; 331 } 332 333 int mhi_get_free_desc_count(struct mhi_device *mhi_dev, 334 enum dma_data_direction dir) 335 { 336 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; 337 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 338 mhi_dev->ul_chan : mhi_dev->dl_chan; 339 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; 340 341 return get_nr_avail_ring_elements(mhi_cntrl, tre_ring); 342 } 343 EXPORT_SYMBOL_GPL(mhi_get_free_desc_count); 344 345 void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason) 346 { 347 struct mhi_driver *mhi_drv; 348 349 if (!mhi_dev->dev.driver) 350 return; 351 352 mhi_drv = to_mhi_driver(mhi_dev->dev.driver); 353 354 if (mhi_drv->status_cb) 355 mhi_drv->status_cb(mhi_dev, cb_reason); 356 } 357 EXPORT_SYMBOL_GPL(mhi_notify); 358 359 /* Bind MHI channels to MHI devices */ 360 void mhi_create_devices(struct mhi_controller *mhi_cntrl) 361 { 362 struct mhi_chan *mhi_chan; 363 struct mhi_device *mhi_dev; 364 struct device *dev = &mhi_cntrl->mhi_dev->dev; 365 int i, ret; 366 367 mhi_chan = mhi_cntrl->mhi_chan; 368 for (i = 0; i < mhi_cntrl->max_chan; i++, mhi_chan++) { 369 if (!mhi_chan->configured || mhi_chan->mhi_dev || 370 !(mhi_chan->ee_mask & BIT(mhi_cntrl->ee))) 371 continue; 372 mhi_dev = mhi_alloc_device(mhi_cntrl); 373 if (IS_ERR(mhi_dev)) 374 return; 375 376 mhi_dev->dev_type = MHI_DEVICE_XFER; 377 switch (mhi_chan->dir) { 378 case DMA_TO_DEVICE: 379 mhi_dev->ul_chan = mhi_chan; 380 mhi_dev->ul_chan_id = mhi_chan->chan; 381 break; 382 case DMA_FROM_DEVICE: 383 /* We use dl_chan as offload channels */ 384 mhi_dev->dl_chan = mhi_chan; 385 mhi_dev->dl_chan_id = mhi_chan->chan; 386 break; 387 default: 388 dev_err(dev, "Direction not supported\n"); 389 put_device(&mhi_dev->dev); 390 return; 391 } 392 393 get_device(&mhi_dev->dev); 394 mhi_chan->mhi_dev = mhi_dev; 395 396 /* Check next channel if it matches */ 397 if ((i + 1) < mhi_cntrl->max_chan && mhi_chan[1].configured) { 398 if (!strcmp(mhi_chan[1].name, mhi_chan->name)) { 399 i++; 400 mhi_chan++; 401 if (mhi_chan->dir == DMA_TO_DEVICE) { 402 mhi_dev->ul_chan = mhi_chan; 403 mhi_dev->ul_chan_id = mhi_chan->chan; 404 } else { 405 mhi_dev->dl_chan = mhi_chan; 406 mhi_dev->dl_chan_id = mhi_chan->chan; 407 } 408 get_device(&mhi_dev->dev); 409 mhi_chan->mhi_dev = mhi_dev; 410 } 411 } 412 413 /* Channel name is same for both UL and DL */ 414 mhi_dev->name = mhi_chan->name; 415 dev_set_name(&mhi_dev->dev, "%s_%s", 416 dev_name(&mhi_cntrl->mhi_dev->dev), 417 mhi_dev->name); 418 419 /* Init wakeup source if available */ 420 if (mhi_dev->dl_chan && mhi_dev->dl_chan->wake_capable) 421 device_init_wakeup(&mhi_dev->dev, true); 422 423 ret = device_add(&mhi_dev->dev); 424 if (ret) 425 put_device(&mhi_dev->dev); 426 } 427 } 428 429 irqreturn_t mhi_irq_handler(int irq_number, void *dev) 430 { 431 struct mhi_event *mhi_event = dev; 432 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; 433 struct mhi_event_ctxt *er_ctxt; 434 struct mhi_ring *ev_ring = &mhi_event->ring; 435 dma_addr_t ptr; 436 void *dev_rp; 437 438 /* 439 * If CONFIG_DEBUG_SHIRQ is set, the IRQ handler will get invoked during __free_irq() 440 * and by that time mhi_ctxt() would've freed. So check for the existence of mhi_ctxt 441 * before handling the IRQs. 442 */ 443 if (!mhi_cntrl->mhi_ctxt) { 444 dev_dbg(&mhi_cntrl->mhi_dev->dev, 445 "mhi_ctxt has been freed\n"); 446 return IRQ_HANDLED; 447 } 448 449 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; 450 ptr = le64_to_cpu(er_ctxt->rp); 451 452 if (!is_valid_ring_ptr(ev_ring, ptr)) { 453 dev_err(&mhi_cntrl->mhi_dev->dev, 454 "Event ring rp points outside of the event ring\n"); 455 return IRQ_HANDLED; 456 } 457 458 dev_rp = mhi_to_virtual(ev_ring, ptr); 459 460 /* Only proceed if event ring has pending events */ 461 if (ev_ring->rp == dev_rp) 462 return IRQ_HANDLED; 463 464 /* For client managed event ring, notify pending data */ 465 if (mhi_event->cl_manage) { 466 struct mhi_chan *mhi_chan = mhi_event->mhi_chan; 467 struct mhi_device *mhi_dev = mhi_chan->mhi_dev; 468 469 if (mhi_dev) 470 mhi_notify(mhi_dev, MHI_CB_PENDING_DATA); 471 } else { 472 tasklet_schedule(&mhi_event->task); 473 } 474 475 return IRQ_HANDLED; 476 } 477 478 irqreturn_t mhi_intvec_threaded_handler(int irq_number, void *priv) 479 { 480 struct mhi_controller *mhi_cntrl = priv; 481 struct device *dev = &mhi_cntrl->mhi_dev->dev; 482 enum mhi_state state; 483 enum mhi_pm_state pm_state = 0; 484 enum mhi_ee_type ee; 485 486 write_lock_irq(&mhi_cntrl->pm_lock); 487 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { 488 write_unlock_irq(&mhi_cntrl->pm_lock); 489 goto exit_intvec; 490 } 491 492 state = mhi_get_mhi_state(mhi_cntrl); 493 ee = mhi_get_exec_env(mhi_cntrl); 494 dev_dbg(dev, "local ee: %s state: %s device ee: %s state: %s\n", 495 TO_MHI_EXEC_STR(mhi_cntrl->ee), 496 mhi_state_str(mhi_cntrl->dev_state), 497 TO_MHI_EXEC_STR(ee), mhi_state_str(state)); 498 499 if (state == MHI_STATE_SYS_ERR) { 500 dev_dbg(dev, "System error detected\n"); 501 pm_state = mhi_tryset_pm_state(mhi_cntrl, 502 MHI_PM_SYS_ERR_DETECT); 503 } 504 write_unlock_irq(&mhi_cntrl->pm_lock); 505 506 if (pm_state != MHI_PM_SYS_ERR_DETECT) 507 goto exit_intvec; 508 509 switch (ee) { 510 case MHI_EE_RDDM: 511 /* proceed if power down is not already in progress */ 512 if (mhi_cntrl->rddm_image && mhi_is_active(mhi_cntrl)) { 513 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); 514 mhi_cntrl->ee = ee; 515 wake_up_all(&mhi_cntrl->state_event); 516 } 517 break; 518 case MHI_EE_PBL: 519 case MHI_EE_EDL: 520 case MHI_EE_PTHRU: 521 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_FATAL_ERROR); 522 mhi_cntrl->ee = ee; 523 wake_up_all(&mhi_cntrl->state_event); 524 mhi_pm_sys_err_handler(mhi_cntrl); 525 break; 526 default: 527 wake_up_all(&mhi_cntrl->state_event); 528 mhi_pm_sys_err_handler(mhi_cntrl); 529 break; 530 } 531 532 exit_intvec: 533 534 return IRQ_HANDLED; 535 } 536 537 irqreturn_t mhi_intvec_handler(int irq_number, void *dev) 538 { 539 struct mhi_controller *mhi_cntrl = dev; 540 541 /* Wake up events waiting for state change */ 542 wake_up_all(&mhi_cntrl->state_event); 543 544 return IRQ_WAKE_THREAD; 545 } 546 547 static void mhi_recycle_ev_ring_element(struct mhi_controller *mhi_cntrl, 548 struct mhi_ring *ring) 549 { 550 /* Update the WP */ 551 ring->wp += ring->el_size; 552 553 if (ring->wp >= (ring->base + ring->len)) 554 ring->wp = ring->base; 555 556 *ring->ctxt_wp = cpu_to_le64(ring->iommu_base + (ring->wp - ring->base)); 557 558 /* Update the RP */ 559 ring->rp += ring->el_size; 560 if (ring->rp >= (ring->base + ring->len)) 561 ring->rp = ring->base; 562 563 /* Update to all cores */ 564 smp_wmb(); 565 } 566 567 static int parse_xfer_event(struct mhi_controller *mhi_cntrl, 568 struct mhi_ring_element *event, 569 struct mhi_chan *mhi_chan) 570 { 571 struct mhi_ring *buf_ring, *tre_ring; 572 struct device *dev = &mhi_cntrl->mhi_dev->dev; 573 struct mhi_result result; 574 unsigned long flags = 0; 575 u32 ev_code; 576 577 ev_code = MHI_TRE_GET_EV_CODE(event); 578 buf_ring = &mhi_chan->buf_ring; 579 tre_ring = &mhi_chan->tre_ring; 580 581 result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? 582 -EOVERFLOW : 0; 583 584 /* 585 * If it's a DB Event then we need to grab the lock 586 * with preemption disabled and as a write because we 587 * have to update db register and there are chances that 588 * another thread could be doing the same. 589 */ 590 if (ev_code >= MHI_EV_CC_OOB) 591 write_lock_irqsave(&mhi_chan->lock, flags); 592 else 593 read_lock_bh(&mhi_chan->lock); 594 595 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) 596 goto end_process_tx_event; 597 598 switch (ev_code) { 599 case MHI_EV_CC_OVERFLOW: 600 case MHI_EV_CC_EOB: 601 case MHI_EV_CC_EOT: 602 { 603 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(event); 604 struct mhi_ring_element *local_rp, *ev_tre; 605 void *dev_rp; 606 struct mhi_buf_info *buf_info; 607 u16 xfer_len; 608 609 if (!is_valid_ring_ptr(tre_ring, ptr)) { 610 dev_err(&mhi_cntrl->mhi_dev->dev, 611 "Event element points outside of the tre ring\n"); 612 break; 613 } 614 /* Get the TRB this event points to */ 615 ev_tre = mhi_to_virtual(tre_ring, ptr); 616 617 dev_rp = ev_tre + 1; 618 if (dev_rp >= (tre_ring->base + tre_ring->len)) 619 dev_rp = tre_ring->base; 620 621 result.dir = mhi_chan->dir; 622 623 local_rp = tre_ring->rp; 624 while (local_rp != dev_rp) { 625 buf_info = buf_ring->rp; 626 /* If it's the last TRE, get length from the event */ 627 if (local_rp == ev_tre) 628 xfer_len = MHI_TRE_GET_EV_LEN(event); 629 else 630 xfer_len = buf_info->len; 631 632 /* Unmap if it's not pre-mapped by client */ 633 if (likely(!buf_info->pre_mapped)) 634 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); 635 636 result.buf_addr = buf_info->cb_buf; 637 638 /* truncate to buf len if xfer_len is larger */ 639 result.bytes_xferd = 640 min_t(u16, xfer_len, buf_info->len); 641 mhi_del_ring_element(mhi_cntrl, buf_ring); 642 mhi_del_ring_element(mhi_cntrl, tre_ring); 643 local_rp = tre_ring->rp; 644 645 /* notify client */ 646 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); 647 648 if (mhi_chan->dir == DMA_TO_DEVICE) { 649 atomic_dec(&mhi_cntrl->pending_pkts); 650 /* Release the reference got from mhi_queue() */ 651 mhi_cntrl->runtime_put(mhi_cntrl); 652 } 653 654 /* 655 * Recycle the buffer if buffer is pre-allocated, 656 * if there is an error, not much we can do apart 657 * from dropping the packet 658 */ 659 if (mhi_chan->pre_alloc) { 660 if (mhi_queue_buf(mhi_chan->mhi_dev, 661 mhi_chan->dir, 662 buf_info->cb_buf, 663 buf_info->len, MHI_EOT)) { 664 dev_err(dev, 665 "Error recycling buffer for chan:%d\n", 666 mhi_chan->chan); 667 kfree(buf_info->cb_buf); 668 } 669 } 670 } 671 break; 672 } /* CC_EOT */ 673 case MHI_EV_CC_OOB: 674 case MHI_EV_CC_DB_MODE: 675 { 676 unsigned long pm_lock_flags; 677 678 mhi_chan->db_cfg.db_mode = 1; 679 read_lock_irqsave(&mhi_cntrl->pm_lock, pm_lock_flags); 680 if (tre_ring->wp != tre_ring->rp && 681 MHI_DB_ACCESS_VALID(mhi_cntrl)) { 682 mhi_ring_chan_db(mhi_cntrl, mhi_chan); 683 } 684 read_unlock_irqrestore(&mhi_cntrl->pm_lock, pm_lock_flags); 685 break; 686 } 687 case MHI_EV_CC_BAD_TRE: 688 default: 689 dev_err(dev, "Unknown event 0x%x\n", ev_code); 690 break; 691 } /* switch(MHI_EV_READ_CODE(EV_TRB_CODE,event)) */ 692 693 end_process_tx_event: 694 if (ev_code >= MHI_EV_CC_OOB) 695 write_unlock_irqrestore(&mhi_chan->lock, flags); 696 else 697 read_unlock_bh(&mhi_chan->lock); 698 699 return 0; 700 } 701 702 static int parse_rsc_event(struct mhi_controller *mhi_cntrl, 703 struct mhi_ring_element *event, 704 struct mhi_chan *mhi_chan) 705 { 706 struct mhi_ring *buf_ring, *tre_ring; 707 struct mhi_buf_info *buf_info; 708 struct mhi_result result; 709 int ev_code; 710 u32 cookie; /* offset to local descriptor */ 711 u16 xfer_len; 712 713 buf_ring = &mhi_chan->buf_ring; 714 tre_ring = &mhi_chan->tre_ring; 715 716 ev_code = MHI_TRE_GET_EV_CODE(event); 717 cookie = MHI_TRE_GET_EV_COOKIE(event); 718 xfer_len = MHI_TRE_GET_EV_LEN(event); 719 720 /* Received out of bound cookie */ 721 WARN_ON(cookie >= buf_ring->len); 722 723 buf_info = buf_ring->base + cookie; 724 725 result.transaction_status = (ev_code == MHI_EV_CC_OVERFLOW) ? 726 -EOVERFLOW : 0; 727 728 /* truncate to buf len if xfer_len is larger */ 729 result.bytes_xferd = min_t(u16, xfer_len, buf_info->len); 730 result.buf_addr = buf_info->cb_buf; 731 result.dir = mhi_chan->dir; 732 733 read_lock_bh(&mhi_chan->lock); 734 735 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) 736 goto end_process_rsc_event; 737 738 WARN_ON(!buf_info->used); 739 740 /* notify the client */ 741 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); 742 743 /* 744 * Note: We're arbitrarily incrementing RP even though, completion 745 * packet we processed might not be the same one, reason we can do this 746 * is because device guaranteed to cache descriptors in order it 747 * receive, so even though completion event is different we can re-use 748 * all descriptors in between. 749 * Example: 750 * Transfer Ring has descriptors: A, B, C, D 751 * Last descriptor host queue is D (WP) and first descriptor 752 * host queue is A (RP). 753 * The completion event we just serviced is descriptor C. 754 * Then we can safely queue descriptors to replace A, B, and C 755 * even though host did not receive any completions. 756 */ 757 mhi_del_ring_element(mhi_cntrl, tre_ring); 758 buf_info->used = false; 759 760 end_process_rsc_event: 761 read_unlock_bh(&mhi_chan->lock); 762 763 return 0; 764 } 765 766 static void mhi_process_cmd_completion(struct mhi_controller *mhi_cntrl, 767 struct mhi_ring_element *tre) 768 { 769 dma_addr_t ptr = MHI_TRE_GET_EV_PTR(tre); 770 struct mhi_cmd *cmd_ring = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; 771 struct mhi_ring *mhi_ring = &cmd_ring->ring; 772 struct mhi_ring_element *cmd_pkt; 773 struct mhi_chan *mhi_chan; 774 u32 chan; 775 776 if (!is_valid_ring_ptr(mhi_ring, ptr)) { 777 dev_err(&mhi_cntrl->mhi_dev->dev, 778 "Event element points outside of the cmd ring\n"); 779 return; 780 } 781 782 cmd_pkt = mhi_to_virtual(mhi_ring, ptr); 783 784 chan = MHI_TRE_GET_CMD_CHID(cmd_pkt); 785 786 if (chan < mhi_cntrl->max_chan && 787 mhi_cntrl->mhi_chan[chan].configured) { 788 mhi_chan = &mhi_cntrl->mhi_chan[chan]; 789 write_lock_bh(&mhi_chan->lock); 790 mhi_chan->ccs = MHI_TRE_GET_EV_CODE(tre); 791 complete(&mhi_chan->completion); 792 write_unlock_bh(&mhi_chan->lock); 793 } else { 794 dev_err(&mhi_cntrl->mhi_dev->dev, 795 "Completion packet for invalid channel ID: %d\n", chan); 796 } 797 798 mhi_del_ring_element(mhi_cntrl, mhi_ring); 799 } 800 801 int mhi_process_ctrl_ev_ring(struct mhi_controller *mhi_cntrl, 802 struct mhi_event *mhi_event, 803 u32 event_quota) 804 { 805 struct mhi_ring_element *dev_rp, *local_rp; 806 struct mhi_ring *ev_ring = &mhi_event->ring; 807 struct mhi_event_ctxt *er_ctxt = 808 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; 809 struct mhi_chan *mhi_chan; 810 struct device *dev = &mhi_cntrl->mhi_dev->dev; 811 u32 chan; 812 int count = 0; 813 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); 814 815 /* 816 * This is a quick check to avoid unnecessary event processing 817 * in case MHI is already in error state, but it's still possible 818 * to transition to error state while processing events 819 */ 820 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) 821 return -EIO; 822 823 if (!is_valid_ring_ptr(ev_ring, ptr)) { 824 dev_err(&mhi_cntrl->mhi_dev->dev, 825 "Event ring rp points outside of the event ring\n"); 826 return -EIO; 827 } 828 829 dev_rp = mhi_to_virtual(ev_ring, ptr); 830 local_rp = ev_ring->rp; 831 832 while (dev_rp != local_rp) { 833 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); 834 835 switch (type) { 836 case MHI_PKT_TYPE_BW_REQ_EVENT: 837 { 838 struct mhi_link_info *link_info; 839 840 link_info = &mhi_cntrl->mhi_link_info; 841 write_lock_irq(&mhi_cntrl->pm_lock); 842 link_info->target_link_speed = 843 MHI_TRE_GET_EV_LINKSPEED(local_rp); 844 link_info->target_link_width = 845 MHI_TRE_GET_EV_LINKWIDTH(local_rp); 846 write_unlock_irq(&mhi_cntrl->pm_lock); 847 dev_dbg(dev, "Received BW_REQ event\n"); 848 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_BW_REQ); 849 break; 850 } 851 case MHI_PKT_TYPE_STATE_CHANGE_EVENT: 852 { 853 enum mhi_state new_state; 854 855 new_state = MHI_TRE_GET_EV_STATE(local_rp); 856 857 dev_dbg(dev, "State change event to state: %s\n", 858 mhi_state_str(new_state)); 859 860 switch (new_state) { 861 case MHI_STATE_M0: 862 mhi_pm_m0_transition(mhi_cntrl); 863 break; 864 case MHI_STATE_M1: 865 mhi_pm_m1_transition(mhi_cntrl); 866 break; 867 case MHI_STATE_M3: 868 mhi_pm_m3_transition(mhi_cntrl); 869 break; 870 case MHI_STATE_SYS_ERR: 871 { 872 enum mhi_pm_state pm_state; 873 874 dev_dbg(dev, "System error detected\n"); 875 write_lock_irq(&mhi_cntrl->pm_lock); 876 pm_state = mhi_tryset_pm_state(mhi_cntrl, 877 MHI_PM_SYS_ERR_DETECT); 878 write_unlock_irq(&mhi_cntrl->pm_lock); 879 if (pm_state == MHI_PM_SYS_ERR_DETECT) 880 mhi_pm_sys_err_handler(mhi_cntrl); 881 break; 882 } 883 default: 884 dev_err(dev, "Invalid state: %s\n", 885 mhi_state_str(new_state)); 886 } 887 888 break; 889 } 890 case MHI_PKT_TYPE_CMD_COMPLETION_EVENT: 891 mhi_process_cmd_completion(mhi_cntrl, local_rp); 892 break; 893 case MHI_PKT_TYPE_EE_EVENT: 894 { 895 enum dev_st_transition st = DEV_ST_TRANSITION_MAX; 896 enum mhi_ee_type event = MHI_TRE_GET_EV_EXECENV(local_rp); 897 898 dev_dbg(dev, "Received EE event: %s\n", 899 TO_MHI_EXEC_STR(event)); 900 switch (event) { 901 case MHI_EE_SBL: 902 st = DEV_ST_TRANSITION_SBL; 903 break; 904 case MHI_EE_WFW: 905 case MHI_EE_AMSS: 906 st = DEV_ST_TRANSITION_MISSION_MODE; 907 break; 908 case MHI_EE_FP: 909 st = DEV_ST_TRANSITION_FP; 910 break; 911 case MHI_EE_RDDM: 912 mhi_cntrl->status_cb(mhi_cntrl, MHI_CB_EE_RDDM); 913 write_lock_irq(&mhi_cntrl->pm_lock); 914 mhi_cntrl->ee = event; 915 write_unlock_irq(&mhi_cntrl->pm_lock); 916 wake_up_all(&mhi_cntrl->state_event); 917 break; 918 default: 919 dev_err(dev, 920 "Unhandled EE event: 0x%x\n", type); 921 } 922 if (st != DEV_ST_TRANSITION_MAX) 923 mhi_queue_state_transition(mhi_cntrl, st); 924 925 break; 926 } 927 case MHI_PKT_TYPE_TX_EVENT: 928 chan = MHI_TRE_GET_EV_CHID(local_rp); 929 930 WARN_ON(chan >= mhi_cntrl->max_chan); 931 932 /* 933 * Only process the event ring elements whose channel 934 * ID is within the maximum supported range. 935 */ 936 if (chan < mhi_cntrl->max_chan) { 937 mhi_chan = &mhi_cntrl->mhi_chan[chan]; 938 if (!mhi_chan->configured) 939 break; 940 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); 941 } 942 break; 943 default: 944 dev_err(dev, "Unhandled event type: %d\n", type); 945 break; 946 } 947 948 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); 949 local_rp = ev_ring->rp; 950 951 ptr = le64_to_cpu(er_ctxt->rp); 952 if (!is_valid_ring_ptr(ev_ring, ptr)) { 953 dev_err(&mhi_cntrl->mhi_dev->dev, 954 "Event ring rp points outside of the event ring\n"); 955 return -EIO; 956 } 957 958 dev_rp = mhi_to_virtual(ev_ring, ptr); 959 count++; 960 } 961 962 read_lock_bh(&mhi_cntrl->pm_lock); 963 964 /* Ring EV DB only if there is any pending element to process */ 965 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count) 966 mhi_ring_er_db(mhi_event); 967 read_unlock_bh(&mhi_cntrl->pm_lock); 968 969 return count; 970 } 971 972 int mhi_process_data_event_ring(struct mhi_controller *mhi_cntrl, 973 struct mhi_event *mhi_event, 974 u32 event_quota) 975 { 976 struct mhi_ring_element *dev_rp, *local_rp; 977 struct mhi_ring *ev_ring = &mhi_event->ring; 978 struct mhi_event_ctxt *er_ctxt = 979 &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_event->er_index]; 980 int count = 0; 981 u32 chan; 982 struct mhi_chan *mhi_chan; 983 dma_addr_t ptr = le64_to_cpu(er_ctxt->rp); 984 985 if (unlikely(MHI_EVENT_ACCESS_INVALID(mhi_cntrl->pm_state))) 986 return -EIO; 987 988 if (!is_valid_ring_ptr(ev_ring, ptr)) { 989 dev_err(&mhi_cntrl->mhi_dev->dev, 990 "Event ring rp points outside of the event ring\n"); 991 return -EIO; 992 } 993 994 dev_rp = mhi_to_virtual(ev_ring, ptr); 995 local_rp = ev_ring->rp; 996 997 while (dev_rp != local_rp && event_quota > 0) { 998 enum mhi_pkt_type type = MHI_TRE_GET_EV_TYPE(local_rp); 999 1000 chan = MHI_TRE_GET_EV_CHID(local_rp); 1001 1002 WARN_ON(chan >= mhi_cntrl->max_chan); 1003 1004 /* 1005 * Only process the event ring elements whose channel 1006 * ID is within the maximum supported range. 1007 */ 1008 if (chan < mhi_cntrl->max_chan && 1009 mhi_cntrl->mhi_chan[chan].configured) { 1010 mhi_chan = &mhi_cntrl->mhi_chan[chan]; 1011 1012 if (likely(type == MHI_PKT_TYPE_TX_EVENT)) { 1013 parse_xfer_event(mhi_cntrl, local_rp, mhi_chan); 1014 event_quota--; 1015 } else if (type == MHI_PKT_TYPE_RSC_TX_EVENT) { 1016 parse_rsc_event(mhi_cntrl, local_rp, mhi_chan); 1017 event_quota--; 1018 } 1019 } 1020 1021 mhi_recycle_ev_ring_element(mhi_cntrl, ev_ring); 1022 local_rp = ev_ring->rp; 1023 1024 ptr = le64_to_cpu(er_ctxt->rp); 1025 if (!is_valid_ring_ptr(ev_ring, ptr)) { 1026 dev_err(&mhi_cntrl->mhi_dev->dev, 1027 "Event ring rp points outside of the event ring\n"); 1028 return -EIO; 1029 } 1030 1031 dev_rp = mhi_to_virtual(ev_ring, ptr); 1032 count++; 1033 } 1034 read_lock_bh(&mhi_cntrl->pm_lock); 1035 1036 /* Ring EV DB only if there is any pending element to process */ 1037 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl)) && count) 1038 mhi_ring_er_db(mhi_event); 1039 read_unlock_bh(&mhi_cntrl->pm_lock); 1040 1041 return count; 1042 } 1043 1044 void mhi_ev_task(unsigned long data) 1045 { 1046 struct mhi_event *mhi_event = (struct mhi_event *)data; 1047 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; 1048 1049 /* process all pending events */ 1050 spin_lock_bh(&mhi_event->lock); 1051 mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); 1052 spin_unlock_bh(&mhi_event->lock); 1053 } 1054 1055 void mhi_ctrl_ev_task(unsigned long data) 1056 { 1057 struct mhi_event *mhi_event = (struct mhi_event *)data; 1058 struct mhi_controller *mhi_cntrl = mhi_event->mhi_cntrl; 1059 struct device *dev = &mhi_cntrl->mhi_dev->dev; 1060 enum mhi_state state; 1061 enum mhi_pm_state pm_state = 0; 1062 int ret; 1063 1064 /* 1065 * We can check PM state w/o a lock here because there is no way 1066 * PM state can change from reg access valid to no access while this 1067 * thread being executed. 1068 */ 1069 if (!MHI_REG_ACCESS_VALID(mhi_cntrl->pm_state)) { 1070 /* 1071 * We may have a pending event but not allowed to 1072 * process it since we are probably in a suspended state, 1073 * so trigger a resume. 1074 */ 1075 mhi_trigger_resume(mhi_cntrl); 1076 1077 return; 1078 } 1079 1080 /* Process ctrl events */ 1081 ret = mhi_event->process_event(mhi_cntrl, mhi_event, U32_MAX); 1082 1083 /* 1084 * We received an IRQ but no events to process, maybe device went to 1085 * SYS_ERR state? Check the state to confirm. 1086 */ 1087 if (!ret) { 1088 write_lock_irq(&mhi_cntrl->pm_lock); 1089 state = mhi_get_mhi_state(mhi_cntrl); 1090 if (state == MHI_STATE_SYS_ERR) { 1091 dev_dbg(dev, "System error detected\n"); 1092 pm_state = mhi_tryset_pm_state(mhi_cntrl, 1093 MHI_PM_SYS_ERR_DETECT); 1094 } 1095 write_unlock_irq(&mhi_cntrl->pm_lock); 1096 if (pm_state == MHI_PM_SYS_ERR_DETECT) 1097 mhi_pm_sys_err_handler(mhi_cntrl); 1098 } 1099 } 1100 1101 static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl, 1102 struct mhi_ring *ring) 1103 { 1104 void *tmp = ring->wp + ring->el_size; 1105 1106 if (tmp >= (ring->base + ring->len)) 1107 tmp = ring->base; 1108 1109 return (tmp == ring->rp); 1110 } 1111 1112 static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, 1113 enum dma_data_direction dir, enum mhi_flags mflags) 1114 { 1115 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; 1116 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : 1117 mhi_dev->dl_chan; 1118 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; 1119 unsigned long flags; 1120 int ret; 1121 1122 if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) 1123 return -EIO; 1124 1125 read_lock_irqsave(&mhi_cntrl->pm_lock, flags); 1126 1127 ret = mhi_is_ring_full(mhi_cntrl, tre_ring); 1128 if (unlikely(ret)) { 1129 ret = -EAGAIN; 1130 goto exit_unlock; 1131 } 1132 1133 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); 1134 if (unlikely(ret)) 1135 goto exit_unlock; 1136 1137 /* Packet is queued, take a usage ref to exit M3 if necessary 1138 * for host->device buffer, balanced put is done on buffer completion 1139 * for device->host buffer, balanced put is after ringing the DB 1140 */ 1141 mhi_cntrl->runtime_get(mhi_cntrl); 1142 1143 /* Assert dev_wake (to exit/prevent M1/M2)*/ 1144 mhi_cntrl->wake_toggle(mhi_cntrl); 1145 1146 if (mhi_chan->dir == DMA_TO_DEVICE) 1147 atomic_inc(&mhi_cntrl->pending_pkts); 1148 1149 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) 1150 mhi_ring_chan_db(mhi_cntrl, mhi_chan); 1151 1152 if (dir == DMA_FROM_DEVICE) 1153 mhi_cntrl->runtime_put(mhi_cntrl); 1154 1155 exit_unlock: 1156 read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); 1157 1158 return ret; 1159 } 1160 1161 int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir, 1162 struct sk_buff *skb, size_t len, enum mhi_flags mflags) 1163 { 1164 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : 1165 mhi_dev->dl_chan; 1166 struct mhi_buf_info buf_info = { }; 1167 1168 buf_info.v_addr = skb->data; 1169 buf_info.cb_buf = skb; 1170 buf_info.len = len; 1171 1172 if (unlikely(mhi_chan->pre_alloc)) 1173 return -EINVAL; 1174 1175 return mhi_queue(mhi_dev, &buf_info, dir, mflags); 1176 } 1177 EXPORT_SYMBOL_GPL(mhi_queue_skb); 1178 1179 int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir, 1180 struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags) 1181 { 1182 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan : 1183 mhi_dev->dl_chan; 1184 struct mhi_buf_info buf_info = { }; 1185 1186 buf_info.p_addr = mhi_buf->dma_addr; 1187 buf_info.cb_buf = mhi_buf; 1188 buf_info.pre_mapped = true; 1189 buf_info.len = len; 1190 1191 if (unlikely(mhi_chan->pre_alloc)) 1192 return -EINVAL; 1193 1194 return mhi_queue(mhi_dev, &buf_info, dir, mflags); 1195 } 1196 EXPORT_SYMBOL_GPL(mhi_queue_dma); 1197 1198 int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, 1199 struct mhi_buf_info *info, enum mhi_flags flags) 1200 { 1201 struct mhi_ring *buf_ring, *tre_ring; 1202 struct mhi_ring_element *mhi_tre; 1203 struct mhi_buf_info *buf_info; 1204 int eot, eob, chain, bei; 1205 int ret; 1206 1207 buf_ring = &mhi_chan->buf_ring; 1208 tre_ring = &mhi_chan->tre_ring; 1209 1210 buf_info = buf_ring->wp; 1211 WARN_ON(buf_info->used); 1212 buf_info->pre_mapped = info->pre_mapped; 1213 if (info->pre_mapped) 1214 buf_info->p_addr = info->p_addr; 1215 else 1216 buf_info->v_addr = info->v_addr; 1217 buf_info->cb_buf = info->cb_buf; 1218 buf_info->wp = tre_ring->wp; 1219 buf_info->dir = mhi_chan->dir; 1220 buf_info->len = info->len; 1221 1222 if (!info->pre_mapped) { 1223 ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); 1224 if (ret) 1225 return ret; 1226 } 1227 1228 eob = !!(flags & MHI_EOB); 1229 eot = !!(flags & MHI_EOT); 1230 chain = !!(flags & MHI_CHAIN); 1231 bei = !!(mhi_chan->intmod); 1232 1233 mhi_tre = tre_ring->wp; 1234 mhi_tre->ptr = MHI_TRE_DATA_PTR(buf_info->p_addr); 1235 mhi_tre->dword[0] = MHI_TRE_DATA_DWORD0(info->len); 1236 mhi_tre->dword[1] = MHI_TRE_DATA_DWORD1(bei, eot, eob, chain); 1237 1238 /* increment WP */ 1239 mhi_add_ring_element(mhi_cntrl, tre_ring); 1240 mhi_add_ring_element(mhi_cntrl, buf_ring); 1241 1242 return 0; 1243 } 1244 1245 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir, 1246 void *buf, size_t len, enum mhi_flags mflags) 1247 { 1248 struct mhi_buf_info buf_info = { }; 1249 1250 buf_info.v_addr = buf; 1251 buf_info.cb_buf = buf; 1252 buf_info.len = len; 1253 1254 return mhi_queue(mhi_dev, &buf_info, dir, mflags); 1255 } 1256 EXPORT_SYMBOL_GPL(mhi_queue_buf); 1257 1258 bool mhi_queue_is_full(struct mhi_device *mhi_dev, enum dma_data_direction dir) 1259 { 1260 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; 1261 struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? 1262 mhi_dev->ul_chan : mhi_dev->dl_chan; 1263 struct mhi_ring *tre_ring = &mhi_chan->tre_ring; 1264 1265 return mhi_is_ring_full(mhi_cntrl, tre_ring); 1266 } 1267 EXPORT_SYMBOL_GPL(mhi_queue_is_full); 1268 1269 int mhi_send_cmd(struct mhi_controller *mhi_cntrl, 1270 struct mhi_chan *mhi_chan, 1271 enum mhi_cmd_type cmd) 1272 { 1273 struct mhi_ring_element *cmd_tre = NULL; 1274 struct mhi_cmd *mhi_cmd = &mhi_cntrl->mhi_cmd[PRIMARY_CMD_RING]; 1275 struct mhi_ring *ring = &mhi_cmd->ring; 1276 struct device *dev = &mhi_cntrl->mhi_dev->dev; 1277 int chan = 0; 1278 1279 if (mhi_chan) 1280 chan = mhi_chan->chan; 1281 1282 spin_lock_bh(&mhi_cmd->lock); 1283 if (!get_nr_avail_ring_elements(mhi_cntrl, ring)) { 1284 spin_unlock_bh(&mhi_cmd->lock); 1285 return -ENOMEM; 1286 } 1287 1288 /* prepare the cmd tre */ 1289 cmd_tre = ring->wp; 1290 switch (cmd) { 1291 case MHI_CMD_RESET_CHAN: 1292 cmd_tre->ptr = MHI_TRE_CMD_RESET_PTR; 1293 cmd_tre->dword[0] = MHI_TRE_CMD_RESET_DWORD0; 1294 cmd_tre->dword[1] = MHI_TRE_CMD_RESET_DWORD1(chan); 1295 break; 1296 case MHI_CMD_STOP_CHAN: 1297 cmd_tre->ptr = MHI_TRE_CMD_STOP_PTR; 1298 cmd_tre->dword[0] = MHI_TRE_CMD_STOP_DWORD0; 1299 cmd_tre->dword[1] = MHI_TRE_CMD_STOP_DWORD1(chan); 1300 break; 1301 case MHI_CMD_START_CHAN: 1302 cmd_tre->ptr = MHI_TRE_CMD_START_PTR; 1303 cmd_tre->dword[0] = MHI_TRE_CMD_START_DWORD0; 1304 cmd_tre->dword[1] = MHI_TRE_CMD_START_DWORD1(chan); 1305 break; 1306 default: 1307 dev_err(dev, "Command not supported\n"); 1308 break; 1309 } 1310 1311 /* queue to hardware */ 1312 mhi_add_ring_element(mhi_cntrl, ring); 1313 read_lock_bh(&mhi_cntrl->pm_lock); 1314 if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) 1315 mhi_ring_cmd_db(mhi_cntrl, mhi_cmd); 1316 read_unlock_bh(&mhi_cntrl->pm_lock); 1317 spin_unlock_bh(&mhi_cmd->lock); 1318 1319 return 0; 1320 } 1321 1322 static int mhi_update_channel_state(struct mhi_controller *mhi_cntrl, 1323 struct mhi_chan *mhi_chan, 1324 enum mhi_ch_state_type to_state) 1325 { 1326 struct device *dev = &mhi_chan->mhi_dev->dev; 1327 enum mhi_cmd_type cmd = MHI_CMD_NOP; 1328 int ret; 1329 1330 dev_dbg(dev, "%d: Updating channel state to: %s\n", mhi_chan->chan, 1331 TO_CH_STATE_TYPE_STR(to_state)); 1332 1333 switch (to_state) { 1334 case MHI_CH_STATE_TYPE_RESET: 1335 write_lock_irq(&mhi_chan->lock); 1336 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && 1337 mhi_chan->ch_state != MHI_CH_STATE_ENABLED && 1338 mhi_chan->ch_state != MHI_CH_STATE_SUSPENDED) { 1339 write_unlock_irq(&mhi_chan->lock); 1340 return -EINVAL; 1341 } 1342 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; 1343 write_unlock_irq(&mhi_chan->lock); 1344 1345 cmd = MHI_CMD_RESET_CHAN; 1346 break; 1347 case MHI_CH_STATE_TYPE_STOP: 1348 if (mhi_chan->ch_state != MHI_CH_STATE_ENABLED) 1349 return -EINVAL; 1350 1351 cmd = MHI_CMD_STOP_CHAN; 1352 break; 1353 case MHI_CH_STATE_TYPE_START: 1354 if (mhi_chan->ch_state != MHI_CH_STATE_STOP && 1355 mhi_chan->ch_state != MHI_CH_STATE_DISABLED) 1356 return -EINVAL; 1357 1358 cmd = MHI_CMD_START_CHAN; 1359 break; 1360 default: 1361 dev_err(dev, "%d: Channel state update to %s not allowed\n", 1362 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); 1363 return -EINVAL; 1364 } 1365 1366 /* bring host and device out of suspended states */ 1367 ret = mhi_device_get_sync(mhi_cntrl->mhi_dev); 1368 if (ret) 1369 return ret; 1370 mhi_cntrl->runtime_get(mhi_cntrl); 1371 1372 reinit_completion(&mhi_chan->completion); 1373 ret = mhi_send_cmd(mhi_cntrl, mhi_chan, cmd); 1374 if (ret) { 1375 dev_err(dev, "%d: Failed to send %s channel command\n", 1376 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); 1377 goto exit_channel_update; 1378 } 1379 1380 ret = wait_for_completion_timeout(&mhi_chan->completion, 1381 msecs_to_jiffies(mhi_cntrl->timeout_ms)); 1382 if (!ret || mhi_chan->ccs != MHI_EV_CC_SUCCESS) { 1383 dev_err(dev, 1384 "%d: Failed to receive %s channel command completion\n", 1385 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); 1386 ret = -EIO; 1387 goto exit_channel_update; 1388 } 1389 1390 ret = 0; 1391 1392 if (to_state != MHI_CH_STATE_TYPE_RESET) { 1393 write_lock_irq(&mhi_chan->lock); 1394 mhi_chan->ch_state = (to_state == MHI_CH_STATE_TYPE_START) ? 1395 MHI_CH_STATE_ENABLED : MHI_CH_STATE_STOP; 1396 write_unlock_irq(&mhi_chan->lock); 1397 } 1398 1399 dev_dbg(dev, "%d: Channel state change to %s successful\n", 1400 mhi_chan->chan, TO_CH_STATE_TYPE_STR(to_state)); 1401 1402 exit_channel_update: 1403 mhi_cntrl->runtime_put(mhi_cntrl); 1404 mhi_device_put(mhi_cntrl->mhi_dev); 1405 1406 return ret; 1407 } 1408 1409 static void mhi_unprepare_channel(struct mhi_controller *mhi_cntrl, 1410 struct mhi_chan *mhi_chan) 1411 { 1412 int ret; 1413 struct device *dev = &mhi_chan->mhi_dev->dev; 1414 1415 mutex_lock(&mhi_chan->mutex); 1416 1417 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { 1418 dev_dbg(dev, "Current EE: %s Required EE Mask: 0x%x\n", 1419 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); 1420 goto exit_unprepare_channel; 1421 } 1422 1423 /* no more processing events for this channel */ 1424 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, 1425 MHI_CH_STATE_TYPE_RESET); 1426 if (ret) 1427 dev_err(dev, "%d: Failed to reset channel, still resetting\n", 1428 mhi_chan->chan); 1429 1430 exit_unprepare_channel: 1431 write_lock_irq(&mhi_chan->lock); 1432 mhi_chan->ch_state = MHI_CH_STATE_DISABLED; 1433 write_unlock_irq(&mhi_chan->lock); 1434 1435 if (!mhi_chan->offload_ch) { 1436 mhi_reset_chan(mhi_cntrl, mhi_chan); 1437 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); 1438 } 1439 dev_dbg(dev, "%d: successfully reset\n", mhi_chan->chan); 1440 1441 mutex_unlock(&mhi_chan->mutex); 1442 } 1443 1444 int mhi_prepare_channel(struct mhi_controller *mhi_cntrl, 1445 struct mhi_chan *mhi_chan, unsigned int flags) 1446 { 1447 int ret = 0; 1448 struct device *dev = &mhi_chan->mhi_dev->dev; 1449 1450 if (!(BIT(mhi_cntrl->ee) & mhi_chan->ee_mask)) { 1451 dev_err(dev, "Current EE: %s Required EE Mask: 0x%x\n", 1452 TO_MHI_EXEC_STR(mhi_cntrl->ee), mhi_chan->ee_mask); 1453 return -ENOTCONN; 1454 } 1455 1456 mutex_lock(&mhi_chan->mutex); 1457 1458 /* Check of client manages channel context for offload channels */ 1459 if (!mhi_chan->offload_ch) { 1460 ret = mhi_init_chan_ctxt(mhi_cntrl, mhi_chan); 1461 if (ret) 1462 goto error_init_chan; 1463 } 1464 1465 ret = mhi_update_channel_state(mhi_cntrl, mhi_chan, 1466 MHI_CH_STATE_TYPE_START); 1467 if (ret) 1468 goto error_pm_state; 1469 1470 if (mhi_chan->dir == DMA_FROM_DEVICE) 1471 mhi_chan->pre_alloc = !!(flags & MHI_CH_INBOUND_ALLOC_BUFS); 1472 1473 /* Pre-allocate buffer for xfer ring */ 1474 if (mhi_chan->pre_alloc) { 1475 int nr_el = get_nr_avail_ring_elements(mhi_cntrl, 1476 &mhi_chan->tre_ring); 1477 size_t len = mhi_cntrl->buffer_len; 1478 1479 while (nr_el--) { 1480 void *buf; 1481 struct mhi_buf_info info = { }; 1482 1483 buf = kmalloc(len, GFP_KERNEL); 1484 if (!buf) { 1485 ret = -ENOMEM; 1486 goto error_pre_alloc; 1487 } 1488 1489 /* Prepare transfer descriptors */ 1490 info.v_addr = buf; 1491 info.cb_buf = buf; 1492 info.len = len; 1493 ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &info, MHI_EOT); 1494 if (ret) { 1495 kfree(buf); 1496 goto error_pre_alloc; 1497 } 1498 } 1499 1500 read_lock_bh(&mhi_cntrl->pm_lock); 1501 if (MHI_DB_ACCESS_VALID(mhi_cntrl)) { 1502 read_lock_irq(&mhi_chan->lock); 1503 mhi_ring_chan_db(mhi_cntrl, mhi_chan); 1504 read_unlock_irq(&mhi_chan->lock); 1505 } 1506 read_unlock_bh(&mhi_cntrl->pm_lock); 1507 } 1508 1509 mutex_unlock(&mhi_chan->mutex); 1510 1511 return 0; 1512 1513 error_pm_state: 1514 if (!mhi_chan->offload_ch) 1515 mhi_deinit_chan_ctxt(mhi_cntrl, mhi_chan); 1516 1517 error_init_chan: 1518 mutex_unlock(&mhi_chan->mutex); 1519 1520 return ret; 1521 1522 error_pre_alloc: 1523 mutex_unlock(&mhi_chan->mutex); 1524 mhi_unprepare_channel(mhi_cntrl, mhi_chan); 1525 1526 return ret; 1527 } 1528 1529 static void mhi_mark_stale_events(struct mhi_controller *mhi_cntrl, 1530 struct mhi_event *mhi_event, 1531 struct mhi_event_ctxt *er_ctxt, 1532 int chan) 1533 1534 { 1535 struct mhi_ring_element *dev_rp, *local_rp; 1536 struct mhi_ring *ev_ring; 1537 struct device *dev = &mhi_cntrl->mhi_dev->dev; 1538 unsigned long flags; 1539 dma_addr_t ptr; 1540 1541 dev_dbg(dev, "Marking all events for chan: %d as stale\n", chan); 1542 1543 ev_ring = &mhi_event->ring; 1544 1545 /* mark all stale events related to channel as STALE event */ 1546 spin_lock_irqsave(&mhi_event->lock, flags); 1547 1548 ptr = le64_to_cpu(er_ctxt->rp); 1549 if (!is_valid_ring_ptr(ev_ring, ptr)) { 1550 dev_err(&mhi_cntrl->mhi_dev->dev, 1551 "Event ring rp points outside of the event ring\n"); 1552 dev_rp = ev_ring->rp; 1553 } else { 1554 dev_rp = mhi_to_virtual(ev_ring, ptr); 1555 } 1556 1557 local_rp = ev_ring->rp; 1558 while (dev_rp != local_rp) { 1559 if (MHI_TRE_GET_EV_TYPE(local_rp) == MHI_PKT_TYPE_TX_EVENT && 1560 chan == MHI_TRE_GET_EV_CHID(local_rp)) 1561 local_rp->dword[1] = MHI_TRE_EV_DWORD1(chan, 1562 MHI_PKT_TYPE_STALE_EVENT); 1563 local_rp++; 1564 if (local_rp == (ev_ring->base + ev_ring->len)) 1565 local_rp = ev_ring->base; 1566 } 1567 1568 dev_dbg(dev, "Finished marking events as stale events\n"); 1569 spin_unlock_irqrestore(&mhi_event->lock, flags); 1570 } 1571 1572 static void mhi_reset_data_chan(struct mhi_controller *mhi_cntrl, 1573 struct mhi_chan *mhi_chan) 1574 { 1575 struct mhi_ring *buf_ring, *tre_ring; 1576 struct mhi_result result; 1577 1578 /* Reset any pending buffers */ 1579 buf_ring = &mhi_chan->buf_ring; 1580 tre_ring = &mhi_chan->tre_ring; 1581 result.transaction_status = -ENOTCONN; 1582 result.bytes_xferd = 0; 1583 while (tre_ring->rp != tre_ring->wp) { 1584 struct mhi_buf_info *buf_info = buf_ring->rp; 1585 1586 if (mhi_chan->dir == DMA_TO_DEVICE) { 1587 atomic_dec(&mhi_cntrl->pending_pkts); 1588 /* Release the reference got from mhi_queue() */ 1589 mhi_cntrl->runtime_put(mhi_cntrl); 1590 } 1591 1592 if (!buf_info->pre_mapped) 1593 mhi_cntrl->unmap_single(mhi_cntrl, buf_info); 1594 1595 mhi_del_ring_element(mhi_cntrl, buf_ring); 1596 mhi_del_ring_element(mhi_cntrl, tre_ring); 1597 1598 if (mhi_chan->pre_alloc) { 1599 kfree(buf_info->cb_buf); 1600 } else { 1601 result.buf_addr = buf_info->cb_buf; 1602 mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); 1603 } 1604 } 1605 } 1606 1607 void mhi_reset_chan(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan) 1608 { 1609 struct mhi_event *mhi_event; 1610 struct mhi_event_ctxt *er_ctxt; 1611 int chan = mhi_chan->chan; 1612 1613 /* Nothing to reset, client doesn't queue buffers */ 1614 if (mhi_chan->offload_ch) 1615 return; 1616 1617 read_lock_bh(&mhi_cntrl->pm_lock); 1618 mhi_event = &mhi_cntrl->mhi_event[mhi_chan->er_index]; 1619 er_ctxt = &mhi_cntrl->mhi_ctxt->er_ctxt[mhi_chan->er_index]; 1620 1621 mhi_mark_stale_events(mhi_cntrl, mhi_event, er_ctxt, chan); 1622 1623 mhi_reset_data_chan(mhi_cntrl, mhi_chan); 1624 1625 read_unlock_bh(&mhi_cntrl->pm_lock); 1626 } 1627 1628 static int __mhi_prepare_for_transfer(struct mhi_device *mhi_dev, unsigned int flags) 1629 { 1630 int ret, dir; 1631 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; 1632 struct mhi_chan *mhi_chan; 1633 1634 for (dir = 0; dir < 2; dir++) { 1635 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; 1636 if (!mhi_chan) 1637 continue; 1638 1639 ret = mhi_prepare_channel(mhi_cntrl, mhi_chan, flags); 1640 if (ret) 1641 goto error_open_chan; 1642 } 1643 1644 return 0; 1645 1646 error_open_chan: 1647 for (--dir; dir >= 0; dir--) { 1648 mhi_chan = dir ? mhi_dev->dl_chan : mhi_dev->ul_chan; 1649 if (!mhi_chan) 1650 continue; 1651 1652 mhi_unprepare_channel(mhi_cntrl, mhi_chan); 1653 } 1654 1655 return ret; 1656 } 1657 1658 int mhi_prepare_for_transfer(struct mhi_device *mhi_dev) 1659 { 1660 return __mhi_prepare_for_transfer(mhi_dev, 0); 1661 } 1662 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer); 1663 1664 int mhi_prepare_for_transfer_autoqueue(struct mhi_device *mhi_dev) 1665 { 1666 return __mhi_prepare_for_transfer(mhi_dev, MHI_CH_INBOUND_ALLOC_BUFS); 1667 } 1668 EXPORT_SYMBOL_GPL(mhi_prepare_for_transfer_autoqueue); 1669 1670 void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev) 1671 { 1672 struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl; 1673 struct mhi_chan *mhi_chan; 1674 int dir; 1675 1676 for (dir = 0; dir < 2; dir++) { 1677 mhi_chan = dir ? mhi_dev->ul_chan : mhi_dev->dl_chan; 1678 if (!mhi_chan) 1679 continue; 1680 1681 mhi_unprepare_channel(mhi_cntrl, mhi_chan); 1682 } 1683 } 1684 EXPORT_SYMBOL_GPL(mhi_unprepare_from_transfer); 1685