// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2011-2017, The Linux Foundation
 */

#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include "slimbus.h"

/* Manager registers */
#define MGR_CFG			0x200
#define MGR_STATUS		0x204
#define MGR_INT_EN		0x210
#define MGR_INT_STAT		0x214
#define MGR_INT_CLR		0x218
#define MGR_TX_MSG		0x230
#define MGR_RX_MSG		0x270
#define MGR_IE_STAT		0x2F0
#define MGR_VE_STAT		0x300
#define MGR_CFG_ENABLE		1

/* Framer registers */
#define FRM_CFG			0x400
#define FRM_STAT		0x404
#define FRM_INT_EN		0x410
#define FRM_INT_STAT		0x414
#define FRM_INT_CLR		0x418
#define FRM_WAKEUP		0x41C
#define FRM_CLKCTL_DONE		0x420
#define FRM_IE_STAT		0x430
#define FRM_VE_STAT		0x440

/* Interface registers */
#define INTF_CFG		0x600
#define INTF_STAT		0x604
#define INTF_INT_EN		0x610
#define INTF_INT_STAT		0x614
#define INTF_INT_CLR		0x618
#define INTF_IE_STAT		0x630
#define INTF_VE_STAT		0x640

/* Interrupt status bits */
#define MGR_INT_TX_NACKED_2	BIT(25)
#define MGR_INT_MSG_BUF_CONTE	BIT(26)
#define MGR_INT_RX_MSG_RCVD	BIT(30)
#define MGR_INT_TX_MSG_SENT	BIT(31)

/* Framer config register settings */
#define FRM_ACTIVE		1
#define CLK_GEAR		7
#define ROOT_FREQ		11
#define REF_CLK_GEAR		15
#define INTR_WAKE		19

#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
		((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))

#define SLIM_ROOT_FREQ		24576000
#define QCOM_SLIM_AUTOSUSPEND	1000

/* MAX message size over control channel */
#define SLIM_MSGQ_BUF_LEN	40
#define QCOM_TX_MSGS		2
#define QCOM_RX_MSGS		8
#define QCOM_BUF_ALLOC_RETRIES	10

#define CFG_PORT(r, v)		((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
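
/*
 * Component register offsets differ between v1 and v2 controllers;
 * CFG_PORT() picks the matching offset based on the hardware version
 * read from the device at probe time.
 */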
/* V2 Component registers */
#define CFG_PORT_V2(r)		((r ## _V2))
#define COMP_CFG_V2		4
#define COMP_TRUST_CFG_V2	0x3000

/* V1 Component registers */
#define CFG_PORT_V1(r)		((r ## _V1))
#define COMP_CFG_V1		0
#define COMP_TRUST_CFG_V1	0x14

/* Resource group info for manager, and non-ported generic device-components */
#define EE_MGR_RSC_GRP		(1 << 10)
#define EE_NGD_2		(2 << 6)
#define EE_NGD_1		0

struct slim_ctrl_buf {
	void		*base;
	spinlock_t	lock;
	int		head;
	int		tail;
	int		sl_sz;
	int		n;
};

struct qcom_slim_ctrl {
	struct slim_controller	ctrl;
	struct slim_framer	framer;
	struct device		*dev;
	void __iomem		*base;
	void __iomem		*slew_reg;

	struct slim_ctrl_buf	rx;
	struct slim_ctrl_buf	tx;

	struct completion	**wr_comp;
	int			irq;
	struct workqueue_struct	*rxwq;
	struct work_struct	wd;
	struct clk		*rclk;
	struct clk		*hclk;
};

static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
			       u8 len, u32 tx_reg)
{
	int count = (len + 3) >> 2;

	__iowrite32_copy(ctrl->base + tx_reg, buf, count);

	/* Ensure order of subsequent writes */
	mb();
}

static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->rx.lock, flags);
	if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
		dev_err(ctrl->dev, "RX QUEUE full!");
		return NULL;
	}
	idx = ctrl->rx.tail;
	ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
	spin_unlock_irqrestore(&ctrl->rx.lock, flags);

	return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
}

static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
{
	struct completion *comp;
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->tx.lock, flags);
	idx = ctrl->tx.head;
	ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
	spin_unlock_irqrestore(&ctrl->tx.lock, flags);

	comp = ctrl->wr_comp[idx];
	ctrl->wr_comp[idx] = NULL;

	complete(comp);
}
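
/*
 * TX interrupt handler: clears the TX-sent interrupt, and on a NACK dumps
 * the manager, framer and interface status registers for debugging, before
 * completing the transaction at the head of the TX ring.
 */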
static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
					   u32 stat)
{
	int err = 0;

	if (stat & MGR_INT_TX_MSG_SENT)
		writel_relaxed(MGR_INT_TX_MSG_SENT,
			       ctrl->base + MGR_INT_CLR);

	if (stat & MGR_INT_TX_NACKED_2) {
		u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
		u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
		u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
		u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
		u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
		u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
		u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
		u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
		u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);

		writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);

		dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
			stat, mgr_stat);
		dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
		dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
			frm_intr_stat, frm_stat);
		dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
			frm_cfg, frm_ie_stat);
		dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
			intf_intr_stat, intf_stat);
		dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
			intf_ie_stat);
		err = -ENOTCONN;
	}

	slim_ack_txn(ctrl, err);

	return IRQ_HANDLED;
}

static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
					   u32 stat)
{
	u32 *rx_buf, pkt[10];
	bool q_rx = false;
	u8 mc, mt, len;

	pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
	mt = SLIM_HEADER_GET_MT(pkt[0]);
	len = SLIM_HEADER_GET_RL(pkt[0]);
	mc = SLIM_HEADER_GET_MC(pkt[0] >> 8);

	/*
	 * This message cannot be handled by the ISR, so
	 * let the work-queue handle it.
	 */
	if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
		rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
		if (!rx_buf) {
			dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
				pkt[0]);
			goto rx_ret_irq;
		}
		rx_buf[0] = pkt[0];

	} else {
		rx_buf = pkt;
	}

	__ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
			DIV_ROUND_UP(len, 4));

	switch (mc) {

	case SLIM_MSG_MC_REPORT_PRESENT:
		q_rx = true;
		break;
	case SLIM_MSG_MC_REPLY_INFORMATION:
	case SLIM_MSG_MC_REPLY_VALUE:
		slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
				  (u8)(*rx_buf >> 24), (len - 4));
		break;
	default:
		dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
			mc, mt);
		break;
	}
rx_ret_irq:
	writel(MGR_INT_RX_MSG_RCVD, ctrl->base + MGR_INT_CLR);
	if (q_rx)
		queue_work(ctrl->rxwq, &ctrl->wd);

	return IRQ_HANDLED;
}

static irqreturn_t qcom_slim_interrupt(int irq, void *d)
{
	struct qcom_slim_ctrl *ctrl = d;
	u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
	int ret = IRQ_NONE;

	if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
		ret = qcom_slim_handle_tx_irq(ctrl, stat);

	if (stat & MGR_INT_RX_MSG_RCVD)
		ret = qcom_slim_handle_rx_irq(ctrl, stat);

	return ret;
}

static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);

	clk_prepare_enable(ctrl->hclk);
	clk_prepare_enable(ctrl->rclk);
	enable_irq(ctrl->irq);

	writel_relaxed(1, ctrl->base + FRM_WAKEUP);
	/* Make sure framer wakeup write goes through before ISR fires */
	mb();
	/*
	 * HW workaround: currently, slaves report lost-sync messages after
	 * the SLIMbus comes out of clock pause, and transactions with a
	 * slave fail before the slave sends that report.
	 * Give the report some time to arrive: the SLIMbus wakes up in
	 * clock gear 10 at 24.576 MHz, and with each superframe being
	 * 250 usecs we wait for 5-10 superframes here to ensure we get
	 * the message.
	 */
	usleep_range(1250, 2500);
	return 0;
}
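
/*
 * Reserve the next free slot in the circular TX buffer and record the
 * caller's completion in wr_comp[] so the TX interrupt handler can
 * signal it once the transfer is acknowledged (or NACKed).
 */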
static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
			      struct slim_msg_txn *txn,
			      struct completion *done)
{
	unsigned long flags;
	int idx;

	spin_lock_irqsave(&ctrl->tx.lock, flags);
	if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
		spin_unlock_irqrestore(&ctrl->tx.lock, flags);
		dev_err(ctrl->dev, "controller TX buf unavailable");
		return NULL;
	}
	idx = ctrl->tx.tail;
	ctrl->wr_comp[idx] = done;
	ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;

	spin_unlock_irqrestore(&ctrl->tx.lock, flags);

	return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
}

static int qcom_xfer_msg(struct slim_controller *sctrl,
			 struct slim_msg_txn *txn)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	DECLARE_COMPLETION_ONSTACK(done);
	void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
	unsigned long ms = txn->rl + HZ;
	u8 *puc;
	int ret = 0, retries = QCOM_BUF_ALLOC_RETRIES;
	unsigned long time_left;
	u8 la = txn->la;
	u32 *head;
	/* HW expects length field to be excluded */
	txn->rl--;

	/* spin till buffer is made available */
	if (!pbuf) {
		while (retries--) {
			usleep_range(10000, 15000);
			pbuf = slim_alloc_txbuf(ctrl, txn, &done);
			if (pbuf)
				break;
		}
	}

	if (retries < 0 && !pbuf)
		return -ENOMEM;

	puc = (u8 *)pbuf;
	head = (u32 *)pbuf;

	if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
						txn->mc, 0, la);
		puc += 3;
	} else {
		*head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
						txn->mc, 1, la);
		puc += 2;
	}

	if (slim_tid_txn(txn->mt, txn->mc))
		*(puc++) = txn->tid;

	if (slim_ec_txn(txn->mt, txn->mc)) {
		*(puc++) = (txn->ec & 0xFF);
		*(puc++) = (txn->ec >> 8) & 0xFF;
	}

	if (txn->msg && txn->msg->wbuf)
		memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);

	qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
	time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));

	if (!time_left) {
		dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
			txn->mt);
		ret = -ETIMEDOUT;
	}

	return ret;
}

static int qcom_set_laddr(struct slim_controller *sctrl,
			  struct slim_eaddr *ead, u8 laddr)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
	struct {
		__be16 manf_id;
		__be16 prod_code;
		u8 dev_index;
		u8 instance;
		u8 laddr;
	} __packed p;
	struct slim_val_inf msg = {0};
	DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
			      10, laddr, &msg);
	int ret;

	p.manf_id = cpu_to_be16(ead->manf_id);
	p.prod_code = cpu_to_be16(ead->prod_code);
	p.dev_index = ead->dev_index;
	p.instance = ead->instance;
	p.laddr = laddr;

	msg.wbuf = (void *)&p;
	msg.num_bytes = 7;
	ret = slim_do_transfer(&ctrl->ctrl, &txn);

	if (ret)
		dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
			laddr, ret);
	return ret;
}
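
/*
 * Pop one message from the RX ring into buf; returns -ENODATA once the
 * ring is empty. Consumed by the RX workqueue below.
 */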
static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
{
	unsigned long flags;

	spin_lock_irqsave(&ctrl->rx.lock, flags);
	if (ctrl->rx.tail == ctrl->rx.head) {
		spin_unlock_irqrestore(&ctrl->rx.lock, flags);
		return -ENODATA;
	}
	memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
	       ctrl->rx.sl_sz);

	ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
	spin_unlock_irqrestore(&ctrl->rx.lock, flags);

	return 0;
}

static void qcom_slim_rxwq(struct work_struct *work)
{
	u8 buf[SLIM_MSGQ_BUF_LEN];
	u8 mc, mt;
	int ret;
	struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
						   wd);

	while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
		mt = SLIM_HEADER_GET_MT(buf[0]);
		mc = SLIM_HEADER_GET_MC(buf[1]);
		if (mt == SLIM_MSG_MT_CORE &&
		    mc == SLIM_MSG_MC_REPORT_PRESENT) {
			struct slim_eaddr ea;
			u8 laddr;

			ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
			ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
			ea.dev_index = buf[6];
			ea.instance = buf[7];

			ret = slim_device_report_present(&ctrl->ctrl, &ea,
							 &laddr);
			if (ret < 0)
				dev_err(ctrl->dev, "assign laddr failed:%d\n",
					ret);
		} else {
			dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
				mc, mt);
		}
	}
}

static void qcom_slim_prg_slew(struct platform_device *pdev,
			       struct qcom_slim_ctrl *ctrl)
{
	if (!ctrl->slew_reg) {
		/* SLEW RATE register for this SLIMbus */
		ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
		if (IS_ERR(ctrl->slew_reg))
			return;
	}

	writel_relaxed(1, ctrl->slew_reg);
	/* Make sure SLIMbus-slew rate enabling goes through */
	wmb();
}
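
/*
 * Probe: acquire the "iface" and "core" clocks and the IRQ, set up the
 * TX/RX rings and RX workqueue, register the controller with the SLIMbus
 * core, then initialize the component, manager, framer and interface
 * registers and enable autosuspend-based runtime PM.
 */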
static int qcom_slim_probe(struct platform_device *pdev)
{
	struct qcom_slim_ctrl *ctrl;
	struct slim_controller *sctrl;
	int ret, ver;

	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
	if (!ctrl)
		return -ENOMEM;

	ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
	if (IS_ERR(ctrl->hclk))
		return PTR_ERR(ctrl->hclk);

	ctrl->rclk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(ctrl->rclk))
		return PTR_ERR(ctrl->rclk);

	ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
	if (ret) {
		dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
		return ret;
	}

	ctrl->irq = platform_get_irq(pdev, 0);
	if (ctrl->irq < 0)
		return ctrl->irq;

	sctrl = &ctrl->ctrl;
	sctrl->dev = &pdev->dev;
	ctrl->dev = &pdev->dev;
	platform_set_drvdata(pdev, ctrl);
	dev_set_drvdata(ctrl->dev, ctrl);

	ctrl->base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
	if (IS_ERR(ctrl->base))
		return PTR_ERR(ctrl->base);

	sctrl->set_laddr = qcom_set_laddr;
	sctrl->xfer_msg = qcom_xfer_msg;
	sctrl->wakeup = qcom_clk_pause_wakeup;
	ctrl->tx.n = QCOM_TX_MSGS;
	ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
	ctrl->rx.n = QCOM_RX_MSGS;
	ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
	ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
				GFP_KERNEL);
	if (!ctrl->wr_comp)
		return -ENOMEM;

	spin_lock_init(&ctrl->rx.lock);
	spin_lock_init(&ctrl->tx.lock);
	INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
	ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
	if (!ctrl->rxwq) {
		dev_err(ctrl->dev, "Failed to start Rx WQ\n");
		return -ENOMEM;
	}

	ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
	ctrl->framer.superfreq =
		ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
	sctrl->a_framer = &ctrl->framer;
	sctrl->clkgear = SLIM_MAX_CLK_GEAR;

	qcom_slim_prg_slew(pdev, ctrl);

	ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
			       IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
	if (ret) {
		dev_err(&pdev->dev, "request IRQ failed\n");
		goto err_request_irq_failed;
	}

	ret = clk_prepare_enable(ctrl->hclk);
	if (ret)
		goto err_hclk_enable_failed;

	ret = clk_prepare_enable(ctrl->rclk);
	if (ret)
		goto err_rclk_enable_failed;

	ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
				     GFP_KERNEL);
	if (!ctrl->tx.base) {
		ret = -ENOMEM;
		goto err;
	}

	ctrl->rx.base = devm_kcalloc(&pdev->dev, ctrl->rx.n, ctrl->rx.sl_sz,
				     GFP_KERNEL);
	if (!ctrl->rx.base) {
		ret = -ENOMEM;
		goto err;
	}

	/* Register with framework before enabling frame, clock */
	ret = slim_register_controller(&ctrl->ctrl);
	if (ret) {
		dev_err(ctrl->dev, "error adding controller\n");
		goto err;
	}

	ver = readl_relaxed(ctrl->base);
	/* Version info in 16 MSbits */
	ver >>= 16;
	/* Component register initialization */
	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
	writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
	       ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));

	writel((MGR_INT_TX_NACKED_2 |
		MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
		MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
	writel(1, ctrl->base + MGR_CFG);
	/* Framer register initialization */
	writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
	       (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
	       ctrl->base + FRM_CFG);
	writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
	writel(1, ctrl->base + INTF_CFG);
	writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));

	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
	return 0;

err:
	clk_disable_unprepare(ctrl->rclk);
err_rclk_enable_failed:
	clk_disable_unprepare(ctrl->hclk);
err_hclk_enable_failed:
err_request_irq_failed:
	destroy_workqueue(ctrl->rxwq);
	return ret;
}

static void qcom_slim_remove(struct platform_device *pdev)
{
	struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);

	pm_runtime_disable(&pdev->dev);
	slim_unregister_controller(&ctrl->ctrl);
	clk_disable_unprepare(ctrl->rclk);
	clk_disable_unprepare(ctrl->hclk);
	destroy_workqueue(ctrl->rxwq);
}

/*
 * If PM_RUNTIME is not defined, these two functions become helpers
 * called from system suspend/resume.
 */
#ifdef CONFIG_PM
static int qcom_slim_runtime_suspend(struct device *device)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
	int ret;

	dev_dbg(device, "pm_runtime: suspending...\n");
	ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
	if (ret) {
		dev_err(device, "clk pause not entered:%d", ret);
	} else {
		disable_irq(ctrl->irq);
		clk_disable_unprepare(ctrl->hclk);
		clk_disable_unprepare(ctrl->rclk);
	}
	return ret;
}

static int qcom_slim_runtime_resume(struct device *device)
{
	struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
	int ret = 0;

	dev_dbg(device, "pm_runtime: resuming...\n");
	ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
	if (ret)
		dev_err(device, "clk pause not exited:%d", ret);
	return ret;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int qcom_slim_suspend(struct device *dev)
{
	int ret = 0;

	if (!pm_runtime_enabled(dev) ||
	    !pm_runtime_suspended(dev)) {
		dev_dbg(dev, "system suspend");
		ret = qcom_slim_runtime_suspend(dev);
	}

	return ret;
}

static int qcom_slim_resume(struct device *dev)
{
	if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
		int ret;

		dev_dbg(dev, "system resume");
		ret = qcom_slim_runtime_resume(dev);
		if (!ret) {
			pm_runtime_mark_last_busy(dev);
			pm_request_autosuspend(dev);
		}
		return ret;
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
	SET_RUNTIME_PM_OPS(
			   qcom_slim_runtime_suspend,
			   qcom_slim_runtime_resume,
			   NULL
	)
};

static const struct of_device_id qcom_slim_dt_match[] = {
	{ .compatible = "qcom,slim", },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_slim_dt_match);

static struct platform_driver qcom_slim_driver = {
	.probe = qcom_slim_probe,
	.remove_new = qcom_slim_remove,
	.driver	= {
		.name = "qcom_slim_ctrl",
		.of_match_table = qcom_slim_dt_match,
		.pm = &qcom_slim_dev_pm_ops,
	},
};
module_platform_driver(qcom_slim_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");