1 // SPDX-License-Identifier: GPL-2.0-only 2 /* OMAP SSI port driver. 3 * 4 * Copyright (C) 2010 Nokia Corporation. All rights reserved. 5 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org> 6 * 7 * Contact: Carlos Chinea <carlos.chinea@nokia.com> 8 */ 9 10 #include <linux/mod_devicetable.h> 11 #include <linux/platform_device.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/pm_runtime.h> 14 #include <linux/delay.h> 15 16 #include <linux/gpio/consumer.h> 17 #include <linux/pinctrl/consumer.h> 18 #include <linux/debugfs.h> 19 20 #include "omap_ssi_regs.h" 21 #include "omap_ssi.h" 22 23 static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused) 24 { 25 return 0; 26 } 27 28 static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) 29 { 30 return 0; 31 } 32 33 static inline unsigned int ssi_wakein(struct hsi_port *port) 34 { 35 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 36 return gpiod_get_value(omap_port->wake_gpio); 37 } 38 39 #ifdef CONFIG_DEBUG_FS 40 static void ssi_debug_remove_port(struct hsi_port *port) 41 { 42 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 43 44 debugfs_remove_recursive(omap_port->dir); 45 } 46 47 static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused) 48 { 49 struct hsi_port *port = m->private; 50 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 51 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 52 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 53 void __iomem *base = omap_ssi->sys; 54 unsigned int ch; 55 56 pm_runtime_get_sync(omap_port->pdev); 57 if (omap_port->wake_irq > 0) 58 seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port)); 59 seq_printf(m, "WAKE\t\t: 0x%08x\n", 60 readl(base + SSI_WAKE_REG(port->num))); 61 seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0, 62 readl(base + SSI_MPU_ENABLE_REG(port->num, 0))); 63 seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0, 64 readl(base + SSI_MPU_STATUS_REG(port->num, 0))); 65 /* SST */ 66 base = omap_port->sst_base; 67 seq_puts(m, "\nSST\n===\n"); 68 seq_printf(m, "ID SST\t\t: 0x%08x\n", 69 readl(base + SSI_SST_ID_REG)); 70 seq_printf(m, "MODE\t\t: 0x%08x\n", 71 readl(base + SSI_SST_MODE_REG)); 72 seq_printf(m, "FRAMESIZE\t: 0x%08x\n", 73 readl(base + SSI_SST_FRAMESIZE_REG)); 74 seq_printf(m, "DIVISOR\t\t: 0x%08x\n", 75 readl(base + SSI_SST_DIVISOR_REG)); 76 seq_printf(m, "CHANNELS\t: 0x%08x\n", 77 readl(base + SSI_SST_CHANNELS_REG)); 78 seq_printf(m, "ARBMODE\t\t: 0x%08x\n", 79 readl(base + SSI_SST_ARBMODE_REG)); 80 seq_printf(m, "TXSTATE\t\t: 0x%08x\n", 81 readl(base + SSI_SST_TXSTATE_REG)); 82 seq_printf(m, "BUFSTATE\t: 0x%08x\n", 83 readl(base + SSI_SST_BUFSTATE_REG)); 84 seq_printf(m, "BREAK\t\t: 0x%08x\n", 85 readl(base + SSI_SST_BREAK_REG)); 86 for (ch = 0; ch < omap_port->channels; ch++) { 87 seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, 88 readl(base + SSI_SST_BUFFER_CH_REG(ch))); 89 } 90 /* SSR */ 91 base = omap_port->ssr_base; 92 seq_puts(m, "\nSSR\n===\n"); 93 seq_printf(m, "ID SSR\t\t: 0x%08x\n", 94 readl(base + SSI_SSR_ID_REG)); 95 seq_printf(m, "MODE\t\t: 0x%08x\n", 96 readl(base + SSI_SSR_MODE_REG)); 97 seq_printf(m, "FRAMESIZE\t: 0x%08x\n", 98 readl(base + SSI_SSR_FRAMESIZE_REG)); 99 seq_printf(m, "CHANNELS\t: 0x%08x\n", 100 readl(base + SSI_SSR_CHANNELS_REG)); 101 seq_printf(m, "TIMEOUT\t\t: 0x%08x\n", 102 readl(base + SSI_SSR_TIMEOUT_REG)); 103 seq_printf(m, "RXSTATE\t\t: 0x%08x\n", 104 readl(base + SSI_SSR_RXSTATE_REG)); 105 seq_printf(m, "BUFSTATE\t: 0x%08x\n", 106 readl(base + SSI_SSR_BUFSTATE_REG)); 107 seq_printf(m, "BREAK\t\t: 0x%08x\n", 108 readl(base + SSI_SSR_BREAK_REG)); 109 seq_printf(m, "ERROR\t\t: 0x%08x\n", 110 readl(base + SSI_SSR_ERROR_REG)); 111 seq_printf(m, "ERRORACK\t: 0x%08x\n", 112 readl(base + SSI_SSR_ERRORACK_REG)); 113 for (ch = 0; ch < omap_port->channels; ch++) { 114 seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, 115 readl(base + SSI_SSR_BUFFER_CH_REG(ch))); 116 } 117 pm_runtime_put_autosuspend(omap_port->pdev); 118 119 return 0; 120 } 121 122 DEFINE_SHOW_ATTRIBUTE(ssi_port_regs); 123 124 static int ssi_div_get(void *data, u64 *val) 125 { 126 struct hsi_port *port = data; 127 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 128 129 pm_runtime_get_sync(omap_port->pdev); 130 *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); 131 pm_runtime_put_autosuspend(omap_port->pdev); 132 133 return 0; 134 } 135 136 static int ssi_div_set(void *data, u64 val) 137 { 138 struct hsi_port *port = data; 139 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 140 141 if (val > 127) 142 return -EINVAL; 143 144 pm_runtime_get_sync(omap_port->pdev); 145 writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); 146 omap_port->sst.divisor = val; 147 pm_runtime_put_autosuspend(omap_port->pdev); 148 149 return 0; 150 } 151 152 DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n"); 153 154 static void ssi_debug_add_port(struct omap_ssi_port *omap_port, 155 struct dentry *dir) 156 { 157 struct hsi_port *port = to_hsi_port(omap_port->dev); 158 159 dir = debugfs_create_dir(dev_name(omap_port->dev), dir); 160 omap_port->dir = dir; 161 debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops); 162 dir = debugfs_create_dir("sst", dir); 163 debugfs_create_file_unsafe("divisor", 0644, dir, port, 164 &ssi_sst_div_fops); 165 } 166 #endif 167 168 static void ssi_process_errqueue(struct work_struct *work) 169 { 170 struct omap_ssi_port *omap_port; 171 struct list_head *head, *tmp; 172 struct hsi_msg *msg; 173 174 omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work); 175 176 list_for_each_safe(head, tmp, &omap_port->errqueue) { 177 msg = list_entry(head, struct hsi_msg, link); 178 msg->complete(msg); 179 list_del(head); 180 } 181 } 182 183 static int ssi_claim_lch(struct hsi_msg *msg) 184 { 185 186 struct hsi_port *port = hsi_get_port(msg->cl); 187 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 188 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 189 int lch; 190 191 for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) 192 if (!omap_ssi->gdd_trn[lch].msg) { 193 omap_ssi->gdd_trn[lch].msg = msg; 194 omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl; 195 return lch; 196 } 197 198 return -EBUSY; 199 } 200 201 static int ssi_start_dma(struct hsi_msg *msg, int lch) 202 { 203 struct hsi_port *port = hsi_get_port(msg->cl); 204 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 205 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 206 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 207 void __iomem *gdd = omap_ssi->gdd; 208 int err; 209 u16 csdp; 210 u16 ccr; 211 u32 s_addr; 212 u32 d_addr; 213 u32 tmp; 214 215 /* Hold clocks during the transfer */ 216 pm_runtime_get(omap_port->pdev); 217 218 if (!pm_runtime_active(omap_port->pdev)) { 219 dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n"); 220 pm_runtime_put_autosuspend(omap_port->pdev); 221 return -EREMOTEIO; 222 } 223 224 if (msg->ttype == HSI_MSG_READ) { 225 err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, 226 DMA_FROM_DEVICE); 227 if (!err) { 228 dev_dbg(&ssi->device, "DMA map SG failed !\n"); 229 pm_runtime_put_autosuspend(omap_port->pdev); 230 return -EIO; 231 } 232 csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | 233 SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT | 234 SSI_DATA_TYPE_S32; 235 ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */ 236 ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST | 237 SSI_CCR_ENABLE; 238 s_addr = omap_port->ssr_dma + 239 SSI_SSR_BUFFER_CH_REG(msg->channel); 240 d_addr = sg_dma_address(msg->sgt.sgl); 241 } else { 242 err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, 243 DMA_TO_DEVICE); 244 if (!err) { 245 dev_dbg(&ssi->device, "DMA map SG failed !\n"); 246 pm_runtime_put_autosuspend(omap_port->pdev); 247 return -EIO; 248 } 249 csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | 250 SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT | 251 SSI_DATA_TYPE_S32; 252 ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */ 253 ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST | 254 SSI_CCR_ENABLE; 255 s_addr = sg_dma_address(msg->sgt.sgl); 256 d_addr = omap_port->sst_dma + 257 SSI_SST_BUFFER_CH_REG(msg->channel); 258 } 259 dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x d_addr %08x\n", 260 lch, csdp, ccr, s_addr, d_addr); 261 262 writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch)); 263 writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); 264 writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); 265 writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch)); 266 writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length), 267 gdd + SSI_GDD_CEN_REG(lch)); 268 269 spin_lock_bh(&omap_ssi->lock); 270 tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); 271 tmp |= SSI_GDD_LCH(lch); 272 writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); 273 spin_unlock_bh(&omap_ssi->lock); 274 writew(ccr, gdd + SSI_GDD_CCR_REG(lch)); 275 msg->status = HSI_STATUS_PROCEEDING; 276 277 return 0; 278 } 279 280 static int ssi_start_pio(struct hsi_msg *msg) 281 { 282 struct hsi_port *port = hsi_get_port(msg->cl); 283 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 284 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 285 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 286 u32 val; 287 288 pm_runtime_get(omap_port->pdev); 289 290 if (!pm_runtime_active(omap_port->pdev)) { 291 dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n"); 292 pm_runtime_put_autosuspend(omap_port->pdev); 293 return -EREMOTEIO; 294 } 295 296 if (msg->ttype == HSI_MSG_WRITE) { 297 val = SSI_DATAACCEPT(msg->channel); 298 /* Hold clocks for pio writes */ 299 pm_runtime_get(omap_port->pdev); 300 } else { 301 val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; 302 } 303 dev_dbg(&port->device, "Single %s transfer\n", 304 msg->ttype ? "write" : "read"); 305 val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 306 writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 307 pm_runtime_put_autosuspend(omap_port->pdev); 308 msg->actual_len = 0; 309 msg->status = HSI_STATUS_PROCEEDING; 310 311 return 0; 312 } 313 314 static int ssi_start_transfer(struct list_head *queue) 315 { 316 struct hsi_msg *msg; 317 int lch = -1; 318 319 if (list_empty(queue)) 320 return 0; 321 msg = list_first_entry(queue, struct hsi_msg, link); 322 if (msg->status != HSI_STATUS_QUEUED) 323 return 0; 324 if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) 325 lch = ssi_claim_lch(msg); 326 if (lch >= 0) 327 return ssi_start_dma(msg, lch); 328 else 329 return ssi_start_pio(msg); 330 } 331 332 static int ssi_async_break(struct hsi_msg *msg) 333 { 334 struct hsi_port *port = hsi_get_port(msg->cl); 335 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 336 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 337 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 338 int err = 0; 339 u32 tmp; 340 341 pm_runtime_get_sync(omap_port->pdev); 342 if (msg->ttype == HSI_MSG_WRITE) { 343 if (omap_port->sst.mode != SSI_MODE_FRAME) { 344 err = -EINVAL; 345 goto out; 346 } 347 writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); 348 msg->status = HSI_STATUS_COMPLETED; 349 msg->complete(msg); 350 } else { 351 if (omap_port->ssr.mode != SSI_MODE_FRAME) { 352 err = -EINVAL; 353 goto out; 354 } 355 spin_lock_bh(&omap_port->lock); 356 tmp = readl(omap_ssi->sys + 357 SSI_MPU_ENABLE_REG(port->num, 0)); 358 writel(tmp | SSI_BREAKDETECTED, 359 omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 360 msg->status = HSI_STATUS_PROCEEDING; 361 list_add_tail(&msg->link, &omap_port->brkqueue); 362 spin_unlock_bh(&omap_port->lock); 363 } 364 out: 365 pm_runtime_put_autosuspend(omap_port->pdev); 366 367 return err; 368 } 369 370 static int ssi_async(struct hsi_msg *msg) 371 { 372 struct hsi_port *port = hsi_get_port(msg->cl); 373 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 374 struct list_head *queue; 375 int err = 0; 376 377 BUG_ON(!msg); 378 379 if (msg->sgt.nents > 1) 380 return -ENOSYS; /* TODO: Add sg support */ 381 382 if (msg->break_frame) 383 return ssi_async_break(msg); 384 385 if (msg->ttype) { 386 BUG_ON(msg->channel >= omap_port->sst.channels); 387 queue = &omap_port->txqueue[msg->channel]; 388 } else { 389 BUG_ON(msg->channel >= omap_port->ssr.channels); 390 queue = &omap_port->rxqueue[msg->channel]; 391 } 392 msg->status = HSI_STATUS_QUEUED; 393 394 pm_runtime_get_sync(omap_port->pdev); 395 spin_lock_bh(&omap_port->lock); 396 list_add_tail(&msg->link, queue); 397 err = ssi_start_transfer(queue); 398 if (err < 0) { 399 list_del(&msg->link); 400 msg->status = HSI_STATUS_ERROR; 401 } 402 spin_unlock_bh(&omap_port->lock); 403 pm_runtime_put_autosuspend(omap_port->pdev); 404 dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", 405 msg->status, msg->ttype, msg->channel); 406 407 return err; 408 } 409 410 static u32 ssi_calculate_div(struct hsi_controller *ssi) 411 { 412 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 413 u32 tx_fckrate = (u32) omap_ssi->fck_rate; 414 415 /* / 2 : SSI TX clock is always half of the SSI functional clock */ 416 tx_fckrate >>= 1; 417 /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */ 418 tx_fckrate--; 419 dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n", 420 tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate, 421 omap_ssi->max_speed); 422 423 return tx_fckrate / omap_ssi->max_speed; 424 } 425 426 static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl) 427 { 428 struct list_head *node, *tmp; 429 struct hsi_msg *msg; 430 431 list_for_each_safe(node, tmp, queue) { 432 msg = list_entry(node, struct hsi_msg, link); 433 if ((cl) && (cl != msg->cl)) 434 continue; 435 list_del(node); 436 pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n", 437 msg->channel, msg, msg->sgt.sgl->length, 438 msg->ttype, msg->context); 439 if (msg->destructor) 440 msg->destructor(msg); 441 else 442 hsi_free_msg(msg); 443 } 444 } 445 446 static int ssi_setup(struct hsi_client *cl) 447 { 448 struct hsi_port *port = to_hsi_port(cl->device.parent); 449 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 450 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 451 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 452 void __iomem *sst = omap_port->sst_base; 453 void __iomem *ssr = omap_port->ssr_base; 454 u32 div; 455 u32 val; 456 int err = 0; 457 458 pm_runtime_get_sync(omap_port->pdev); 459 spin_lock_bh(&omap_port->lock); 460 if (cl->tx_cfg.speed) 461 omap_ssi->max_speed = cl->tx_cfg.speed; 462 div = ssi_calculate_div(ssi); 463 if (div > SSI_MAX_DIVISOR) { 464 dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n", 465 cl->tx_cfg.speed, div); 466 err = -EINVAL; 467 goto out; 468 } 469 /* Set TX/RX module to sleep to stop TX/RX during cfg update */ 470 writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG); 471 writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG); 472 /* Flush posted write */ 473 val = readl(ssr + SSI_SSR_MODE_REG); 474 /* TX */ 475 writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG); 476 writel_relaxed(div, sst + SSI_SST_DIVISOR_REG); 477 writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG); 478 writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG); 479 writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG); 480 /* RX */ 481 writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG); 482 writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG); 483 writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG); 484 /* Cleanup the break queue if we leave FRAME mode */ 485 if ((omap_port->ssr.mode == SSI_MODE_FRAME) && 486 (cl->rx_cfg.mode != SSI_MODE_FRAME)) 487 ssi_flush_queue(&omap_port->brkqueue, cl); 488 writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG); 489 omap_port->channels = max(cl->rx_cfg.num_hw_channels, 490 cl->tx_cfg.num_hw_channels); 491 /* Shadow registering for OFF mode */ 492 /* SST */ 493 omap_port->sst.divisor = div; 494 omap_port->sst.frame_size = 31; 495 omap_port->sst.channels = cl->tx_cfg.num_hw_channels; 496 omap_port->sst.arb_mode = cl->tx_cfg.arb_mode; 497 omap_port->sst.mode = cl->tx_cfg.mode; 498 /* SSR */ 499 omap_port->ssr.frame_size = 31; 500 omap_port->ssr.timeout = 0; 501 omap_port->ssr.channels = cl->rx_cfg.num_hw_channels; 502 omap_port->ssr.mode = cl->rx_cfg.mode; 503 out: 504 spin_unlock_bh(&omap_port->lock); 505 pm_runtime_put_autosuspend(omap_port->pdev); 506 507 return err; 508 } 509 510 static int ssi_flush(struct hsi_client *cl) 511 { 512 struct hsi_port *port = hsi_get_port(cl); 513 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 514 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 515 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 516 struct hsi_msg *msg; 517 void __iomem *sst = omap_port->sst_base; 518 void __iomem *ssr = omap_port->ssr_base; 519 unsigned int i; 520 u32 err; 521 522 pm_runtime_get_sync(omap_port->pdev); 523 spin_lock_bh(&omap_port->lock); 524 525 /* stop all ssi communication */ 526 pinctrl_pm_select_idle_state(omap_port->pdev); 527 udelay(1); /* wait for racing frames */ 528 529 /* Stop all DMA transfers */ 530 for (i = 0; i < SSI_MAX_GDD_LCH; i++) { 531 msg = omap_ssi->gdd_trn[i].msg; 532 if (!msg || (port != hsi_get_port(msg->cl))) 533 continue; 534 writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); 535 if (msg->ttype == HSI_MSG_READ) 536 pm_runtime_put_autosuspend(omap_port->pdev); 537 omap_ssi->gdd_trn[i].msg = NULL; 538 } 539 /* Flush all SST buffers */ 540 writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG); 541 writel_relaxed(0, sst + SSI_SST_TXSTATE_REG); 542 /* Flush all SSR buffers */ 543 writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG); 544 writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG); 545 /* Flush all errors */ 546 err = readl(ssr + SSI_SSR_ERROR_REG); 547 writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG); 548 /* Flush break */ 549 writel_relaxed(0, ssr + SSI_SSR_BREAK_REG); 550 /* Clear interrupts */ 551 writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 552 writel_relaxed(0xffffff00, 553 omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); 554 writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); 555 writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); 556 /* Dequeue all pending requests */ 557 for (i = 0; i < omap_port->channels; i++) { 558 /* Release write clocks */ 559 if (!list_empty(&omap_port->txqueue[i])) 560 pm_runtime_put_autosuspend(omap_port->pdev); 561 ssi_flush_queue(&omap_port->txqueue[i], NULL); 562 ssi_flush_queue(&omap_port->rxqueue[i], NULL); 563 } 564 ssi_flush_queue(&omap_port->brkqueue, NULL); 565 566 /* Resume SSI communication */ 567 pinctrl_pm_select_default_state(omap_port->pdev); 568 569 spin_unlock_bh(&omap_port->lock); 570 pm_runtime_put_autosuspend(omap_port->pdev); 571 572 return 0; 573 } 574 575 static void start_tx_work(struct work_struct *work) 576 { 577 struct omap_ssi_port *omap_port = 578 container_of(work, struct omap_ssi_port, work); 579 struct hsi_port *port = to_hsi_port(omap_port->dev); 580 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 581 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 582 583 pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ 584 writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); 585 } 586 587 static int ssi_start_tx(struct hsi_client *cl) 588 { 589 struct hsi_port *port = hsi_get_port(cl); 590 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 591 592 dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); 593 594 spin_lock_bh(&omap_port->wk_lock); 595 if (omap_port->wk_refcount++) { 596 spin_unlock_bh(&omap_port->wk_lock); 597 return 0; 598 } 599 spin_unlock_bh(&omap_port->wk_lock); 600 601 schedule_work(&omap_port->work); 602 603 return 0; 604 } 605 606 static int ssi_stop_tx(struct hsi_client *cl) 607 { 608 struct hsi_port *port = hsi_get_port(cl); 609 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 610 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 611 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 612 613 dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount); 614 615 spin_lock_bh(&omap_port->wk_lock); 616 BUG_ON(!omap_port->wk_refcount); 617 if (--omap_port->wk_refcount) { 618 spin_unlock_bh(&omap_port->wk_lock); 619 return 0; 620 } 621 writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); 622 spin_unlock_bh(&omap_port->wk_lock); 623 624 pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */ 625 626 627 return 0; 628 } 629 630 static void ssi_transfer(struct omap_ssi_port *omap_port, 631 struct list_head *queue) 632 { 633 struct hsi_msg *msg; 634 int err = -1; 635 636 pm_runtime_get(omap_port->pdev); 637 spin_lock_bh(&omap_port->lock); 638 while (err < 0) { 639 err = ssi_start_transfer(queue); 640 if (err < 0) { 641 msg = list_first_entry(queue, struct hsi_msg, link); 642 msg->status = HSI_STATUS_ERROR; 643 msg->actual_len = 0; 644 list_del(&msg->link); 645 spin_unlock_bh(&omap_port->lock); 646 msg->complete(msg); 647 spin_lock_bh(&omap_port->lock); 648 } 649 } 650 spin_unlock_bh(&omap_port->lock); 651 pm_runtime_put_autosuspend(omap_port->pdev); 652 } 653 654 static void ssi_cleanup_queues(struct hsi_client *cl) 655 { 656 struct hsi_port *port = hsi_get_port(cl); 657 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 658 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 659 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 660 struct hsi_msg *msg; 661 unsigned int i; 662 u32 rxbufstate = 0; 663 u32 txbufstate = 0; 664 u32 status = SSI_ERROROCCURED; 665 u32 tmp; 666 667 ssi_flush_queue(&omap_port->brkqueue, cl); 668 if (list_empty(&omap_port->brkqueue)) 669 status |= SSI_BREAKDETECTED; 670 671 for (i = 0; i < omap_port->channels; i++) { 672 if (list_empty(&omap_port->txqueue[i])) 673 continue; 674 msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, 675 link); 676 if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { 677 txbufstate |= (1 << i); 678 status |= SSI_DATAACCEPT(i); 679 /* Release the clocks writes, also GDD ones */ 680 pm_runtime_put_autosuspend(omap_port->pdev); 681 } 682 ssi_flush_queue(&omap_port->txqueue[i], cl); 683 } 684 for (i = 0; i < omap_port->channels; i++) { 685 if (list_empty(&omap_port->rxqueue[i])) 686 continue; 687 msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, 688 link); 689 if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) { 690 rxbufstate |= (1 << i); 691 status |= SSI_DATAAVAILABLE(i); 692 } 693 ssi_flush_queue(&omap_port->rxqueue[i], cl); 694 /* Check if we keep the error detection interrupt armed */ 695 if (!list_empty(&omap_port->rxqueue[i])) 696 status &= ~SSI_ERROROCCURED; 697 } 698 /* Cleanup write buffers */ 699 tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG); 700 tmp &= ~txbufstate; 701 writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG); 702 /* Cleanup read buffers */ 703 tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); 704 tmp &= ~rxbufstate; 705 writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); 706 /* Disarm and ack pending interrupts */ 707 tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 708 tmp &= ~status; 709 writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 710 writel_relaxed(status, omap_ssi->sys + 711 SSI_MPU_STATUS_REG(port->num, 0)); 712 } 713 714 static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl) 715 { 716 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 717 struct hsi_port *port = hsi_get_port(cl); 718 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 719 struct hsi_msg *msg; 720 unsigned int i; 721 u32 val = 0; 722 u32 tmp; 723 724 for (i = 0; i < SSI_MAX_GDD_LCH; i++) { 725 msg = omap_ssi->gdd_trn[i].msg; 726 if ((!msg) || (msg->cl != cl)) 727 continue; 728 writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); 729 val |= (1 << i); 730 /* 731 * Clock references for write will be handled in 732 * ssi_cleanup_queues 733 */ 734 if (msg->ttype == HSI_MSG_READ) { 735 pm_runtime_put_autosuspend(omap_port->pdev); 736 } 737 omap_ssi->gdd_trn[i].msg = NULL; 738 } 739 tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); 740 tmp &= ~val; 741 writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); 742 writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG); 743 } 744 745 static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode) 746 { 747 writel(mode, omap_port->sst_base + SSI_SST_MODE_REG); 748 writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG); 749 /* OCP barrier */ 750 mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); 751 752 return 0; 753 } 754 755 static int ssi_release(struct hsi_client *cl) 756 { 757 struct hsi_port *port = hsi_get_port(cl); 758 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 759 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 760 761 pm_runtime_get_sync(omap_port->pdev); 762 spin_lock_bh(&omap_port->lock); 763 /* Stop all the pending DMA requests for that client */ 764 ssi_cleanup_gdd(ssi, cl); 765 /* Now cleanup all the queues */ 766 ssi_cleanup_queues(cl); 767 /* If it is the last client of the port, do extra checks and cleanup */ 768 if (port->claimed <= 1) { 769 /* 770 * Drop the clock reference for the incoming wake line 771 * if it is still kept high by the other side. 772 */ 773 if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) 774 pm_runtime_put_sync(omap_port->pdev); 775 pm_runtime_get(omap_port->pdev); 776 /* Stop any SSI TX/RX without a client */ 777 ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); 778 omap_port->sst.mode = SSI_MODE_SLEEP; 779 omap_port->ssr.mode = SSI_MODE_SLEEP; 780 pm_runtime_put(omap_port->pdev); 781 WARN_ON(omap_port->wk_refcount != 0); 782 } 783 spin_unlock_bh(&omap_port->lock); 784 pm_runtime_put_sync(omap_port->pdev); 785 786 return 0; 787 } 788 789 790 791 static void ssi_error(struct hsi_port *port) 792 { 793 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 794 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 795 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 796 struct hsi_msg *msg; 797 unsigned int i; 798 u32 err; 799 u32 val; 800 u32 tmp; 801 802 /* ACK error */ 803 err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG); 804 dev_err(&port->device, "SSI error: 0x%02x\n", err); 805 if (!err) { 806 dev_dbg(&port->device, "spurious SSI error ignored!\n"); 807 return; 808 } 809 spin_lock(&omap_ssi->lock); 810 /* Cancel all GDD read transfers */ 811 for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) { 812 msg = omap_ssi->gdd_trn[i].msg; 813 if ((msg) && (msg->ttype == HSI_MSG_READ)) { 814 writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); 815 val |= (1 << i); 816 omap_ssi->gdd_trn[i].msg = NULL; 817 } 818 } 819 tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); 820 tmp &= ~val; 821 writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); 822 spin_unlock(&omap_ssi->lock); 823 /* Cancel all PIO read transfers */ 824 spin_lock(&omap_port->lock); 825 tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 826 tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */ 827 writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 828 /* ACK error */ 829 writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG); 830 writel_relaxed(SSI_ERROROCCURED, 831 omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); 832 /* Signal the error all current pending read requests */ 833 for (i = 0; i < omap_port->channels; i++) { 834 if (list_empty(&omap_port->rxqueue[i])) 835 continue; 836 msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, 837 link); 838 list_del(&msg->link); 839 msg->status = HSI_STATUS_ERROR; 840 spin_unlock(&omap_port->lock); 841 msg->complete(msg); 842 /* Now restart queued reads if any */ 843 ssi_transfer(omap_port, &omap_port->rxqueue[i]); 844 spin_lock(&omap_port->lock); 845 } 846 spin_unlock(&omap_port->lock); 847 } 848 849 static void ssi_break_complete(struct hsi_port *port) 850 { 851 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 852 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 853 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 854 struct hsi_msg *msg; 855 struct hsi_msg *tmp; 856 u32 val; 857 858 dev_dbg(&port->device, "HWBREAK received\n"); 859 860 spin_lock(&omap_port->lock); 861 val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 862 val &= ~SSI_BREAKDETECTED; 863 writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 864 writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG); 865 writel(SSI_BREAKDETECTED, 866 omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); 867 spin_unlock(&omap_port->lock); 868 869 list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { 870 msg->status = HSI_STATUS_COMPLETED; 871 spin_lock(&omap_port->lock); 872 list_del(&msg->link); 873 spin_unlock(&omap_port->lock); 874 msg->complete(msg); 875 } 876 877 } 878 879 static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) 880 { 881 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 882 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 883 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 884 struct hsi_msg *msg; 885 u32 *buf; 886 u32 reg; 887 u32 val; 888 889 spin_lock_bh(&omap_port->lock); 890 msg = list_first_entry(queue, struct hsi_msg, link); 891 if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { 892 msg->actual_len = 0; 893 msg->status = HSI_STATUS_PENDING; 894 } 895 if (msg->ttype == HSI_MSG_WRITE) 896 val = SSI_DATAACCEPT(msg->channel); 897 else 898 val = SSI_DATAAVAILABLE(msg->channel); 899 if (msg->status == HSI_STATUS_PROCEEDING) { 900 buf = sg_virt(msg->sgt.sgl) + msg->actual_len; 901 if (msg->ttype == HSI_MSG_WRITE) 902 writel(*buf, omap_port->sst_base + 903 SSI_SST_BUFFER_CH_REG(msg->channel)); 904 else 905 *buf = readl(omap_port->ssr_base + 906 SSI_SSR_BUFFER_CH_REG(msg->channel)); 907 dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel, 908 msg->ttype, *buf); 909 msg->actual_len += sizeof(*buf); 910 if (msg->actual_len >= msg->sgt.sgl->length) 911 msg->status = HSI_STATUS_COMPLETED; 912 /* 913 * Wait for the last written frame to be really sent before 914 * we call the complete callback 915 */ 916 if ((msg->status == HSI_STATUS_PROCEEDING) || 917 ((msg->status == HSI_STATUS_COMPLETED) && 918 (msg->ttype == HSI_MSG_WRITE))) { 919 writel(val, omap_ssi->sys + 920 SSI_MPU_STATUS_REG(port->num, 0)); 921 spin_unlock_bh(&omap_port->lock); 922 923 return; 924 } 925 926 } 927 /* Transfer completed at this point */ 928 reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 929 if (msg->ttype == HSI_MSG_WRITE) { 930 /* Release clocks for write transfer */ 931 pm_runtime_put_autosuspend(omap_port->pdev); 932 } 933 reg &= ~val; 934 writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 935 writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); 936 list_del(&msg->link); 937 spin_unlock_bh(&omap_port->lock); 938 msg->complete(msg); 939 ssi_transfer(omap_port, queue); 940 } 941 942 static irqreturn_t ssi_pio_thread(int irq, void *ssi_port) 943 { 944 struct hsi_port *port = (struct hsi_port *)ssi_port; 945 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 946 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 947 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 948 void __iomem *sys = omap_ssi->sys; 949 unsigned int ch; 950 u32 status_reg; 951 952 pm_runtime_get_sync(omap_port->pdev); 953 954 do { 955 status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); 956 status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); 957 958 for (ch = 0; ch < omap_port->channels; ch++) { 959 if (status_reg & SSI_DATAACCEPT(ch)) 960 ssi_pio_complete(port, &omap_port->txqueue[ch]); 961 if (status_reg & SSI_DATAAVAILABLE(ch)) 962 ssi_pio_complete(port, &omap_port->rxqueue[ch]); 963 } 964 if (status_reg & SSI_BREAKDETECTED) 965 ssi_break_complete(port); 966 if (status_reg & SSI_ERROROCCURED) 967 ssi_error(port); 968 969 status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); 970 status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); 971 972 /* TODO: sleep if we retry? */ 973 } while (status_reg); 974 975 pm_runtime_put_autosuspend(omap_port->pdev); 976 977 return IRQ_HANDLED; 978 } 979 980 static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port) 981 { 982 struct hsi_port *port = (struct hsi_port *)ssi_port; 983 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 984 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 985 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 986 987 if (ssi_wakein(port)) { 988 /** 989 * We can have a quick High-Low-High transition in the line. 990 * In such a case if we have long interrupt latencies, 991 * we can miss the low event or get twice a high event. 992 * This workaround will avoid breaking the clock reference 993 * count when such a situation ocurrs. 994 */ 995 if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags)) 996 pm_runtime_get_sync(omap_port->pdev); 997 dev_dbg(&ssi->device, "Wake in high\n"); 998 if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ 999 writel(SSI_WAKE(0), 1000 omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); 1001 } 1002 hsi_event(port, HSI_EVENT_START_RX); 1003 } else { 1004 dev_dbg(&ssi->device, "Wake in low\n"); 1005 if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ 1006 writel(SSI_WAKE(0), 1007 omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); 1008 } 1009 hsi_event(port, HSI_EVENT_STOP_RX); 1010 if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) { 1011 pm_runtime_put_autosuspend(omap_port->pdev); 1012 } 1013 } 1014 1015 return IRQ_HANDLED; 1016 } 1017 1018 static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd) 1019 { 1020 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 1021 int err; 1022 1023 err = platform_get_irq(pd, 0); 1024 if (err < 0) 1025 return err; 1026 omap_port->irq = err; 1027 err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL, 1028 ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port); 1029 if (err < 0) 1030 dev_err(&port->device, "Request IRQ %d failed (%d)\n", 1031 omap_port->irq, err); 1032 return err; 1033 } 1034 1035 static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd) 1036 { 1037 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 1038 int cawake_irq; 1039 int err; 1040 1041 if (!omap_port->wake_gpio) { 1042 omap_port->wake_irq = -1; 1043 return 0; 1044 } 1045 1046 cawake_irq = gpiod_to_irq(omap_port->wake_gpio); 1047 omap_port->wake_irq = cawake_irq; 1048 1049 err = devm_request_threaded_irq(&port->device, cawake_irq, NULL, 1050 ssi_wake_thread, 1051 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, 1052 "SSI cawake", port); 1053 if (err < 0) 1054 dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", 1055 cawake_irq, err); 1056 err = enable_irq_wake(cawake_irq); 1057 if (err < 0) 1058 dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n", 1059 cawake_irq, err); 1060 1061 return err; 1062 } 1063 1064 static void ssi_queues_init(struct omap_ssi_port *omap_port) 1065 { 1066 unsigned int ch; 1067 1068 for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) { 1069 INIT_LIST_HEAD(&omap_port->txqueue[ch]); 1070 INIT_LIST_HEAD(&omap_port->rxqueue[ch]); 1071 } 1072 INIT_LIST_HEAD(&omap_port->brkqueue); 1073 } 1074 1075 static int ssi_port_get_iomem(struct platform_device *pd, 1076 const char *name, void __iomem **pbase, dma_addr_t *phy) 1077 { 1078 struct hsi_port *port = platform_get_drvdata(pd); 1079 struct resource *mem; 1080 struct resource *ioarea; 1081 void __iomem *base; 1082 1083 mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name); 1084 if (!mem) { 1085 dev_err(&pd->dev, "IO memory region missing (%s)\n", name); 1086 return -ENXIO; 1087 } 1088 ioarea = devm_request_mem_region(&port->device, mem->start, 1089 resource_size(mem), dev_name(&pd->dev)); 1090 if (!ioarea) { 1091 dev_err(&pd->dev, "%s IO memory region request failed\n", 1092 mem->name); 1093 return -ENXIO; 1094 } 1095 base = devm_ioremap(&port->device, mem->start, resource_size(mem)); 1096 if (!base) { 1097 dev_err(&pd->dev, "%s IO remap failed\n", mem->name); 1098 return -ENXIO; 1099 } 1100 *pbase = base; 1101 1102 if (phy) 1103 *phy = mem->start; 1104 1105 return 0; 1106 } 1107 1108 static int ssi_port_probe(struct platform_device *pd) 1109 { 1110 struct device_node *np = pd->dev.of_node; 1111 struct hsi_port *port; 1112 struct omap_ssi_port *omap_port; 1113 struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent); 1114 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 1115 struct gpio_desc *cawake_gpio = NULL; 1116 u32 port_id; 1117 int err; 1118 1119 dev_dbg(&pd->dev, "init ssi port...\n"); 1120 1121 if (!ssi->port || !omap_ssi->port) { 1122 dev_err(&pd->dev, "ssi controller not initialized!\n"); 1123 err = -ENODEV; 1124 goto error; 1125 } 1126 1127 /* get id of first uninitialized port in controller */ 1128 for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id]; 1129 port_id++) 1130 ; 1131 1132 if (port_id >= ssi->num_ports) { 1133 dev_err(&pd->dev, "port id out of range!\n"); 1134 err = -ENODEV; 1135 goto error; 1136 } 1137 1138 port = ssi->port[port_id]; 1139 1140 if (!np) { 1141 dev_err(&pd->dev, "missing device tree data\n"); 1142 err = -EINVAL; 1143 goto error; 1144 } 1145 1146 cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN); 1147 if (IS_ERR(cawake_gpio)) { 1148 err = PTR_ERR(cawake_gpio); 1149 dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err); 1150 goto error; 1151 } 1152 1153 omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL); 1154 if (!omap_port) { 1155 err = -ENOMEM; 1156 goto error; 1157 } 1158 omap_port->wake_gpio = cawake_gpio; 1159 omap_port->pdev = &pd->dev; 1160 omap_port->port_id = port_id; 1161 1162 INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue); 1163 INIT_WORK(&omap_port->work, start_tx_work); 1164 1165 /* initialize HSI port */ 1166 port->async = ssi_async; 1167 port->setup = ssi_setup; 1168 port->flush = ssi_flush; 1169 port->start_tx = ssi_start_tx; 1170 port->stop_tx = ssi_stop_tx; 1171 port->release = ssi_release; 1172 hsi_port_set_drvdata(port, omap_port); 1173 omap_ssi->port[port_id] = omap_port; 1174 1175 platform_set_drvdata(pd, port); 1176 1177 err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, 1178 &omap_port->sst_dma); 1179 if (err < 0) 1180 goto error; 1181 err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, 1182 &omap_port->ssr_dma); 1183 if (err < 0) 1184 goto error; 1185 1186 err = ssi_port_irq(port, pd); 1187 if (err < 0) 1188 goto error; 1189 err = ssi_wake_irq(port, pd); 1190 if (err < 0) 1191 goto error; 1192 1193 ssi_queues_init(omap_port); 1194 spin_lock_init(&omap_port->lock); 1195 spin_lock_init(&omap_port->wk_lock); 1196 omap_port->dev = &port->device; 1197 1198 pm_runtime_use_autosuspend(omap_port->pdev); 1199 pm_runtime_set_autosuspend_delay(omap_port->pdev, 250); 1200 pm_runtime_enable(omap_port->pdev); 1201 1202 #ifdef CONFIG_DEBUG_FS 1203 ssi_debug_add_port(omap_port, omap_ssi->dir); 1204 #endif 1205 1206 hsi_add_clients_from_dt(port, np); 1207 1208 dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id); 1209 1210 return 0; 1211 1212 error: 1213 return err; 1214 } 1215 1216 static void ssi_port_remove(struct platform_device *pd) 1217 { 1218 struct hsi_port *port = platform_get_drvdata(pd); 1219 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 1220 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 1221 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 1222 1223 #ifdef CONFIG_DEBUG_FS 1224 ssi_debug_remove_port(port); 1225 #endif 1226 1227 cancel_delayed_work_sync(&omap_port->errqueue_work); 1228 1229 hsi_port_unregister_clients(port); 1230 1231 port->async = hsi_dummy_msg; 1232 port->setup = hsi_dummy_cl; 1233 port->flush = hsi_dummy_cl; 1234 port->start_tx = hsi_dummy_cl; 1235 port->stop_tx = hsi_dummy_cl; 1236 port->release = hsi_dummy_cl; 1237 1238 omap_ssi->port[omap_port->port_id] = NULL; 1239 platform_set_drvdata(pd, NULL); 1240 1241 pm_runtime_dont_use_autosuspend(&pd->dev); 1242 pm_runtime_disable(&pd->dev); 1243 } 1244 1245 static int ssi_restore_divisor(struct omap_ssi_port *omap_port) 1246 { 1247 writel_relaxed(omap_port->sst.divisor, 1248 omap_port->sst_base + SSI_SST_DIVISOR_REG); 1249 1250 return 0; 1251 } 1252 1253 void omap_ssi_port_update_fclk(struct hsi_controller *ssi, 1254 struct omap_ssi_port *omap_port) 1255 { 1256 /* update divisor */ 1257 u32 div = ssi_calculate_div(ssi); 1258 omap_port->sst.divisor = div; 1259 ssi_restore_divisor(omap_port); 1260 } 1261 1262 #ifdef CONFIG_PM 1263 static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) 1264 { 1265 struct hsi_port *port = to_hsi_port(omap_port->dev); 1266 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 1267 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 1268 1269 omap_port->sys_mpu_enable = readl(omap_ssi->sys + 1270 SSI_MPU_ENABLE_REG(port->num, 0)); 1271 1272 return 0; 1273 } 1274 1275 static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port) 1276 { 1277 struct hsi_port *port = to_hsi_port(omap_port->dev); 1278 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 1279 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 1280 void __iomem *base; 1281 1282 writel_relaxed(omap_port->sys_mpu_enable, 1283 omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 1284 1285 /* SST context */ 1286 base = omap_port->sst_base; 1287 writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); 1288 writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); 1289 writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); 1290 1291 /* SSR context */ 1292 base = omap_port->ssr_base; 1293 writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); 1294 writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); 1295 writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); 1296 1297 return 0; 1298 } 1299 1300 static int ssi_restore_port_mode(struct omap_ssi_port *omap_port) 1301 { 1302 u32 mode; 1303 1304 writel_relaxed(omap_port->sst.mode, 1305 omap_port->sst_base + SSI_SST_MODE_REG); 1306 writel_relaxed(omap_port->ssr.mode, 1307 omap_port->ssr_base + SSI_SSR_MODE_REG); 1308 /* OCP barrier */ 1309 mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); 1310 1311 return 0; 1312 } 1313 1314 static int omap_ssi_port_runtime_suspend(struct device *dev) 1315 { 1316 struct hsi_port *port = dev_get_drvdata(dev); 1317 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 1318 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 1319 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 1320 1321 dev_dbg(dev, "port runtime suspend!\n"); 1322 1323 ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); 1324 if (omap_ssi->get_loss) 1325 omap_port->loss_count = 1326 omap_ssi->get_loss(ssi->device.parent); 1327 ssi_save_port_ctx(omap_port); 1328 1329 return 0; 1330 } 1331 1332 static int omap_ssi_port_runtime_resume(struct device *dev) 1333 { 1334 struct hsi_port *port = dev_get_drvdata(dev); 1335 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 1336 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 1337 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 1338 1339 dev_dbg(dev, "port runtime resume!\n"); 1340 1341 if ((omap_ssi->get_loss) && (omap_port->loss_count == 1342 omap_ssi->get_loss(ssi->device.parent))) 1343 goto mode; /* We always need to restore the mode & TX divisor */ 1344 1345 ssi_restore_port_ctx(omap_port); 1346 1347 mode: 1348 ssi_restore_divisor(omap_port); 1349 ssi_restore_port_mode(omap_port); 1350 1351 return 0; 1352 } 1353 1354 static const struct dev_pm_ops omap_ssi_port_pm_ops = { 1355 SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend, 1356 omap_ssi_port_runtime_resume, NULL) 1357 }; 1358 1359 #define DEV_PM_OPS (&omap_ssi_port_pm_ops) 1360 #else 1361 #define DEV_PM_OPS NULL 1362 #endif 1363 1364 1365 #ifdef CONFIG_OF 1366 static const struct of_device_id omap_ssi_port_of_match[] = { 1367 { .compatible = "ti,omap3-ssi-port", }, 1368 {}, 1369 }; 1370 MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match); 1371 #else 1372 #define omap_ssi_port_of_match NULL 1373 #endif 1374 1375 struct platform_driver ssi_port_pdriver = { 1376 .probe = ssi_port_probe, 1377 .remove = ssi_port_remove, 1378 .driver = { 1379 .name = "omap_ssi_port", 1380 .of_match_table = omap_ssi_port_of_match, 1381 .pm = DEV_PM_OPS, 1382 }, 1383 }; 1384