// SPDX-License-Identifier: GPL-2.0-only
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
			readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
			readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
			readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
			readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
			readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
			readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
			readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
			readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
			readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
			readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
			readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
			readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
			readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
			readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
			readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
			readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
			readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
			readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
			readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(ssi_port_regs);

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_DEBUGFS_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static void ssi_debug_add_port(struct omap_ssi_port *omap_port,
			       struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	debugfs_create_file_unsafe("divisor", 0644, dir, port,
				   &ssi_sst_div_fops);
}
#endif

static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

static int ssi_claim_lch(struct hsi_msg *msg)
{

	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
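		/*
		 * Read path: map the client buffer for device-to-memory DMA.
		 * The GDD source programmed below is the SSR channel buffer
		 * register; the destination is the mapped scatterlist.
		 */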
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
				 DMA_FROM_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
			SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
				 DMA_TO_DEVICE);
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
			SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
		       gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
		msg->ttype ? "write" : "read");
"write" : "read"); 305 val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 306 writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 307 pm_runtime_put_autosuspend(omap_port->pdev); 308 msg->actual_len = 0; 309 msg->status = HSI_STATUS_PROCEEDING; 310 311 return 0; 312 } 313 314 static int ssi_start_transfer(struct list_head *queue) 315 { 316 struct hsi_msg *msg; 317 int lch = -1; 318 319 if (list_empty(queue)) 320 return 0; 321 msg = list_first_entry(queue, struct hsi_msg, link); 322 if (msg->status != HSI_STATUS_QUEUED) 323 return 0; 324 if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32))) 325 lch = ssi_claim_lch(msg); 326 if (lch >= 0) 327 return ssi_start_dma(msg, lch); 328 else 329 return ssi_start_pio(msg); 330 } 331 332 static int ssi_async_break(struct hsi_msg *msg) 333 { 334 struct hsi_port *port = hsi_get_port(msg->cl); 335 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 336 struct hsi_controller *ssi = to_hsi_controller(port->device.parent); 337 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 338 int err = 0; 339 u32 tmp; 340 341 pm_runtime_get_sync(omap_port->pdev); 342 if (msg->ttype == HSI_MSG_WRITE) { 343 if (omap_port->sst.mode != SSI_MODE_FRAME) { 344 err = -EINVAL; 345 goto out; 346 } 347 writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); 348 msg->status = HSI_STATUS_COMPLETED; 349 msg->complete(msg); 350 } else { 351 if (omap_port->ssr.mode != SSI_MODE_FRAME) { 352 err = -EINVAL; 353 goto out; 354 } 355 spin_lock_bh(&omap_port->lock); 356 tmp = readl(omap_ssi->sys + 357 SSI_MPU_ENABLE_REG(port->num, 0)); 358 writel(tmp | SSI_BREAKDETECTED, 359 omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); 360 msg->status = HSI_STATUS_PROCEEDING; 361 list_add_tail(&msg->link, &omap_port->brkqueue); 362 spin_unlock_bh(&omap_port->lock); 363 } 364 out: 365 pm_runtime_put_autosuspend(omap_port->pdev); 366 367 return err; 368 } 369 370 static int ssi_async(struct hsi_msg *msg) 371 { 372 struct hsi_port *port = hsi_get_port(msg->cl); 373 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); 374 struct list_head *queue; 375 int err = 0; 376 377 BUG_ON(!msg); 378 379 if (msg->sgt.nents > 1) 380 return -ENOSYS; /* TODO: Add sg support */ 381 382 if (msg->break_frame) 383 return ssi_async_break(msg); 384 385 if (msg->ttype) { 386 BUG_ON(msg->channel >= omap_port->sst.channels); 387 queue = &omap_port->txqueue[msg->channel]; 388 } else { 389 BUG_ON(msg->channel >= omap_port->ssr.channels); 390 queue = &omap_port->rxqueue[msg->channel]; 391 } 392 msg->status = HSI_STATUS_QUEUED; 393 394 pm_runtime_get_sync(omap_port->pdev); 395 spin_lock_bh(&omap_port->lock); 396 list_add_tail(&msg->link, queue); 397 err = ssi_start_transfer(queue); 398 if (err < 0) { 399 list_del(&msg->link); 400 msg->status = HSI_STATUS_ERROR; 401 } 402 spin_unlock_bh(&omap_port->lock); 403 pm_runtime_put_autosuspend(omap_port->pdev); 404 dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", 405 msg->status, msg->ttype, msg->channel); 406 407 return err; 408 } 409 410 static u32 ssi_calculate_div(struct hsi_controller *ssi) 411 { 412 struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); 413 u32 tx_fckrate = (u32) omap_ssi->fck_rate; 414 415 /* / 2 : SSI TX clock is always half of the SSI functional clock */ 416 tx_fckrate >>= 1; 417 /* Round down when tx_fckrate % omap_ssi->max_speed == 0 */ 418 tx_fckrate--; 419 dev_dbg(&ssi->device, "TX div %d for fck_rate %lu Khz speed %d Kb/s\n", 420 tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate, 421 
	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			 msg->channel, msg, msg->sgt.sgl->length,
			 msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d Mb/s (div %d)\n",
			cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
			container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0),
		omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

static void ssi_transfer(struct omap_ssi_port *omap_port,
			 struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
				       link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clocks for writes, also GDD ones */
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
				       link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		       SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
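		/* Clearing CCR (including SSI_CCR_ENABLE) stops the pending transfer */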
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
				       link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
		omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}

}

static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
			msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
		    ((msg->status == HSI_STATUS_COMPLETED) &&
		     (msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
				SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}

	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
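	/* Disable and ack this channel's interrupt now that the transfer is done */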
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick High-Low-High transition in the line.
		 * In such a case if we have long interrupt latencies,
		 * we can miss the low event or get twice a high event.
		 * This workaround will avoid breaking the clock reference
		 * count when such a situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0)
		return err;
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
			omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0)
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
			cawake_irq, err);
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					 resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
			mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
	     port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async = ssi_async;
	port->setup = ssi_setup;
	port->flush = ssi_flush;
	port->start_tx = ssi_start_tx;
	port->stop_tx = ssi_stop_tx;
	port->release = ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_add_port(omap_port, omap_ssi->dir);
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static void ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async = hsi_dummy_msg;
	port->setup = hsi_dummy_cl;
	port->flush = hsi_dummy_cl;
	port->start_tx = hsi_dummy_cl;
	port->stop_tx = hsi_dummy_cl;
	port->release = hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
		       omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);
	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.mode,
		       omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
		       omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
			   omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS NULL
#endif


#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove = ssi_port_remove,
	.driver = {
		.name = "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm = DEV_PM_OPS,
	},
};