Lines matching references to omap_port. Each entry gives the source line number, the matched line, and the enclosing function; entries marked "local" or "argument" are the lines where omap_port is declared in that function.
35 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_wakein() local
36 return gpiod_get_value(omap_port->wake_gpio); in ssi_wakein()
42 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_debug_remove_port() local
44 debugfs_remove_recursive(omap_port->dir); in ssi_debug_remove_port()
50 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_port_regs_show() local
56 pm_runtime_get_sync(omap_port->pdev); in ssi_port_regs_show()
57 if (omap_port->wake_irq > 0) in ssi_port_regs_show()
66 base = omap_port->sst_base; in ssi_port_regs_show()
86 for (ch = 0; ch < omap_port->channels; ch++) { in ssi_port_regs_show()
91 base = omap_port->ssr_base; in ssi_port_regs_show()
113 for (ch = 0; ch < omap_port->channels; ch++) { in ssi_port_regs_show()
117 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_port_regs_show()
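The ssi_port_regs_show() matches above follow the usual debugfs dump pattern: take a runtime-PM reference before touching the port, read the SST (TX) and SSR (RX) register banks, then drop the reference with autosuspend. A minimal sketch of that bracket; the struct is a reduced stand-in for struct omap_ssi_port and the register offset is a placeholder, not the real SSI map.

    /* Illustrative sketch only; not the driver's actual show() body. */
    #include <linux/io.h>
    #include <linux/pm_runtime.h>
    #include <linux/seq_file.h>

    struct regs_show_sketch {               /* reduced stand-in for struct omap_ssi_port */
            struct device *pdev;            /* device used for runtime PM */
            void __iomem *sst_base;         /* TX (SST) register base */
            void __iomem *ssr_base;         /* RX (SSR) register base */
    };

    #define SKETCH_MODE_REG 0x0             /* placeholder offset */

    static int port_regs_show_sketch(struct seq_file *m, void *p)
    {
            struct regs_show_sketch *op = m->private;

            pm_runtime_get_sync(op->pdev);  /* clocks on before the first readl() */
            seq_printf(m, "SST MODE: 0x%08x\n", readl(op->sst_base + SKETCH_MODE_REG));
            seq_printf(m, "SSR MODE: 0x%08x\n", readl(op->ssr_base + SKETCH_MODE_REG));
            pm_runtime_put_autosuspend(op->pdev);   /* let the port idle again */
            return 0;
    }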
127 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_div_get() local
129 pm_runtime_get_sync(omap_port->pdev); in ssi_div_get()
130 *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); in ssi_div_get()
131 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_div_get()
139 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_div_set() local
144 pm_runtime_get_sync(omap_port->pdev); in ssi_div_set()
145 writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); in ssi_div_set()
146 omap_port->sst.divisor = val; in ssi_div_set()
147 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_div_set()
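ssi_div_get() and ssi_div_set() wrap a single divisor register in the same runtime-PM bracket and cache the written value for later replay (see ssi_restore_divisor() near the end of the listing). A get/set pair like this is typically exposed through DEFINE_DEBUGFS_ATTRIBUTE; a sketch under the same stand-in assumptions, with a placeholder register offset:

    #include <linux/debugfs.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/pm_runtime.h>

    #define SKETCH_DIVISOR_REG 0x18         /* placeholder, not the real SSI_SST_DIVISOR_REG offset */

    struct div_sketch {                     /* reduced stand-in for struct omap_ssi_port */
            struct device *pdev;
            void __iomem *sst_base;
            u32 divisor;                    /* cached copy, mirrors omap_port->sst.divisor */
    };

    static int div_get_sketch(void *data, u64 *val)
    {
            struct div_sketch *op = data;

            pm_runtime_get_sync(op->pdev);
            *val = readl(op->sst_base + SKETCH_DIVISOR_REG);
            pm_runtime_put_autosuspend(op->pdev);
            return 0;
    }

    static int div_set_sketch(void *data, u64 val)
    {
            struct div_sketch *op = data;

            if (val > 127)                  /* range check before touching the hardware (7-bit divisor assumed) */
                    return -EINVAL;

            pm_runtime_get_sync(op->pdev);
            writel(val, op->sst_base + SKETCH_DIVISOR_REG);
            op->divisor = val;              /* cache for ssi_restore_divisor()-style replay */
            pm_runtime_put_autosuspend(op->pdev);
            return 0;
    }

    DEFINE_DEBUGFS_ATTRIBUTE(div_fops_sketch, div_get_sketch, div_set_sketch, "%llu\n");
    /* wired up with something like:
     * debugfs_create_file_unsafe("sst_divisor", 0644, dir, op, &div_fops_sketch);
     */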
154 static void ssi_debug_add_port(struct omap_ssi_port *omap_port, in ssi_debug_add_port() argument
157 struct hsi_port *port = to_hsi_port(omap_port->dev); in ssi_debug_add_port()
159 dir = debugfs_create_dir(dev_name(omap_port->dev), dir); in ssi_debug_add_port()
160 omap_port->dir = dir; in ssi_debug_add_port()
170 struct omap_ssi_port *omap_port; in ssi_process_errqueue() local
174 omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work); in ssi_process_errqueue()
176 list_for_each_safe(head, tmp, &omap_port->errqueue) { in ssi_process_errqueue()
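ssi_process_errqueue() is a deferrable-work handler; it recovers its omap_ssi_port from the work_struct with container_of() and then walks the error queue. A minimal sketch of that recovery step, with the per-message completion reduced to a list_del():

    #include <linux/kernel.h>
    #include <linux/list.h>
    #include <linux/workqueue.h>

    struct errqueue_sketch {                        /* reduced stand-in for struct omap_ssi_port */
            struct list_head errqueue;              /* messages that completed with an error */
            struct delayed_work errqueue_work;      /* deferred completion of that list */
    };

    static void process_errqueue_sketch(struct work_struct *work)
    {
            struct errqueue_sketch *op;
            struct list_head *head, *tmp;

            /* 'work' is &errqueue_work.work; step back out to the containing struct. */
            op = container_of(work, struct errqueue_sketch, errqueue_work.work);

            list_for_each_safe(head, tmp, &op->errqueue)
                    list_del(head);                 /* the real driver completes the hsi_msg here */
    }

    /* Initialised elsewhere, as in the probe path:
     * INIT_DEFERRABLE_WORK(&op->errqueue_work, process_errqueue_sketch);
     */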
204 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_start_dma() local
216 pm_runtime_get(omap_port->pdev); in ssi_start_dma()
218 if (!pm_runtime_active(omap_port->pdev)) { in ssi_start_dma()
220 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_dma()
229 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_dma()
238 s_addr = omap_port->ssr_dma + in ssi_start_dma()
246 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_dma()
256 d_addr = omap_port->sst_dma + in ssi_start_dma()
283 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_start_pio() local
288 pm_runtime_get(omap_port->pdev); in ssi_start_pio()
290 if (!pm_runtime_active(omap_port->pdev)) { in ssi_start_pio()
292 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_pio()
299 pm_runtime_get(omap_port->pdev); in ssi_start_pio()
307 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_start_pio()
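ssi_start_dma() and ssi_start_pio() both take a non-blocking runtime-PM reference and then check pm_runtime_active() before touching the hardware; if the port is still suspended they drop the reference again rather than touching unclocked registers. A sketch of that guard, assuming a plain struct device pointer:

    #include <linux/pm_runtime.h>

    static int start_transfer_sketch(struct device *dev)
    {
            pm_runtime_get(dev);            /* non-blocking variant: safe in atomic context */

            if (!pm_runtime_active(dev)) {
                    /* Not powered yet: do not touch unclocked registers. */
                    pm_runtime_put_autosuspend(dev);
                    return 0;
            }

            /* ... program the DMA channel or enable the FIFO interrupt here ... */
            return 0;
    }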
335 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_async_break() local
341 pm_runtime_get_sync(omap_port->pdev); in ssi_async_break()
343 if (omap_port->sst.mode != SSI_MODE_FRAME) { in ssi_async_break()
347 writel(1, omap_port->sst_base + SSI_SST_BREAK_REG); in ssi_async_break()
351 if (omap_port->ssr.mode != SSI_MODE_FRAME) { in ssi_async_break()
355 spin_lock_bh(&omap_port->lock); in ssi_async_break()
361 list_add_tail(&msg->link, &omap_port->brkqueue); in ssi_async_break()
362 spin_unlock_bh(&omap_port->lock); in ssi_async_break()
365 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_async_break()
366 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_async_break()
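For a receive break, ssi_async_break() only has to park the message: it is appended to brkqueue under the port lock, using the BH-disabling spinlock variant seen throughout this listing. A minimal sketch of that insertion:

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct brk_sketch {                     /* reduced stand-in for struct omap_ssi_port */
            spinlock_t lock;
            struct list_head brkqueue;      /* parked RX break requests */
    };

    struct brk_msg_sketch {                 /* reduced stand-in for struct hsi_msg */
            struct list_head link;
    };

    static void queue_break_sketch(struct brk_sketch *op, struct brk_msg_sketch *msg)
    {
            spin_lock_bh(&op->lock);        /* completion paths also run in BH context */
            list_add_tail(&msg->link, &op->brkqueue);
            spin_unlock_bh(&op->lock);
    }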
374 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_async() local
387 BUG_ON(msg->channel >= omap_port->sst.channels); in ssi_async()
388 queue = &omap_port->txqueue[msg->channel]; in ssi_async()
390 BUG_ON(msg->channel >= omap_port->ssr.channels); in ssi_async()
391 queue = &omap_port->rxqueue[msg->channel]; in ssi_async()
395 pm_runtime_get_sync(omap_port->pdev); in ssi_async()
396 spin_lock_bh(&omap_port->lock); in ssi_async()
403 spin_unlock_bh(&omap_port->lock); in ssi_async()
404 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_async()
405 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_async()
451 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_setup() local
454 void __iomem *sst = omap_port->sst_base; in ssi_setup()
455 void __iomem *ssr = omap_port->ssr_base; in ssi_setup()
460 pm_runtime_get_sync(omap_port->pdev); in ssi_setup()
461 spin_lock_bh(&omap_port->lock); in ssi_setup()
487 if ((omap_port->ssr.mode == SSI_MODE_FRAME) && in ssi_setup()
489 ssi_flush_queue(&omap_port->brkqueue, cl); in ssi_setup()
491 omap_port->channels = max(cl->rx_cfg.num_hw_channels, in ssi_setup()
495 omap_port->sst.divisor = div; in ssi_setup()
496 omap_port->sst.frame_size = 31; in ssi_setup()
497 omap_port->sst.channels = cl->tx_cfg.num_hw_channels; in ssi_setup()
498 omap_port->sst.arb_mode = cl->tx_cfg.arb_mode; in ssi_setup()
499 omap_port->sst.mode = cl->tx_cfg.mode; in ssi_setup()
501 omap_port->ssr.frame_size = 31; in ssi_setup()
502 omap_port->ssr.timeout = 0; in ssi_setup()
503 omap_port->ssr.channels = cl->rx_cfg.num_hw_channels; in ssi_setup()
504 omap_port->ssr.mode = cl->rx_cfg.mode; in ssi_setup()
506 spin_unlock_bh(&omap_port->lock); in ssi_setup()
507 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_setup()
508 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_setup()
516 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_flush() local
520 void __iomem *sst = omap_port->sst_base; in ssi_flush()
521 void __iomem *ssr = omap_port->ssr_base; in ssi_flush()
525 pm_runtime_get_sync(omap_port->pdev); in ssi_flush()
526 spin_lock_bh(&omap_port->lock); in ssi_flush()
529 pinctrl_pm_select_idle_state(omap_port->pdev); in ssi_flush()
539 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_flush()
560 for (i = 0; i < omap_port->channels; i++) { in ssi_flush()
562 if (!list_empty(&omap_port->txqueue[i])) in ssi_flush()
563 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_flush()
564 ssi_flush_queue(&omap_port->txqueue[i], NULL); in ssi_flush()
565 ssi_flush_queue(&omap_port->rxqueue[i], NULL); in ssi_flush()
567 ssi_flush_queue(&omap_port->brkqueue, NULL); in ssi_flush()
570 pinctrl_pm_select_default_state(omap_port->pdev); in ssi_flush()
572 spin_unlock_bh(&omap_port->lock); in ssi_flush()
573 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_flush()
574 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_flush()
581 struct omap_ssi_port *omap_port = in start_tx_work() local
583 struct hsi_port *port = to_hsi_port(omap_port->dev); in start_tx_work()
587 pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ in start_tx_work()
594 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_start_tx() local
596 dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); in ssi_start_tx()
598 spin_lock_bh(&omap_port->wk_lock); in ssi_start_tx()
599 if (omap_port->wk_refcount++) { in ssi_start_tx()
600 spin_unlock_bh(&omap_port->wk_lock); in ssi_start_tx()
603 spin_unlock_bh(&omap_port->wk_lock); in ssi_start_tx()
605 schedule_work(&omap_port->work); in ssi_start_tx()
613 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_stop_tx() local
617 dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount); in ssi_stop_tx()
619 spin_lock_bh(&omap_port->wk_lock); in ssi_stop_tx()
620 BUG_ON(!omap_port->wk_refcount); in ssi_stop_tx()
621 if (--omap_port->wk_refcount) { in ssi_stop_tx()
622 spin_unlock_bh(&omap_port->wk_lock); in ssi_stop_tx()
626 spin_unlock_bh(&omap_port->wk_lock); in ssi_stop_tx()
628 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_stop_tx()
629 pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */ in ssi_stop_tx()
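ssi_start_tx() and ssi_stop_tx() keep the wake line high with a refcount under wk_lock: only the 0 -> 1 transition schedules the work item that raises the line (start_tx_work() above grabs the clocks with pm_runtime_get_sync()), and only the last ssi_stop_tx() lowers it and releases those clocks. A sketch of the two halves, with the actual wake-line register/GPIO write omitted:

    #include <linux/pm_runtime.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct wake_sketch {                    /* reduced stand-in for struct omap_ssi_port */
            struct device *pdev;
            spinlock_t wk_lock;
            unsigned int wk_refcount;       /* users that currently need the wake line high */
            struct work_struct work;        /* raises the wake line from sleepable context */
    };

    static void wake_start_sketch(struct wake_sketch *op)
    {
            spin_lock_bh(&op->wk_lock);
            if (op->wk_refcount++) {        /* line already (being) raised: just count the user */
                    spin_unlock_bh(&op->wk_lock);
                    return;
            }
            spin_unlock_bh(&op->wk_lock);

            schedule_work(&op->work);       /* first user: the work item grabs clocks and raises the line */
    }

    static void wake_stop_sketch(struct wake_sketch *op)
    {
            spin_lock_bh(&op->wk_lock);
            if (--op->wk_refcount) {        /* other users still need the line high */
                    spin_unlock_bh(&op->wk_lock);
                    return;
            }
            /* ... lower the wake line here (register or GPIO write omitted) ... */
            spin_unlock_bh(&op->wk_lock);

            pm_runtime_mark_last_busy(op->pdev);
            pm_runtime_put_autosuspend(op->pdev);   /* balance the get_sync() done by the work item */
    }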
635 static void ssi_transfer(struct omap_ssi_port *omap_port, in ssi_transfer() argument
641 pm_runtime_get(omap_port->pdev); in ssi_transfer()
642 spin_lock_bh(&omap_port->lock); in ssi_transfer()
650 spin_unlock_bh(&omap_port->lock); in ssi_transfer()
652 spin_lock_bh(&omap_port->lock); in ssi_transfer()
655 spin_unlock_bh(&omap_port->lock); in ssi_transfer()
656 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_transfer()
657 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_transfer()
663 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_cleanup_queues() local
673 ssi_flush_queue(&omap_port->brkqueue, cl); in ssi_cleanup_queues()
674 if (list_empty(&omap_port->brkqueue)) in ssi_cleanup_queues()
677 for (i = 0; i < omap_port->channels; i++) { in ssi_cleanup_queues()
678 if (list_empty(&omap_port->txqueue[i])) in ssi_cleanup_queues()
680 msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg, in ssi_cleanup_queues()
686 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_cleanup_queues()
687 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_cleanup_queues()
689 ssi_flush_queue(&omap_port->txqueue[i], cl); in ssi_cleanup_queues()
691 for (i = 0; i < omap_port->channels; i++) { in ssi_cleanup_queues()
692 if (list_empty(&omap_port->rxqueue[i])) in ssi_cleanup_queues()
694 msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, in ssi_cleanup_queues()
700 ssi_flush_queue(&omap_port->rxqueue[i], cl); in ssi_cleanup_queues()
702 if (!list_empty(&omap_port->rxqueue[i])) in ssi_cleanup_queues()
706 tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG); in ssi_cleanup_queues()
708 writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG); in ssi_cleanup_queues()
710 tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); in ssi_cleanup_queues()
712 writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG); in ssi_cleanup_queues()
725 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_cleanup_gdd() local
742 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_cleanup_gdd()
743 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_cleanup_gdd()
753 static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode) in ssi_set_port_mode() argument
755 writel(mode, omap_port->sst_base + SSI_SST_MODE_REG); in ssi_set_port_mode()
756 writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG); in ssi_set_port_mode()
758 mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); in ssi_set_port_mode()
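ssi_set_port_mode() writes the same mode value to both the SST and SSR MODE registers and then reads one of them back; the read-back at source line 758 appears to act as a barrier so the posted writes reach the module before the caller proceeds. A sketch with placeholder offsets:

    #include <linux/io.h>

    /* Placeholder offsets; the real SSI_SST_MODE_REG / SSI_SSR_MODE_REG live in the driver. */
    #define SKETCH_SST_MODE_REG 0x0
    #define SKETCH_SSR_MODE_REG 0x0

    static void set_port_mode_sketch(void __iomem *sst_base, void __iomem *ssr_base, u32 mode)
    {
            writel(mode, sst_base + SKETCH_SST_MODE_REG);   /* TX direction */
            writel(mode, ssr_base + SKETCH_SSR_MODE_REG);   /* RX direction */

            /* Read back so both posted writes are flushed before returning. */
            readl(ssr_base + SKETCH_SSR_MODE_REG);
    }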
766 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_release() local
769 pm_runtime_get_sync(omap_port->pdev); in ssi_release()
770 spin_lock_bh(&omap_port->lock); in ssi_release()
781 if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) in ssi_release()
782 pm_runtime_put_sync(omap_port->pdev); in ssi_release()
783 pm_runtime_get(omap_port->pdev); in ssi_release()
785 ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); in ssi_release()
786 omap_port->sst.mode = SSI_MODE_SLEEP; in ssi_release()
787 omap_port->ssr.mode = SSI_MODE_SLEEP; in ssi_release()
788 pm_runtime_put(omap_port->pdev); in ssi_release()
789 WARN_ON(omap_port->wk_refcount != 0); in ssi_release()
791 spin_unlock_bh(&omap_port->lock); in ssi_release()
792 pm_runtime_put_sync(omap_port->pdev); in ssi_release()
801 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_error() local
811 err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG); in ssi_error()
832 spin_lock(&omap_port->lock); in ssi_error()
837 writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG); in ssi_error()
841 for (i = 0; i < omap_port->channels; i++) { in ssi_error()
842 if (list_empty(&omap_port->rxqueue[i])) in ssi_error()
844 msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg, in ssi_error()
848 spin_unlock(&omap_port->lock); in ssi_error()
851 ssi_transfer(omap_port, &omap_port->rxqueue[i]); in ssi_error()
852 spin_lock(&omap_port->lock); in ssi_error()
854 spin_unlock(&omap_port->lock); in ssi_error()
859 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_break_complete() local
868 spin_lock(&omap_port->lock); in ssi_break_complete()
872 writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG); in ssi_break_complete()
875 spin_unlock(&omap_port->lock); in ssi_break_complete()
877 list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) { in ssi_break_complete()
879 spin_lock(&omap_port->lock); in ssi_break_complete()
881 spin_unlock(&omap_port->lock); in ssi_break_complete()
891 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_pio_complete() local
897 spin_lock_bh(&omap_port->lock); in ssi_pio_complete()
910 writel(*buf, omap_port->sst_base + in ssi_pio_complete()
913 *buf = readl(omap_port->ssr_base + in ssi_pio_complete()
929 spin_unlock_bh(&omap_port->lock); in ssi_pio_complete()
939 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_pio_complete()
940 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_pio_complete()
946 spin_unlock_bh(&omap_port->lock); in ssi_pio_complete()
948 ssi_transfer(omap_port, queue); in ssi_pio_complete()
955 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_pio_thread() local
961 pm_runtime_get_sync(omap_port->pdev); in ssi_pio_thread()
967 for (ch = 0; ch < omap_port->channels; ch++) { in ssi_pio_thread()
969 ssi_pio_complete(port, &omap_port->txqueue[ch]); in ssi_pio_thread()
971 ssi_pio_complete(port, &omap_port->rxqueue[ch]); in ssi_pio_thread()
984 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_pio_thread()
985 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_pio_thread()
994 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_wake_thread() local
1005 if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags)) in ssi_wake_thread()
1006 pm_runtime_get_sync(omap_port->pdev); in ssi_wake_thread()
1008 if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ in ssi_wake_thread()
1015 if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ in ssi_wake_thread()
1020 if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) { in ssi_wake_thread()
1021 pm_runtime_mark_last_busy(omap_port->pdev); in ssi_wake_thread()
1022 pm_runtime_put_autosuspend(omap_port->pdev); in ssi_wake_thread()
1031 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_port_irq() local
1037 omap_port->irq = err; in ssi_port_irq()
1038 err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL, in ssi_port_irq()
1042 omap_port->irq, err); in ssi_port_irq()
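ssi_port_irq() stores the platform IRQ in omap_port->irq and requests it with devm_request_threaded_irq(), with no hard-IRQ handler so all processing happens in the threaded handler (ssi_pio_thread() further up the listing). A sketch with hypothetical names; the flags and handler body are assumptions, not the driver's exact call:

    #include <linux/interrupt.h>
    #include <linux/platform_device.h>

    static irqreturn_t port_irq_thread_sketch(int irq, void *data)
    {
            /* Sleepable context: drain TX/RX FIFO events, complete messages. */
            return IRQ_HANDLED;
    }

    static int request_port_irq_sketch(struct platform_device *pd, struct device *dev,
                                       void *drvdata)
    {
            int irq = platform_get_irq(pd, 0);

            if (irq < 0)
                    return irq;

            /* NULL primary handler requires IRQF_ONESHOT. */
            return devm_request_threaded_irq(dev, irq, NULL, port_irq_thread_sketch,
                                             IRQF_ONESHOT, "ssi_port_sketch", drvdata);
    }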
1048 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_wake_irq() local
1052 if (!omap_port->wake_gpio) { in ssi_wake_irq()
1053 omap_port->wake_irq = -1; in ssi_wake_irq()
1057 cawake_irq = gpiod_to_irq(omap_port->wake_gpio); in ssi_wake_irq()
1058 omap_port->wake_irq = cawake_irq; in ssi_wake_irq()
1075 static void ssi_queues_init(struct omap_ssi_port *omap_port) in ssi_queues_init() argument
1080 INIT_LIST_HEAD(&omap_port->txqueue[ch]); in ssi_queues_init()
1081 INIT_LIST_HEAD(&omap_port->rxqueue[ch]); in ssi_queues_init()
1083 INIT_LIST_HEAD(&omap_port->brkqueue); in ssi_queues_init()
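ssi_queues_init() simply initialises one TX and one RX list head per channel plus the shared break queue; everything else in the listing (ssi_async(), ssi_flush(), ssi_cleanup_queues()) indexes these arrays by channel number. A sketch with a placeholder channel count:

    #include <linux/list.h>

    #define SKETCH_NUM_CHANNELS 8           /* placeholder; the driver uses its own max-channels constant */

    struct queues_sketch {                  /* reduced stand-in for struct omap_ssi_port */
            struct list_head txqueue[SKETCH_NUM_CHANNELS];
            struct list_head rxqueue[SKETCH_NUM_CHANNELS];
            struct list_head brkqueue;
    };

    static void queues_init_sketch(struct queues_sketch *op)
    {
            unsigned int ch;

            for (ch = 0; ch < SKETCH_NUM_CHANNELS; ch++) {
                    INIT_LIST_HEAD(&op->txqueue[ch]);
                    INIT_LIST_HEAD(&op->rxqueue[ch]);
            }
            INIT_LIST_HEAD(&op->brkqueue);
    }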
1123 struct omap_ssi_port *omap_port; in ssi_port_probe() local
1164 omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL); in ssi_port_probe()
1165 if (!omap_port) { in ssi_port_probe()
1169 omap_port->wake_gpio = cawake_gpio; in ssi_port_probe()
1170 omap_port->pdev = &pd->dev; in ssi_port_probe()
1171 omap_port->port_id = port_id; in ssi_port_probe()
1173 INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue); in ssi_port_probe()
1174 INIT_WORK(&omap_port->work, start_tx_work); in ssi_port_probe()
1183 hsi_port_set_drvdata(port, omap_port); in ssi_port_probe()
1184 omap_ssi->port[port_id] = omap_port; in ssi_port_probe()
1188 err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base, in ssi_port_probe()
1189 &omap_port->sst_dma); in ssi_port_probe()
1192 err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base, in ssi_port_probe()
1193 &omap_port->ssr_dma); in ssi_port_probe()
1204 ssi_queues_init(omap_port); in ssi_port_probe()
1205 spin_lock_init(&omap_port->lock); in ssi_port_probe()
1206 spin_lock_init(&omap_port->wk_lock); in ssi_port_probe()
1207 omap_port->dev = &port->device; in ssi_port_probe()
1209 pm_runtime_use_autosuspend(omap_port->pdev); in ssi_port_probe()
1210 pm_runtime_set_autosuspend_delay(omap_port->pdev, 250); in ssi_port_probe()
1211 pm_runtime_enable(omap_port->pdev); in ssi_port_probe()
1214 ssi_debug_add_port(omap_port, omap_ssi->dir); in ssi_port_probe()
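The ssi_port_probe() matches above cover the per-port setup: a devm_kzalloc()'d omap_ssi_port, drvdata wiring, iomem/DMA bases for both directions, lock and queue initialisation, and finally runtime PM with a 250 ms autosuspend delay, which is what makes all of the pm_runtime_put_autosuspend() calls in this file defer the actual suspend. A sketch of just that last step:

    #include <linux/device.h>
    #include <linux/pm_runtime.h>

    static void probe_pm_setup_sketch(struct device *dev)
    {
            pm_runtime_use_autosuspend(dev);            /* put_autosuspend() defers instead of suspending now */
            pm_runtime_set_autosuspend_delay(dev, 250); /* same 250 ms delay as source line 1210 */
            pm_runtime_enable(dev);                     /* from here on, get/put really gate the port */
    }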
1230 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in ssi_port_remove() local
1238 cancel_delayed_work_sync(&omap_port->errqueue_work); in ssi_port_remove()
1249 omap_ssi->port[omap_port->port_id] = NULL; in ssi_port_remove()
1256 static int ssi_restore_divisor(struct omap_ssi_port *omap_port) in ssi_restore_divisor() argument
1258 writel_relaxed(omap_port->sst.divisor, in ssi_restore_divisor()
1259 omap_port->sst_base + SSI_SST_DIVISOR_REG); in ssi_restore_divisor()
1265 struct omap_ssi_port *omap_port) in omap_ssi_port_update_fclk() argument
1269 omap_port->sst.divisor = div; in omap_ssi_port_update_fclk()
1270 ssi_restore_divisor(omap_port); in omap_ssi_port_update_fclk()
1274 static int ssi_save_port_ctx(struct omap_ssi_port *omap_port) in ssi_save_port_ctx() argument
1276 struct hsi_port *port = to_hsi_port(omap_port->dev); in ssi_save_port_ctx()
1280 omap_port->sys_mpu_enable = readl(omap_ssi->sys + in ssi_save_port_ctx()
1286 static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port) in ssi_restore_port_ctx() argument
1288 struct hsi_port *port = to_hsi_port(omap_port->dev); in ssi_restore_port_ctx()
1293 writel_relaxed(omap_port->sys_mpu_enable, in ssi_restore_port_ctx()
1297 base = omap_port->sst_base; in ssi_restore_port_ctx()
1298 writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG); in ssi_restore_port_ctx()
1299 writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG); in ssi_restore_port_ctx()
1300 writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG); in ssi_restore_port_ctx()
1303 base = omap_port->ssr_base; in ssi_restore_port_ctx()
1304 writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG); in ssi_restore_port_ctx()
1305 writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG); in ssi_restore_port_ctx()
1306 writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG); in ssi_restore_port_ctx()
1311 static int ssi_restore_port_mode(struct omap_ssi_port *omap_port) in ssi_restore_port_mode() argument
1315 writel_relaxed(omap_port->sst.mode, in ssi_restore_port_mode()
1316 omap_port->sst_base + SSI_SST_MODE_REG); in ssi_restore_port_mode()
1317 writel_relaxed(omap_port->ssr.mode, in ssi_restore_port_mode()
1318 omap_port->ssr_base + SSI_SSR_MODE_REG); in ssi_restore_port_mode()
1320 mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG); in ssi_restore_port_mode()
1328 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in omap_ssi_port_runtime_suspend() local
1334 ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); in omap_ssi_port_runtime_suspend()
1336 omap_port->loss_count = in omap_ssi_port_runtime_suspend()
1338 ssi_save_port_ctx(omap_port); in omap_ssi_port_runtime_suspend()
1346 struct omap_ssi_port *omap_port = hsi_port_drvdata(port); in omap_ssi_port_runtime_resume() local
1352 if ((omap_ssi->get_loss) && (omap_port->loss_count == in omap_ssi_port_runtime_resume()
1356 ssi_restore_port_ctx(omap_port); in omap_ssi_port_runtime_resume()
1359 ssi_restore_divisor(omap_port); in omap_ssi_port_runtime_resume()
1360 ssi_restore_port_mode(omap_port); in omap_ssi_port_runtime_resume()
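The runtime-PM callbacks at the end of the listing save and restore the port context around suspend. The suspend path records a context-loss counter (omap_ssi->get_loss) together with the register state; the resume path re-reads the counter and skips the saved-register restore when it has not moved, while the divisor and mode are rewritten regardless (source lines 1359-1360). A sketch of that comparison; the hook type and placement are assumptions:

    #include <linux/device.h>
    #include <linux/types.h>

    struct ctx_sketch {                     /* reduced stand-in for the saved per-port state */
            u32 loss_count;                 /* context-loss counter captured at suspend */
            /* ... saved register values would live here ... */
    };

    static void runtime_suspend_sketch(struct ctx_sketch *ctx, struct device *dev,
                                       u32 (*get_loss)(struct device *))
    {
            if (get_loss)
                    ctx->loss_count = get_loss(dev);    /* remember where the counter stood */
            /* ... save the port registers into ctx ... */
    }

    static void runtime_resume_sketch(struct ctx_sketch *ctx, struct device *dev,
                                      u32 (*get_loss)(struct device *))
    {
            /* Context was lost if there is no counter to check or it has moved on. */
            bool lost = !get_loss || ctx->loss_count != get_loss(dev);

            if (lost) {
                    /* ... write the saved registers back ... */
            }
            /* mode and divisor restore would follow here in either case */
    }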