/* xref: /linux/drivers/hsi/controllers/omap_ssi_port.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa) */
/* OMAP SSI port driver.
 *
 * Copyright (C) 2010 Nokia Corporation. All rights reserved.
 * Copyright (C) 2014 Sebastian Reichel <sre@kernel.org>
 *
 * Contact: Carlos Chinea <carlos.chinea@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>

#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/debugfs.h>

#include "omap_ssi_regs.h"
#include "omap_ssi.h"

static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused)
{
	return 0;
}

static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused)
{
	return 0;
}

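/* Sample the logical state of the incoming CAWAKE line via its GPIO. */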
static inline unsigned int ssi_wakein(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	return gpiod_get_value(omap_port->wake_gpio);
}

#ifdef CONFIG_DEBUG_FS
static void ssi_debug_remove_port(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	debugfs_remove_recursive(omap_port->dir);
}

static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused)
{
	struct hsi_port *port = m->private;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base = omap_ssi->sys;
	unsigned int ch;

	pm_runtime_get_sync(omap_port->pdev);
	if (omap_port->wake_irq > 0)
		seq_printf(m, "CAWAKE\t\t: %d\n", ssi_wakein(port));
	seq_printf(m, "WAKE\t\t: 0x%08x\n",
				readl(base + SSI_WAKE_REG(port->num)));
	seq_printf(m, "MPU_ENABLE_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_ENABLE_REG(port->num, 0)));
	seq_printf(m, "MPU_STATUS_IRQ%d\t: 0x%08x\n", 0,
			readl(base + SSI_MPU_STATUS_REG(port->num, 0)));
	/* SST */
	base = omap_port->sst_base;
	seq_puts(m, "\nSST\n===\n");
	seq_printf(m, "ID SST\t\t: 0x%08x\n",
				readl(base + SSI_SST_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SST_FRAMESIZE_REG));
	seq_printf(m, "DIVISOR\t\t: 0x%08x\n",
				readl(base + SSI_SST_DIVISOR_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SST_CHANNELS_REG));
	seq_printf(m, "ARBMODE\t\t: 0x%08x\n",
				readl(base + SSI_SST_ARBMODE_REG));
	seq_printf(m, "TXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SST_TXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SST_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SST_BREAK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SST_BUFFER_CH_REG(ch)));
	}
	/* SSR */
	base = omap_port->ssr_base;
	seq_puts(m, "\nSSR\n===\n");
	seq_printf(m, "ID SSR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ID_REG));
	seq_printf(m, "MODE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_MODE_REG));
	seq_printf(m, "FRAMESIZE\t: 0x%08x\n",
				readl(base + SSI_SSR_FRAMESIZE_REG));
	seq_printf(m, "CHANNELS\t: 0x%08x\n",
				readl(base + SSI_SSR_CHANNELS_REG));
	seq_printf(m, "TIMEOUT\t\t: 0x%08x\n",
				readl(base + SSI_SSR_TIMEOUT_REG));
	seq_printf(m, "RXSTATE\t\t: 0x%08x\n",
				readl(base + SSI_SSR_RXSTATE_REG));
	seq_printf(m, "BUFSTATE\t: 0x%08x\n",
				readl(base + SSI_SSR_BUFSTATE_REG));
	seq_printf(m, "BREAK\t\t: 0x%08x\n",
				readl(base + SSI_SSR_BREAK_REG));
	seq_printf(m, "ERROR\t\t: 0x%08x\n",
				readl(base + SSI_SSR_ERROR_REG));
	seq_printf(m, "ERRORACK\t: 0x%08x\n",
				readl(base + SSI_SSR_ERRORACK_REG));
	for (ch = 0; ch < omap_port->channels; ch++) {
		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
	}
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_port_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, ssi_debug_port_show, inode->i_private);
}

static const struct file_operations ssi_port_regs_fops = {
	.open		= ssi_port_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int ssi_div_get(void *data, u64 *val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	pm_runtime_get_sync(omap_port->pdev);
	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

static int ssi_div_set(void *data, u64 val)
{
	struct hsi_port *port = data;
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	if (val > 127)
		return -EINVAL;

	pm_runtime_get_sync(omap_port->pdev);
	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
	omap_port->sst.divisor = val;
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(ssi_sst_div_fops, ssi_div_get, ssi_div_set, "%llu\n");

static int ssi_debug_add_port(struct omap_ssi_port *omap_port,
				     struct dentry *dir)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);

	dir = debugfs_create_dir(dev_name(omap_port->dev), dir);
	if (!dir)
		return -ENOMEM;
	omap_port->dir = dir;
	debugfs_create_file("regs", S_IRUGO, dir, port, &ssi_port_regs_fops);
	dir = debugfs_create_dir("sst", dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_file("divisor", S_IRUGO | S_IWUSR, dir, port,
			    &ssi_sst_div_fops);

	return 0;
}
#endif

static void ssi_process_errqueue(struct work_struct *work)
{
	struct omap_ssi_port *omap_port;
	struct list_head *head, *tmp;
	struct hsi_msg *msg;

	omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work);

	list_for_each_safe(head, tmp, &omap_port->errqueue) {
		msg = list_entry(head, struct hsi_msg, link);
		msg->complete(msg);
		list_del(head);
	}
}

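/*
 * Claim a GDD (DMA) logical channel for @msg: scan the controller-wide
 * gdd_trn[] table and take the first free slot. Returns the logical
 * channel number, or -EBUSY if all SSI_MAX_GDD_LCH channels are in use.
 */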
static int ssi_claim_lch(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int lch;

	for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++)
		if (!omap_ssi->gdd_trn[lch].msg) {
			omap_ssi->gdd_trn[lch].msg = msg;
			omap_ssi->gdd_trn[lch].sg = msg->sgt.sgl;
			return lch;
		}

	return -EBUSY;
}

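/*
 * Program and start a GDD (DMA) transfer for @msg on logical channel @lch:
 * map the scatterlist, set up the source/destination ports and the hardware
 * sync line derived from the port and channel numbers, enable the channel
 * interrupt and finally kick the transfer by writing the CCR register.
 * A runtime PM reference is held for the whole duration of the transfer.
 */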
static int ssi_start_dma(struct hsi_msg *msg, int lch)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *gdd = omap_ssi->gdd;
	int err;
	u16 csdp;
	u16 ccr;
	u32 s_addr;
	u32 d_addr;
	u32 tmp;

	/* Hold clocks during the transfer */
	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_READ) {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_FROM_DEVICE);
		/* dma_map_sg() returns the number of mapped entries, 0 on error */
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
			SSI_SRC_SINGLE_ACCESS0 | SSI_SRC_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = msg->channel + 0x10 + (port->num * 8); /* Sync */
		ccr |= SSI_DST_AMODE_POSTINC | SSI_SRC_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = omap_port->ssr_dma +
					SSI_SSR_BUFFER_CH_REG(msg->channel);
		d_addr = sg_dma_address(msg->sgt.sgl);
	} else {
		err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents,
							DMA_TO_DEVICE);
		/* dma_map_sg() returns the number of mapped entries, 0 on error */
		if (!err) {
			dev_dbg(&ssi->device, "DMA map SG failed !\n");
			pm_runtime_put_autosuspend(omap_port->pdev);
			return -EIO;
		}
		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
			SSI_DST_SINGLE_ACCESS0 | SSI_DST_PERIPHERAL_PORT |
			SSI_DATA_TYPE_S32;
		ccr = (msg->channel + 1 + (port->num * 8)) & 0xf; /* Sync */
		ccr |= SSI_SRC_AMODE_POSTINC | SSI_DST_AMODE_CONST |
			SSI_CCR_ENABLE;
		s_addr = sg_dma_address(msg->sgt.sgl);
		d_addr = omap_port->sst_dma +
					SSI_SST_BUFFER_CH_REG(msg->channel);
	}
	dev_dbg(&ssi->device, "lch %d csdp %08x ccr %04x s_addr %08x d_addr %08x\n",
		lch, csdp, ccr, s_addr, d_addr);

	writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch));
	writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch));
	writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch));
	writel_relaxed(s_addr, gdd + SSI_GDD_CSSA_REG(lch));
	writew_relaxed(SSI_BYTES_TO_FRAMES(msg->sgt.sgl->length),
						gdd + SSI_GDD_CEN_REG(lch));

	spin_lock_bh(&omap_ssi->lock);
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp |= SSI_GDD_LCH(lch);
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock_bh(&omap_ssi->lock);
	writew(ccr, gdd + SSI_GDD_CCR_REG(lch));
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

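/*
 * Start a PIO transfer for @msg by arming the per-channel DATAACCEPT
 * (write) or DATAAVAILABLE (read) interrupt; the data words are then
 * moved one at a time from ssi_pio_complete().
 */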
static int ssi_start_pio(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 val;

	pm_runtime_get(omap_port->pdev);

	if (!pm_runtime_active(omap_port->pdev)) {
		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	if (msg->ttype == HSI_MSG_WRITE) {
		val = SSI_DATAACCEPT(msg->channel);
		/* Hold clocks for pio writes */
		pm_runtime_get(omap_port->pdev);
	} else {
		val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED;
	}
	dev_dbg(&port->device, "Single %s transfer\n",
						msg->ttype ? "write" : "read");
	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	pm_runtime_put_autosuspend(omap_port->pdev);
	msg->actual_len = 0;
	msg->status = HSI_STATUS_PROCEEDING;

	return 0;
}

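/*
 * Start the first queued message: use DMA for transfers larger than one
 * word when a GDD logical channel is available, otherwise fall back to PIO.
 */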
static int ssi_start_transfer(struct list_head *queue)
{
	struct hsi_msg *msg;
	int lch = -1;

	if (list_empty(queue))
		return 0;
	msg = list_first_entry(queue, struct hsi_msg, link);
	if (msg->status != HSI_STATUS_QUEUED)
		return 0;
	if ((msg->sgt.nents) && (msg->sgt.sgl->length > sizeof(u32)))
		lch = ssi_claim_lch(msg);
	if (lch >= 0)
		return ssi_start_dma(msg, lch);
	else
		return ssi_start_pio(msg);
}

static int ssi_async_break(struct hsi_msg *msg)
{
	struct hsi_port *port = hsi_get_port(msg->cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	int err = 0;
	u32 tmp;

	pm_runtime_get_sync(omap_port->pdev);
	if (msg->ttype == HSI_MSG_WRITE) {
		if (omap_port->sst.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		writel(1, omap_port->sst_base + SSI_SST_BREAK_REG);
		msg->status = HSI_STATUS_COMPLETED;
		msg->complete(msg);
	} else {
		if (omap_port->ssr.mode != SSI_MODE_FRAME) {
			err = -EINVAL;
			goto out;
		}
		spin_lock_bh(&omap_port->lock);
		tmp = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));
		writel(tmp | SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
		msg->status = HSI_STATUS_PROCEEDING;
		list_add_tail(&msg->link, &omap_port->brkqueue);
		spin_unlock_bh(&omap_port->lock);
	}
out:
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

static int ssi_async(struct hsi_msg *msg)
{
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct list_head *queue;
	int err = 0;

	/* Check the message before it is dereferenced via hsi_get_port() */
	BUG_ON(!msg);

	port = hsi_get_port(msg->cl);
	omap_port = hsi_port_drvdata(port);

	if (msg->sgt.nents > 1)
		return -ENOSYS; /* TODO: Add sg support */

	if (msg->break_frame)
		return ssi_async_break(msg);

	if (msg->ttype) {
		BUG_ON(msg->channel >= omap_port->sst.channels);
		queue = &omap_port->txqueue[msg->channel];
	} else {
		BUG_ON(msg->channel >= omap_port->ssr.channels);
		queue = &omap_port->rxqueue[msg->channel];
	}
	msg->status = HSI_STATUS_QUEUED;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	list_add_tail(&msg->link, queue);
	err = ssi_start_transfer(queue);
	if (err < 0) {
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
				msg->status, msg->ttype, msg->channel);

	return err;
}

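/*
 * Divisor selection: fck_rate is in kHz and max_speed in kb/s. The TX bit
 * clock base is fck/2, and the hardware appears to divide it by (div + 1)
 * (inferred from the rounding logic below), so tx_fckrate is pre-decremented
 * to make exact multiples round down. Worked example (illustrative numbers):
 * a 96000 kHz functional clock gives a 48000 kHz TX base, so
 * max_speed = 24000 kb/s yields (48000 - 1) / 24000 = 1 and
 * max_speed = 48000 kb/s yields divisor 0.
 */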
static u32 ssi_calculate_div(struct hsi_controller *ssi)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	u32 tx_fckrate = (u32) omap_ssi->fck_rate;

	/* / 2 : SSI TX clock is always half of the SSI functional clock */
	tx_fckrate >>= 1;
	/* Round down when tx_fckrate % omap_ssi->max_speed == 0 */
	tx_fckrate--;
	dev_dbg(&ssi->device, "TX div %d for fck_rate %lu kHz speed %d kb/s\n",
		tx_fckrate / omap_ssi->max_speed, omap_ssi->fck_rate,
		omap_ssi->max_speed);

	return tx_fckrate / omap_ssi->max_speed;
}

static void ssi_flush_queue(struct list_head *queue, struct hsi_client *cl)
{
	struct list_head *node, *tmp;
	struct hsi_msg *msg;

	list_for_each_safe(node, tmp, queue) {
		msg = list_entry(node, struct hsi_msg, link);
		if ((cl) && (cl != msg->cl))
			continue;
		list_del(node);
		pr_debug("flush queue: ch %d, msg %p len %d type %d ctxt %p\n",
			msg->channel, msg, msg->sgt.sgl->length,
					msg->ttype, msg->context);
		if (msg->destructor)
			msg->destructor(msg);
		else
			hsi_free_msg(msg);
	}
}

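/*
 * Apply a client configuration to the port: both directions are put to
 * SLEEP while the frame size, TX divisor, channel count, arbitration and
 * mode registers are updated. A shadow copy of the configuration is kept
 * in omap_port so it can be restored after the hardware loses context in
 * OFF mode.
 */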
static int ssi_setup(struct hsi_client *cl)
{
	struct hsi_port *port = to_hsi_port(cl->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	u32 div;
	u32 val;
	int err = 0;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	if (cl->tx_cfg.speed)
		omap_ssi->max_speed = cl->tx_cfg.speed;
	div = ssi_calculate_div(ssi);
	if (div > SSI_MAX_DIVISOR) {
		dev_err(&cl->device, "Invalid TX speed %d kb/s (div %d)\n",
						cl->tx_cfg.speed, div);
		err = -EINVAL;
		goto out;
	}
	/* Set TX/RX module to sleep to stop TX/RX during cfg update */
	writel_relaxed(SSI_MODE_SLEEP, sst + SSI_SST_MODE_REG);
	writel_relaxed(SSI_MODE_SLEEP, ssr + SSI_SSR_MODE_REG);
	/* Flush posted write */
	val = readl(ssr + SSI_SSR_MODE_REG);
	/* TX */
	writel_relaxed(31, sst + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(div, sst + SSI_SST_DIVISOR_REG);
	writel_relaxed(cl->tx_cfg.num_hw_channels, sst + SSI_SST_CHANNELS_REG);
	writel_relaxed(cl->tx_cfg.arb_mode, sst + SSI_SST_ARBMODE_REG);
	writel_relaxed(cl->tx_cfg.mode, sst + SSI_SST_MODE_REG);
	/* RX */
	writel_relaxed(31, ssr + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(cl->rx_cfg.num_hw_channels, ssr + SSI_SSR_CHANNELS_REG);
	writel_relaxed(0, ssr + SSI_SSR_TIMEOUT_REG);
	/* Cleanup the break queue if we leave FRAME mode */
	if ((omap_port->ssr.mode == SSI_MODE_FRAME) &&
		(cl->rx_cfg.mode != SSI_MODE_FRAME))
		ssi_flush_queue(&omap_port->brkqueue, cl);
	writel_relaxed(cl->rx_cfg.mode, ssr + SSI_SSR_MODE_REG);
	omap_port->channels = max(cl->rx_cfg.num_hw_channels,
				  cl->tx_cfg.num_hw_channels);
	/* Shadow registering for OFF mode */
	/* SST */
	omap_port->sst.divisor = div;
	omap_port->sst.frame_size = 31;
	omap_port->sst.channels = cl->tx_cfg.num_hw_channels;
	omap_port->sst.arb_mode = cl->tx_cfg.arb_mode;
	omap_port->sst.mode = cl->tx_cfg.mode;
	/* SSR */
	omap_port->ssr.frame_size = 31;
	omap_port->ssr.timeout = 0;
	omap_port->ssr.channels = cl->rx_cfg.num_hw_channels;
	omap_port->ssr.mode = cl->rx_cfg.mode;
out:
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return err;
}

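/*
 * Full port flush: idle the pins to stop traffic, cancel every in-flight
 * DMA transfer, reset the FIFO/buffer state machines, ack all pending
 * error/break/interrupt status and drop all queued messages.
 */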
static int ssi_flush(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	void __iomem *sst = omap_port->sst_base;
	void __iomem *ssr = omap_port->ssr_base;
	unsigned int i;
	u32 err;

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);

	/* stop all ssi communication */
	pinctrl_pm_select_idle_state(omap_port->pdev);
	udelay(1); /* wait for racing frames */

	/* Stop all DMA transfers */
	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if (!msg || (port != hsi_get_port(msg->cl)))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		if (msg->ttype == HSI_MSG_READ)
			pm_runtime_put_autosuspend(omap_port->pdev);
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	/* Flush all SST buffers */
	writel_relaxed(0, sst + SSI_SST_BUFSTATE_REG);
	writel_relaxed(0, sst + SSI_SST_TXSTATE_REG);
	/* Flush all SSR buffers */
	writel_relaxed(0, ssr + SSI_SSR_RXSTATE_REG);
	writel_relaxed(0, ssr + SSI_SSR_BUFSTATE_REG);
	/* Flush all errors */
	err = readl(ssr + SSI_SSR_ERROR_REG);
	writel_relaxed(err, ssr + SSI_SSR_ERRORACK_REG);
	/* Flush break */
	writel_relaxed(0, ssr + SSI_SSR_BREAK_REG);
	/* Clear interrupts */
	writel_relaxed(0, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0xffffff00,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	writel_relaxed(0, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(0xff, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
	/* Dequeue all pending requests */
	for (i = 0; i < omap_port->channels; i++) {
		/* Release write clocks */
		if (!list_empty(&omap_port->txqueue[i]))
			pm_runtime_put_autosuspend(omap_port->pdev);
		ssi_flush_queue(&omap_port->txqueue[i], NULL);
		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
	}
	ssi_flush_queue(&omap_port->brkqueue, NULL);

	/* Resume SSI communication */
	pinctrl_pm_select_default_state(omap_port->pdev);

	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return 0;
}

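/*
 * Outgoing wake line handling: wk_refcount counts the clients that asked
 * for TX. The first ssi_start_tx() schedules start_tx_work(), which grabs
 * a runtime PM reference and raises the WAKE line; the last matching
 * ssi_stop_tx() lowers the line and releases the reference.
 */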
static void start_tx_work(struct work_struct *work)
{
	struct omap_ssi_port *omap_port =
				container_of(work, struct omap_ssi_port, work);
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
}

static int ssi_start_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);

	dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	if (omap_port->wk_refcount++) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	spin_unlock_bh(&omap_port->wk_lock);

	schedule_work(&omap_port->work);

	return 0;
}

static int ssi_stop_tx(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(&port->device, "Wake out low %d\n", omap_port->wk_refcount);

	spin_lock_bh(&omap_port->wk_lock);
	BUG_ON(!omap_port->wk_refcount);
	if (--omap_port->wk_refcount) {
		spin_unlock_bh(&omap_port->wk_lock);
		return 0;
	}
	writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
	spin_unlock_bh(&omap_port->wk_lock);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */

	return 0;
}

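/*
 * Try to (re)start transfers on @queue: if starting the head message
 * fails, complete it with HSI_STATUS_ERROR and retry with the next one
 * until a transfer is started or the queue is empty.
 */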
static void ssi_transfer(struct omap_ssi_port *omap_port,
							struct list_head *queue)
{
	struct hsi_msg *msg;
	int err = -1;

	pm_runtime_get(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	while (err < 0) {
		err = ssi_start_transfer(queue);
		if (err < 0) {
			msg = list_first_entry(queue, struct hsi_msg, link);
			msg->status = HSI_STATUS_ERROR;
			msg->actual_len = 0;
			list_del(&msg->link);
			spin_unlock_bh(&omap_port->lock);
			msg->complete(msg);
			spin_lock_bh(&omap_port->lock);
		}
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
}

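/*
 * Flush every queue owned by @cl and release the runtime PM references
 * held by its in-flight writes, disarming and acking the interrupts that
 * are no longer needed. Called with the port lock held.
 */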
static void ssi_cleanup_queues(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 rxbufstate = 0;
	u32 txbufstate = 0;
	u32 status = SSI_ERROROCCURED;
	u32 tmp;

	ssi_flush_queue(&omap_port->brkqueue, cl);
	if (list_empty(&omap_port->brkqueue))
		status |= SSI_BREAKDETECTED;

	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->txqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->txqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			txbufstate |= (1 << i);
			status |= SSI_DATAACCEPT(i);
			/* Release the clocks writes, also GDD ones */
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		ssi_flush_queue(&omap_port->txqueue[i], cl);
	}
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		if ((msg->cl == cl) && (msg->status == HSI_STATUS_PROCEEDING)) {
			rxbufstate |= (1 << i);
			status |= SSI_DATAAVAILABLE(i);
		}
		ssi_flush_queue(&omap_port->rxqueue[i], cl);
		/* Check if we keep the error detection interrupt armed */
		if (!list_empty(&omap_port->rxqueue[i]))
			status &= ~SSI_ERROROCCURED;
	}
	/* Cleanup write buffers */
	tmp = readl(omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	tmp &= ~txbufstate;
	writel_relaxed(tmp, omap_port->sst_base + SSI_SST_BUFSTATE_REG);
	/* Cleanup read buffers */
	tmp = readl(omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	tmp &= ~rxbufstate;
	writel_relaxed(tmp, omap_port->ssr_base + SSI_SSR_BUFSTATE_REG);
	/* Disarm and ack pending interrupts */
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= ~status;
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(status, omap_ssi->sys +
		SSI_MPU_STATUS_REG(port->num, 0));
}

static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
{
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	unsigned int i;
	u32 val = 0;
	u32 tmp;

	for (i = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((!msg) || (msg->cl != cl))
			continue;
		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
		val |= (1 << i);
		/*
		 * Clock references for write will be handled in
		 * ssi_cleanup_queues
		 */
		if (msg->ttype == HSI_MSG_READ) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
		omap_ssi->gdd_trn[i].msg = NULL;
	}
	tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	writel(val, omap_ssi->sys + SSI_GDD_MPU_IRQ_STATUS_REG);
}

static int ssi_set_port_mode(struct omap_ssi_port *omap_port, u32 mode)
{
	writel(mode, omap_port->sst_base + SSI_SST_MODE_REG);
	writel(mode, omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int ssi_release(struct hsi_client *cl)
{
	struct hsi_port *port = hsi_get_port(cl);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);

	pm_runtime_get_sync(omap_port->pdev);
	spin_lock_bh(&omap_port->lock);
	/* Stop all the pending DMA requests for that client */
	ssi_cleanup_gdd(ssi, cl);
	/* Now cleanup all the queues */
	ssi_cleanup_queues(cl);
	/* If it is the last client of the port, do extra checks and cleanup */
	if (port->claimed <= 1) {
		/*
		 * Drop the clock reference for the incoming wake line
		 * if it is still kept high by the other side.
		 */
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_put_sync(omap_port->pdev);
		pm_runtime_get(omap_port->pdev);
		/* Stop any SSI TX/RX without a client */
		ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
		omap_port->sst.mode = SSI_MODE_SLEEP;
		omap_port->ssr.mode = SSI_MODE_SLEEP;
		pm_runtime_put(omap_port->pdev);
		WARN_ON(omap_port->wk_refcount != 0);
	}
	spin_unlock_bh(&omap_port->lock);
	pm_runtime_put_sync(omap_port->pdev);

	return 0;
}

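/*
 * Receive error handling: ack the error, cancel all in-flight GDD and PIO
 * reads, and complete the head message of every RX queue with
 * HSI_STATUS_ERROR before restarting any reads still queued behind it.
 */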
static void ssi_error(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	unsigned int i;
	u32 err;
	u32 val;
	u32 tmp;

	/* ACK error */
	err = readl(omap_port->ssr_base + SSI_SSR_ERROR_REG);
	dev_err(&port->device, "SSI error: 0x%02x\n", err);
	if (!err) {
		dev_dbg(&port->device, "spurious SSI error ignored!\n");
		return;
	}
	spin_lock(&omap_ssi->lock);
	/* Cancel all GDD read transfers */
	for (i = 0, val = 0; i < SSI_MAX_GDD_LCH; i++) {
		msg = omap_ssi->gdd_trn[i].msg;
		if ((msg) && (msg->ttype == HSI_MSG_READ)) {
			writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
			val |= (1 << i);
			omap_ssi->gdd_trn[i].msg = NULL;
		}
	}
	tmp = readl(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	tmp &= ~val;
	writel_relaxed(tmp, omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG);
	spin_unlock(&omap_ssi->lock);
	/* Cancel all PIO read transfers */
	spin_lock(&omap_port->lock);
	tmp = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	tmp &= 0xfeff00ff; /* Disable error & all dataavailable interrupts */
	writel_relaxed(tmp, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	/* ACK error */
	writel_relaxed(err, omap_port->ssr_base + SSI_SSR_ERRORACK_REG);
	writel_relaxed(SSI_ERROROCCURED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	/* Signal the error to all current pending read requests */
	for (i = 0; i < omap_port->channels; i++) {
		if (list_empty(&omap_port->rxqueue[i]))
			continue;
		msg = list_first_entry(&omap_port->rxqueue[i], struct hsi_msg,
									link);
		list_del(&msg->link);
		msg->status = HSI_STATUS_ERROR;
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
		/* Now restart queued reads if any */
		ssi_transfer(omap_port, &omap_port->rxqueue[i]);
		spin_lock(&omap_port->lock);
	}
	spin_unlock(&omap_port->lock);
}

static void ssi_break_complete(struct hsi_port *port)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct hsi_msg *msg;
	struct hsi_msg *tmp;
	u32 val;

	dev_dbg(&port->device, "HWBREAK received\n");

	spin_lock(&omap_port->lock);
	val = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	val &= ~SSI_BREAKDETECTED;
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(0, omap_port->ssr_base + SSI_SSR_BREAK_REG);
	writel(SSI_BREAKDETECTED,
			omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	spin_unlock(&omap_port->lock);

	list_for_each_entry_safe(msg, tmp, &omap_port->brkqueue, link) {
		msg->status = HSI_STATUS_COMPLETED;
		spin_lock(&omap_port->lock);
		list_del(&msg->link);
		spin_unlock(&omap_port->lock);
		msg->complete(msg);
	}
}

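/*
 * Move one 32-bit word for the head message of @queue. Writes push the
 * next word into the TX buffer; reads pull it from the RX buffer. The
 * completion of a write is deferred by one interrupt so that the last
 * frame is really out on the wire before complete() is called.
 */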
static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
{
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_msg *msg;
	u32 *buf;
	u32 reg;
	u32 val;

	spin_lock_bh(&omap_port->lock);
	msg = list_first_entry(queue, struct hsi_msg, link);
	if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) {
		msg->actual_len = 0;
		msg->status = HSI_STATUS_PENDING;
	}
	if (msg->ttype == HSI_MSG_WRITE)
		val = SSI_DATAACCEPT(msg->channel);
	else
		val = SSI_DATAAVAILABLE(msg->channel);
	if (msg->status == HSI_STATUS_PROCEEDING) {
		buf = sg_virt(msg->sgt.sgl) + msg->actual_len;
		if (msg->ttype == HSI_MSG_WRITE)
			writel(*buf, omap_port->sst_base +
					SSI_SST_BUFFER_CH_REG(msg->channel));
		else
			*buf = readl(omap_port->ssr_base +
					SSI_SSR_BUFFER_CH_REG(msg->channel));
		dev_dbg(&port->device, "ch %d ttype %d 0x%08x\n", msg->channel,
							msg->ttype, *buf);
		msg->actual_len += sizeof(*buf);
		if (msg->actual_len >= msg->sgt.sgl->length)
			msg->status = HSI_STATUS_COMPLETED;
		/*
		 * Wait for the last written frame to be really sent before
		 * we call the complete callback
		 */
		if ((msg->status == HSI_STATUS_PROCEEDING) ||
				((msg->status == HSI_STATUS_COMPLETED) &&
					(msg->ttype == HSI_MSG_WRITE))) {
			writel(val, omap_ssi->sys +
					SSI_MPU_STATUS_REG(port->num, 0));
			spin_unlock_bh(&omap_port->lock);

			return;
		}
	}
	/* Transfer completed at this point */
	reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	if (msg->ttype == HSI_MSG_WRITE) {
		/* Release clocks for write transfer */
		pm_runtime_mark_last_busy(omap_port->pdev);
		pm_runtime_put_autosuspend(omap_port->pdev);
	}
	reg &= ~val;
	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
	writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0));
	list_del(&msg->link);
	spin_unlock_bh(&omap_port->lock);
	msg->complete(msg);
	ssi_transfer(omap_port, queue);
}

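/*
 * Threaded port IRQ handler: dispatch every enabled status bit (per-channel
 * data events, break, error) and loop until no enabled bit is left pending,
 * so that events raised while servicing are not lost.
 */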
static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem *sys = omap_ssi->sys;
	unsigned int ch;
	u32 status_reg;

	pm_runtime_get_sync(omap_port->pdev);

	do {
		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		for (ch = 0; ch < omap_port->channels; ch++) {
			if (status_reg & SSI_DATAACCEPT(ch))
				ssi_pio_complete(port, &omap_port->txqueue[ch]);
			if (status_reg & SSI_DATAAVAILABLE(ch))
				ssi_pio_complete(port, &omap_port->rxqueue[ch]);
		}
		if (status_reg & SSI_BREAKDETECTED)
			ssi_break_complete(port);
		if (status_reg & SSI_ERROROCCURED)
			ssi_error(port);

		status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0));
		status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0));

		/* TODO: sleep if we retry? */
	} while (status_reg);

	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);

	return IRQ_HANDLED;
}

static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
{
	struct hsi_port *port = (struct hsi_port *)ssi_port;
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	if (ssi_wakein(port)) {
		/*
		 * We can have a quick high-low-high transition on the line.
		 * In such a case, if interrupt latency is long, we can miss
		 * the low event or see the high event twice. This workaround
		 * avoids breaking the clock reference count when such a
		 * situation occurs.
		 */
		if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags))
			pm_runtime_get_sync(omap_port->pdev);
		dev_dbg(&ssi->device, "Wake in high\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_SET_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_START_RX);
	} else {
		dev_dbg(&ssi->device, "Wake in low\n");
		if (omap_port->wktest) { /* FIXME: HACK ! To be removed */
			writel(SSI_WAKE(0),
				omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num));
		}
		hsi_event(port, HSI_EVENT_STOP_RX);
		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
			pm_runtime_mark_last_busy(omap_port->pdev);
			pm_runtime_put_autosuspend(omap_port->pdev);
		}
	}

	return IRQ_HANDLED;
}

static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int err;

	err = platform_get_irq(pd, 0);
	if (err < 0) {
		dev_err(&port->device, "Port IRQ resource missing\n");
		return err;
	}
	omap_port->irq = err;
	err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL,
				ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port);
	if (err < 0)
		dev_err(&port->device, "Request IRQ %d failed (%d)\n",
							omap_port->irq, err);
	return err;
}

static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd)
{
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	int cawake_irq;
	int err;

	if (!omap_port->wake_gpio) {
		omap_port->wake_irq = -1;
		return 0;
	}

	cawake_irq = gpiod_to_irq(omap_port->wake_gpio);
	omap_port->wake_irq = cawake_irq;

	err = devm_request_threaded_irq(&port->device, cawake_irq, NULL,
		ssi_wake_thread,
		IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
		"SSI cawake", port);
	if (err < 0) {
		dev_err(&port->device, "Request Wake in IRQ %d failed %d\n",
						cawake_irq, err);
		return err;
	}
	err = enable_irq_wake(cawake_irq);
	if (err < 0)
		dev_err(&port->device, "Enable wake on the wakeline in irq %d failed %d\n",
			cawake_irq, err);

	return err;
}

static void ssi_queues_init(struct omap_ssi_port *omap_port)
{
	unsigned int ch;

	for (ch = 0; ch < SSI_MAX_CHANNELS; ch++) {
		INIT_LIST_HEAD(&omap_port->txqueue[ch]);
		INIT_LIST_HEAD(&omap_port->rxqueue[ch]);
	}
	INIT_LIST_HEAD(&omap_port->brkqueue);
}

static int ssi_port_get_iomem(struct platform_device *pd,
		const char *name, void __iomem **pbase, dma_addr_t *phy)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct resource *mem;
	struct resource *ioarea;
	void __iomem *base;

	mem = platform_get_resource_byname(pd, IORESOURCE_MEM, name);
	if (!mem) {
		dev_err(&pd->dev, "IO memory region missing (%s)\n", name);
		return -ENXIO;
	}
	ioarea = devm_request_mem_region(&port->device, mem->start,
					resource_size(mem), dev_name(&pd->dev));
	if (!ioarea) {
		dev_err(&pd->dev, "%s IO memory region request failed\n",
								mem->name);
		return -ENXIO;
	}
	base = devm_ioremap(&port->device, mem->start, resource_size(mem));
	if (!base) {
		dev_err(&pd->dev, "%s IO remap failed\n", mem->name);
		return -ENXIO;
	}
	*pbase = base;

	if (phy)
		*phy = mem->start;

	return 0;
}

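/*
 * Probe one DT port child: bind it to the first free slot of the parent
 * controller, map the "tx" (SST) and "rx" (SSR) register regions, wire up
 * the port and CAWAKE interrupts and register the HSI port callbacks.
 */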
static int ssi_port_probe(struct platform_device *pd)
{
	struct device_node *np = pd->dev.of_node;
	struct hsi_port *port;
	struct omap_ssi_port *omap_port;
	struct hsi_controller *ssi = dev_get_drvdata(pd->dev.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	struct gpio_desc *cawake_gpio = NULL;
	u32 port_id;
	int err;

	dev_dbg(&pd->dev, "init ssi port...\n");

	if (!ssi->port || !omap_ssi->port) {
		dev_err(&pd->dev, "ssi controller not initialized!\n");
		err = -ENODEV;
		goto error;
	}

	/* get id of first uninitialized port in controller */
	for (port_id = 0; port_id < ssi->num_ports && omap_ssi->port[port_id];
		port_id++)
		;

	if (port_id >= ssi->num_ports) {
		dev_err(&pd->dev, "port id out of range!\n");
		err = -ENODEV;
		goto error;
	}

	port = ssi->port[port_id];

	if (!np) {
		dev_err(&pd->dev, "missing device tree data\n");
		err = -EINVAL;
		goto error;
	}

	cawake_gpio = devm_gpiod_get(&pd->dev, "ti,ssi-cawake", GPIOD_IN);
	if (IS_ERR(cawake_gpio)) {
		err = PTR_ERR(cawake_gpio);
		dev_err(&pd->dev, "couldn't get cawake gpio (err=%d)!\n", err);
		goto error;
	}

	omap_port = devm_kzalloc(&port->device, sizeof(*omap_port), GFP_KERNEL);
	if (!omap_port) {
		err = -ENOMEM;
		goto error;
	}
	omap_port->wake_gpio = cawake_gpio;
	omap_port->pdev = &pd->dev;
	omap_port->port_id = port_id;

	INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue);
	INIT_WORK(&omap_port->work, start_tx_work);

	/* initialize HSI port */
	port->async	= ssi_async;
	port->setup	= ssi_setup;
	port->flush	= ssi_flush;
	port->start_tx	= ssi_start_tx;
	port->stop_tx	= ssi_stop_tx;
	port->release	= ssi_release;
	hsi_port_set_drvdata(port, omap_port);
	omap_ssi->port[port_id] = omap_port;

	platform_set_drvdata(pd, port);

	err = ssi_port_get_iomem(pd, "tx", &omap_port->sst_base,
		&omap_port->sst_dma);
	if (err < 0)
		goto error;
	err = ssi_port_get_iomem(pd, "rx", &omap_port->ssr_base,
		&omap_port->ssr_dma);
	if (err < 0)
		goto error;

	err = ssi_port_irq(port, pd);
	if (err < 0)
		goto error;
	err = ssi_wake_irq(port, pd);
	if (err < 0)
		goto error;

	ssi_queues_init(omap_port);
	spin_lock_init(&omap_port->lock);
	spin_lock_init(&omap_port->wk_lock);
	omap_port->dev = &port->device;

	pm_runtime_use_autosuspend(omap_port->pdev);
	pm_runtime_set_autosuspend_delay(omap_port->pdev, 250);
	pm_runtime_enable(omap_port->pdev);

#ifdef CONFIG_DEBUG_FS
	err = ssi_debug_add_port(omap_port, omap_ssi->dir);
	if (err < 0) {
		pm_runtime_disable(omap_port->pdev);
		goto error;
	}
#endif

	hsi_add_clients_from_dt(port, np);

	dev_info(&pd->dev, "ssi port %u successfully initialized\n", port_id);

	return 0;

error:
	return err;
}

static int ssi_port_remove(struct platform_device *pd)
{
	struct hsi_port *port = platform_get_drvdata(pd);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

#ifdef CONFIG_DEBUG_FS
	ssi_debug_remove_port(port);
#endif

	cancel_delayed_work_sync(&omap_port->errqueue_work);

	hsi_port_unregister_clients(port);

	port->async	= hsi_dummy_msg;
	port->setup	= hsi_dummy_cl;
	port->flush	= hsi_dummy_cl;
	port->start_tx	= hsi_dummy_cl;
	port->stop_tx	= hsi_dummy_cl;
	port->release	= hsi_dummy_cl;

	omap_ssi->port[omap_port->port_id] = NULL;
	platform_set_drvdata(pd, NULL);

	pm_runtime_dont_use_autosuspend(&pd->dev);
	pm_runtime_disable(&pd->dev);

	return 0;
}

static int ssi_restore_divisor(struct omap_ssi_port *omap_port)
{
	writel_relaxed(omap_port->sst.divisor,
				omap_port->sst_base + SSI_SST_DIVISOR_REG);

	return 0;
}

void omap_ssi_port_update_fclk(struct hsi_controller *ssi,
			       struct omap_ssi_port *omap_port)
{
	/* update divisor */
	u32 div = ssi_calculate_div(ssi);

	omap_port->sst.divisor = div;
	ssi_restore_divisor(omap_port);
}

#ifdef CONFIG_PM
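/*
 * Runtime PM context handling: the port registers are lost when the power
 * domain enters OFF, so the interrupt enable mask and the SST/SSR
 * configuration are saved on suspend and written back on resume. If the
 * platform get_loss() hook reports that no context loss happened, only
 * the mode and TX divisor need to be restored.
 */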
static int ssi_save_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	omap_port->sys_mpu_enable = readl(omap_ssi->sys +
					SSI_MPU_ENABLE_REG(port->num, 0));

	return 0;
}

static int ssi_restore_port_ctx(struct omap_ssi_port *omap_port)
{
	struct hsi_port *port = to_hsi_port(omap_port->dev);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);
	void __iomem	*base;

	writel_relaxed(omap_port->sys_mpu_enable,
			omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));

	/* SST context */
	base = omap_port->sst_base;
	writel_relaxed(omap_port->sst.frame_size, base + SSI_SST_FRAMESIZE_REG);
	writel_relaxed(omap_port->sst.channels, base + SSI_SST_CHANNELS_REG);
	writel_relaxed(omap_port->sst.arb_mode, base + SSI_SST_ARBMODE_REG);

	/* SSR context */
	base = omap_port->ssr_base;
	writel_relaxed(omap_port->ssr.frame_size, base + SSI_SSR_FRAMESIZE_REG);
	writel_relaxed(omap_port->ssr.channels, base + SSI_SSR_CHANNELS_REG);
	writel_relaxed(omap_port->ssr.timeout, base + SSI_SSR_TIMEOUT_REG);

	return 0;
}

static int ssi_restore_port_mode(struct omap_ssi_port *omap_port)
{
	u32 mode;

	writel_relaxed(omap_port->sst.mode,
				omap_port->sst_base + SSI_SST_MODE_REG);
	writel_relaxed(omap_port->ssr.mode,
				omap_port->ssr_base + SSI_SSR_MODE_REG);
	/* OCP barrier */
	mode = readl(omap_port->ssr_base + SSI_SSR_MODE_REG);

	return 0;
}

static int omap_ssi_port_runtime_suspend(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime suspend!\n");

	ssi_set_port_mode(omap_port, SSI_MODE_SLEEP);
	if (omap_ssi->get_loss)
		omap_port->loss_count =
				omap_ssi->get_loss(ssi->device.parent);
	ssi_save_port_ctx(omap_port);

	return 0;
}

static int omap_ssi_port_runtime_resume(struct device *dev)
{
	struct hsi_port *port = dev_get_drvdata(dev);
	struct omap_ssi_port *omap_port = hsi_port_drvdata(port);
	struct hsi_controller *ssi = to_hsi_controller(port->device.parent);
	struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi);

	dev_dbg(dev, "port runtime resume!\n");

	if ((omap_ssi->get_loss) && (omap_port->loss_count ==
				omap_ssi->get_loss(ssi->device.parent)))
		goto mode; /* We always need to restore the mode & TX divisor */

	ssi_restore_port_ctx(omap_port);

mode:
	ssi_restore_divisor(omap_port);
	ssi_restore_port_mode(omap_port);

	return 0;
}

static const struct dev_pm_ops omap_ssi_port_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_ssi_port_runtime_suspend,
		omap_ssi_port_runtime_resume, NULL)
};

#define DEV_PM_OPS     (&omap_ssi_port_pm_ops)
#else
#define DEV_PM_OPS     NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap_ssi_port_of_match[] = {
	{ .compatible = "ti,omap3-ssi-port", },
	{},
};
MODULE_DEVICE_TABLE(of, omap_ssi_port_of_match);
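/*
 * Illustrative device tree fragment for such a port, inferred from the
 * resource lookups in this driver (reg names "tx"/"rx", one port IRQ, a
 * CAWAKE GPIO); the addresses and GPIO numbers below are made up, see
 * Documentation/devicetree/bindings/hsi/omap-ssi.txt for the canonical
 * binding:
 *
 *	ssi-port@4805a000 {
 *		compatible = "ti,omap3-ssi-port";
 *		reg = <0x4805a000 0x800>,
 *		      <0x4805a800 0x800>;
 *		reg-names = "tx", "rx";
 *		interrupts = <67>;
 *		ti,ssi-cawake-gpio = <&gpio5 23 GPIO_ACTIVE_HIGH>;
 *	};
 */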
#else
#define omap_ssi_port_of_match NULL
#endif

struct platform_driver ssi_port_pdriver = {
	.probe = ssi_port_probe,
	.remove	= ssi_port_remove,
	.driver	= {
		.name	= "omap_ssi_port",
		.of_match_table = omap_ssi_port_of_match,
		.pm	= DEV_PM_OPS,
	},
};