xref: /linux/drivers/usb/renesas_usbhs/fifo.c (revision f2ee442115c9b6219083c019939a9cc0c9abb2f8)
1 /*
2  * Renesas USB driver
3  *
4  * Copyright (C) 2011 Renesas Solutions Corp.
5  * Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software
14  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
15  *
16  */
17 #include <linux/delay.h>
18 #include <linux/io.h>
19 #include <linux/scatterlist.h>
20 #include "./common.h"
21 #include "./pipe.h"
22 
/* accessors for the three FIFO descriptors embedded in priv->fifo_info */
#define usbhsf_get_cfifo(p)	(&((p)->fifo_info.cfifo))
#define usbhsf_get_d0fifo(p)	(&((p)->fifo_info.d0fifo))
#define usbhsf_get_d1fifo(p)	(&((p)->fifo_info.d1fifo))

/* a fifo is "busy" while it is routed to some pipe */
#define usbhsf_fifo_is_busy(f)	((f)->pipe) /* see usbhs_pipe_select_fifo */
28 
29 /*
30  *		packet initialize
31  */
32 void usbhs_pkt_init(struct usbhs_pkt *pkt)
33 {
34 	pkt->dma = DMA_ADDR_INVALID;
35 	INIT_LIST_HEAD(&pkt->node);
36 }
37 
38 /*
39  *		packet control function
40  */
41 static int usbhsf_null_handle(struct usbhs_pkt *pkt, int *is_done)
42 {
43 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pkt->pipe);
44 	struct device *dev = usbhs_priv_to_dev(priv);
45 
46 	dev_err(dev, "null handler\n");
47 
48 	return -EINVAL;
49 }
50 
/* installed by usbhs_pkt_push() when a pipe has no handler set */
static struct usbhs_pkt_handle usbhsf_null_handler = {
	.prepare = usbhsf_null_handle,
	.try_run = usbhsf_null_handle,
};
55 
56 void usbhs_pkt_push(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt,
57 		    void (*done)(struct usbhs_priv *priv,
58 				 struct usbhs_pkt *pkt),
59 		    void *buf, int len, int zero)
60 {
61 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
62 	struct device *dev = usbhs_priv_to_dev(priv);
63 	unsigned long flags;
64 
65 	if (!done) {
66 		dev_err(dev, "no done function\n");
67 		return;
68 	}
69 
70 	/********************  spin lock ********************/
71 	usbhs_lock(priv, flags);
72 
73 	if (!pipe->handler) {
74 		dev_err(dev, "no handler function\n");
75 		pipe->handler = &usbhsf_null_handler;
76 	}
77 
78 	list_del_init(&pkt->node);
79 	list_add_tail(&pkt->node, &pipe->list);
80 
81 	/*
82 	 * each pkt must hold own handler.
83 	 * because handler might be changed by its situation.
84 	 * dma handler -> pio handler.
85 	 */
86 	pkt->pipe	= pipe;
87 	pkt->buf	= buf;
88 	pkt->handler	= pipe->handler;
89 	pkt->length	= len;
90 	pkt->zero	= zero;
91 	pkt->actual	= 0;
92 	pkt->done	= done;
93 
94 	usbhs_unlock(priv, flags);
95 	/********************  spin unlock ******************/
96 }
97 
/* detach @pkt from its pipe list; safe to call on an already-unlinked pkt */
static void __usbhsf_pkt_del(struct usbhs_pkt *pkt)
{
	list_del_init(&pkt->node);
}
102 
103 static struct usbhs_pkt *__usbhsf_pkt_get(struct usbhs_pipe *pipe)
104 {
105 	if (list_empty(&pipe->list))
106 		return NULL;
107 
108 	return list_entry(pipe->list.next, struct usbhs_pkt, node);
109 }
110 
/*
 * remove @pkt from @pipe's queue; when @pkt is NULL the head packet is
 * taken instead.  Returns the removed packet, or NULL if none existed.
 */
struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pkt *found = pkt;
	unsigned long flags;

	/********************  spin lock ********************/
	usbhs_lock(priv, flags);

	if (!found)
		found = __usbhsf_pkt_get(pipe);

	if (found)
		__usbhsf_pkt_del(found);

	usbhs_unlock(priv, flags);
	/********************  spin unlock ******************/

	return found;
}
130 
/* dispatch selectors for usbhsf_pkt_handler() */
enum {
	USBHSF_PKT_PREPARE,	/* run handler->prepare */
	USBHSF_PKT_TRY_RUN,	/* run handler->try_run */
	USBHSF_PKT_DMA_DONE,	/* run handler->dma_done */
};
136 
137 static int usbhsf_pkt_handler(struct usbhs_pipe *pipe, int type)
138 {
139 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
140 	struct usbhs_pkt *pkt;
141 	struct device *dev = usbhs_priv_to_dev(priv);
142 	int (*func)(struct usbhs_pkt *pkt, int *is_done);
143 	unsigned long flags;
144 	int ret = 0;
145 	int is_done = 0;
146 
147 	/********************  spin lock ********************/
148 	usbhs_lock(priv, flags);
149 
150 	pkt = __usbhsf_pkt_get(pipe);
151 	if (!pkt)
152 		goto __usbhs_pkt_handler_end;
153 
154 	switch (type) {
155 	case USBHSF_PKT_PREPARE:
156 		func = pkt->handler->prepare;
157 		break;
158 	case USBHSF_PKT_TRY_RUN:
159 		func = pkt->handler->try_run;
160 		break;
161 	case USBHSF_PKT_DMA_DONE:
162 		func = pkt->handler->dma_done;
163 		break;
164 	default:
165 		dev_err(dev, "unknown pkt hander\n");
166 		goto __usbhs_pkt_handler_end;
167 	}
168 
169 	ret = func(pkt, &is_done);
170 
171 	if (is_done)
172 		__usbhsf_pkt_del(pkt);
173 
174 __usbhs_pkt_handler_end:
175 	usbhs_unlock(priv, flags);
176 	/********************  spin unlock ******************/
177 
178 	if (is_done) {
179 		pkt->done(priv, pkt);
180 		usbhs_pkt_start(pipe);
181 	}
182 
183 	return ret;
184 }
185 
/* kick @pipe's head packet by running its ->prepare step */
void usbhs_pkt_start(struct usbhs_pipe *pipe)
{
	usbhsf_pkt_handler(pipe, USBHSF_PKT_PREPARE);
}
190 
191 /*
192  *		irq enable/disable function
193  */
/*
 * enable/disable the BEMP/BRDY irq bit of @pipe on the current mod.
 *
 * NOTE: the "status" macro argument is token-pasted into mod->irq_<status>
 * and also names the local bit-mask variable; the bare "return" exits the
 * *calling* (void) function when no mod is selected.
 */
#define usbhsf_irq_empty_ctrl(p, e) usbhsf_irq_callback_ctrl(p, bempsts, e)
#define usbhsf_irq_ready_ctrl(p, e) usbhsf_irq_callback_ctrl(p, brdysts, e)
#define usbhsf_irq_callback_ctrl(pipe, status, enable)			\
	({								\
		struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);	\
		struct usbhs_mod *mod = usbhs_mod_get_current(priv);	\
		u16 status = (1 << usbhs_pipe_number(pipe));		\
		if (!mod)						\
			return;						\
		if (enable)						\
			mod->irq_##status |= status;			\
		else							\
			mod->irq_##status &= ~status;			\
		usbhs_irq_callback_update(priv, mod);			\
	})
209 
static void usbhsf_tx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	/*
	 * The DCP pipe can NOT use the "ready" interrupt for sending;
	 * it must use the "empty" interrupt instead.
	 * see
	 *   "Operation" - "Interrupt Function" - "BRDY Interrupt"
	 *
	 * Normal pipes, on the other hand, may use the "ready" interrupt
	 * for sending regardless of single/double buffering.
	 */
	if (!usbhs_pipe_is_dcp(pipe))
		usbhsf_irq_ready_ctrl(pipe, enable);
	else
		usbhsf_irq_empty_ctrl(pipe, enable);
}
226 
/* reception always uses the "ready" (BRDY) interrupt */
static void usbhsf_rx_irq_ctrl(struct usbhs_pipe *pipe, int enable)
{
	usbhsf_irq_ready_ctrl(pipe, enable);
}
231 
232 /*
233  *		FIFO ctrl
234  */
/* set BVAL so the controller transmits the (short or zero-length) packet */
static void usbhsf_send_terminator(struct usbhs_pipe *pipe,
				   struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->ctr, BVAL, BVAL);
}
242 
243 static int usbhsf_fifo_barrier(struct usbhs_priv *priv,
244 			       struct usbhs_fifo *fifo)
245 {
246 	int timeout = 1024;
247 
248 	do {
249 		/* The FIFO port is accessible */
250 		if (usbhs_read(priv, fifo->ctr) & FRDY)
251 			return 0;
252 
253 		udelay(10);
254 	} while (timeout--);
255 
256 	return -EBUSY;
257 }
258 
/* clear the FIFO buffer (BCLR); waits for FRDY first on non-DCP pipes */
static void usbhsf_fifo_clear(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	/* NOTE(review): the barrier result is ignored here — confirm intended */
	if (!usbhs_pipe_is_dcp(pipe))
		usbhsf_fifo_barrier(priv, fifo);

	usbhs_write(priv, fifo->ctr, BCLR);
}
269 
/* number of received bytes currently held in the fifo (DTLN field of CTR) */
static int usbhsf_fifo_rcv_len(struct usbhs_priv *priv,
			       struct usbhs_fifo *fifo)
{
	return usbhs_read(priv, fifo->ctr) & DTLN_MASK;
}
275 
/* detach @fifo from @pipe and clear its SEL register (CURPIPE = 0) */
static void usbhsf_fifo_unselect(struct usbhs_pipe *pipe,
				 struct usbhs_fifo *fifo)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_pipe_select_fifo(pipe, NULL);
	usbhs_write(priv, fifo->sel, 0);
}
284 
/*
 * route @pipe to @fifo: program CURPIPE (plus ISEL for DCP when @write)
 * into the fifo's SEL register, then poll until the controller latches
 * the value.  Returns -EBUSY when pipe or fifo is already in use and
 * -EIO when the selection never takes effect.
 */
static int usbhsf_fifo_select(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      int write)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	int timeout = 1024;
	u16 mask = ((1 << 5) | 0xF);		/* mask of ISEL | CURPIPE */
	u16 base = usbhs_pipe_number(pipe);	/* CURPIPE */

	if (usbhs_pipe_is_busy(pipe) ||
	    usbhsf_fifo_is_busy(fifo))
		return -EBUSY;

	if (usbhs_pipe_is_dcp(pipe)) {
		base |= (1 == write) << 5;	/* ISEL */

		if (usbhs_mod_is_host(priv))
			usbhs_dcp_dir_for_host(pipe, write);
	}

	/* "base" will be used below  */
	usbhs_write(priv, fifo->sel, base | MBW_32);

	/* check ISEL and CURPIPE value */
	while (timeout--) {
		if (base == (mask & usbhs_read(priv, fifo->sel))) {
			usbhs_pipe_select_fifo(pipe, fifo);
			return 0;
		}
		udelay(10);
	}

	dev_err(dev, "fifo select error\n");

	return -EIO;
}
322 
323 /*
324  *		DCP status stage
325  */
326 static int usbhs_dcp_dir_switch_to_write(struct usbhs_pkt *pkt, int *is_done)
327 {
328 	struct usbhs_pipe *pipe = pkt->pipe;
329 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
330 	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
331 	struct device *dev = usbhs_priv_to_dev(priv);
332 	int ret;
333 
334 	usbhs_pipe_disable(pipe);
335 
336 	ret = usbhsf_fifo_select(pipe, fifo, 1);
337 	if (ret < 0) {
338 		dev_err(dev, "%s() faile\n", __func__);
339 		return ret;
340 	}
341 
342 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
343 
344 	usbhsf_fifo_clear(pipe, fifo);
345 	usbhsf_send_terminator(pipe, fifo);
346 
347 	usbhsf_fifo_unselect(pipe, fifo);
348 
349 	usbhsf_tx_irq_ctrl(pipe, 1);
350 	usbhs_pipe_enable(pipe);
351 
352 	return ret;
353 }
354 
/*
 * prepare the OUT (read) status stage of a control transfer: select the
 * CFIFO for read, force DATA1, clear the fifo, then re-enable the pipe
 * with the "ready" irq on.
 */
static int usbhs_dcp_dir_switch_to_read(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	struct device *dev = usbhs_priv_to_dev(priv);
	int ret;

	usbhs_pipe_disable(pipe);

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0) {
		dev_err(dev, "%s() fail\n", __func__);
		return ret;
	}

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
	usbhsf_fifo_clear(pipe, fifo);

	usbhsf_fifo_unselect(pipe, fifo);

	usbhsf_rx_irq_ctrl(pipe, 1);
	usbhs_pipe_enable(pipe);

	return ret;

}
382 
383 static int usbhs_dcp_dir_switch_done(struct usbhs_pkt *pkt, int *is_done)
384 {
385 	struct usbhs_pipe *pipe = pkt->pipe;
386 
387 	if (pkt->handler == &usbhs_dcp_status_stage_in_handler)
388 		usbhsf_tx_irq_ctrl(pipe, 0);
389 	else
390 		usbhsf_rx_irq_ctrl(pipe, 0);
391 
392 	pkt->actual = pkt->length;
393 	*is_done = 1;
394 
395 	return 0;
396 }
397 
/* handlers for the status stage of a control transfer (IN and OUT) */
struct usbhs_pkt_handle usbhs_dcp_status_stage_in_handler = {
	.prepare = usbhs_dcp_dir_switch_to_write,
	.try_run = usbhs_dcp_dir_switch_done,
};

struct usbhs_pkt_handle usbhs_dcp_status_stage_out_handler = {
	.prepare = usbhs_dcp_dir_switch_to_read,
	.try_run = usbhs_dcp_dir_switch_done,
};
407 
408 /*
409  *		DCP data stage (push)
410  */
411 static int usbhsf_dcp_data_stage_try_push(struct usbhs_pkt *pkt, int *is_done)
412 {
413 	struct usbhs_pipe *pipe = pkt->pipe;
414 
415 	usbhs_pipe_sequence_data1(pipe); /* DATA1 */
416 
417 	/*
418 	 * change handler to PIO push
419 	 */
420 	pkt->handler = &usbhs_fifo_pio_push_handler;
421 
422 	return pkt->handler->prepare(pkt, is_done);
423 }
424 
/* DCP data stage (push) entry point; hands over to PIO push */
struct usbhs_pkt_handle usbhs_dcp_data_stage_out_handler = {
	.prepare = usbhsf_dcp_data_stage_try_push,
};
428 
429 /*
430  *		DCP data stage (pop)
431  */
/*
 * DCP data stage (IN): switch the DCP to read direction, clear the
 * fifo, force DATA1, then hand the packet over to the PIO pop handler.
 */
static int usbhsf_dcp_data_stage_prepare_pop(struct usbhs_pkt *pkt,
					     int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);

	if (usbhs_pipe_is_busy(pipe))
		return 0;

	/*
	 * prepare pop for DCP should
	 *  - change DCP direction,
	 *  - clear fifo
	 *  - DATA1
	 */
	usbhs_pipe_disable(pipe);

	usbhs_pipe_sequence_data1(pipe); /* DATA1 */

	/* NOTE(review): usbhsf_fifo_select() result is ignored — confirm intended */
	usbhsf_fifo_select(pipe, fifo, 0);
	usbhsf_fifo_clear(pipe, fifo);
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * change handler to PIO pop
	 */
	pkt->handler = &usbhs_fifo_pio_pop_handler;

	return pkt->handler->prepare(pkt, is_done);
}
463 
/* DCP data stage (pop) entry point; hands over to PIO pop */
struct usbhs_pkt_handle usbhs_dcp_data_stage_in_handler = {
	.prepare = usbhsf_dcp_data_stage_prepare_pop,
};
467 
468 /*
469  *		PIO push handler
470  */
/*
 * write at most one max-packet of @pkt to the CFIFO by PIO.
 * Used as both ->prepare and ->try_run; returns 0 on progress or when
 * the fifo/pipe is busy (the irq path retries later).
 */
static int usbhsf_pio_try_push(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int total_len;
	int i, ret, len;
	int is_short;

	ret = usbhsf_fifo_select(pipe, fifo, 1);
	if (ret < 0)
		return 0;	/* fifo busy is not an error; retry later */

	ret = usbhs_pipe_is_accessible(pipe);
	if (ret < 0) {
		/* inaccessible pipe is not an error */
		ret = 0;
		goto usbhs_fifo_write_busy;
	}

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_write_busy;

	/* cap this chunk at one max-packet */
	buf		= pkt->buf    + pkt->actual;
	len		= pkt->length - pkt->actual;
	len		= min(len, maxp);
	total_len	= len;
	is_short	= total_len < maxp;

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		iowrite32_rep(addr, buf, len / 4);
		len %= 4;	/* 0..3 tail bytes remain */
		buf += total_len - len;
	}

	/* the rest operation */
	/* NOTE(review): tail bytes go to descending byte lanes (addr + 3 - i%4);
	 * presumably matches the controller's port byte ordering — confirm */
	for (i = 0; i < len; i++)
		iowrite8(buf[i], addr + (0x03 - (i & 0x03)));

	/*
	 * variable update
	 */
	pkt->actual += total_len;

	if (pkt->actual < pkt->length)
		*is_done = 0;		/* there are remainder data */
	else if (is_short)
		*is_done = 1;		/* short packet */
	else
		*is_done = !pkt->zero;	/* send zero packet ? */

	/*
	 * pipe/irq handling
	 */
	if (is_short)
		usbhsf_send_terminator(pipe, fifo);

	usbhsf_tx_irq_ctrl(pipe, !*is_done);
	usbhs_pipe_enable(pipe);

	dev_dbg(dev, "  send %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

	/*
	 * Transmission end
	 */
	if (*is_done) {
		if (usbhs_pipe_is_dcp(pipe))
			usbhs_dcp_control_transfer_done(pipe);
	}

	usbhsf_fifo_unselect(pipe, fifo);

	return 0;

usbhs_fifo_write_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	/*
	 * pipe is busy.
	 * retry in interrupt
	 */
	usbhsf_tx_irq_ctrl(pipe, 1);

	return ret;
}
568 
/* PIO push: the same worker serves both prepare and try_run */
struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
	.prepare = usbhsf_pio_try_push,
	.try_run = usbhsf_pio_try_push,
};
573 
574 /*
575  *		PIO pop handler
576  */
577 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
578 {
579 	struct usbhs_pipe *pipe = pkt->pipe;
580 
581 	if (usbhs_pipe_is_busy(pipe))
582 		return 0;
583 
584 	/*
585 	 * pipe enable to prepare packet receive
586 	 */
587 
588 	usbhs_pipe_enable(pipe);
589 	usbhsf_rx_irq_ctrl(pipe, 1);
590 
591 	return 0;
592 }
593 
/*
 * drain the received data currently in the CFIFO into @pkt by PIO.
 * Sets *is_done when all requested data arrived or a short packet was
 * seen; a zero-length packet only clears the fifo.
 */
static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct device *dev = usbhs_priv_to_dev(priv);
	struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv); /* CFIFO */
	void __iomem *addr = priv->base + fifo->port;
	u8 *buf;
	u32 data = 0;
	int maxp = usbhs_pipe_get_maxpacket(pipe);
	int rcv_len, len;
	int i, ret;
	int total_len = 0;

	ret = usbhsf_fifo_select(pipe, fifo, 0);
	if (ret < 0)
		return 0;	/* fifo busy is not an error; retry later */

	ret = usbhsf_fifo_barrier(priv, fifo);
	if (ret < 0)
		goto usbhs_fifo_read_busy;

	rcv_len = usbhsf_fifo_rcv_len(priv, fifo);

	/* copy the smaller of (remaining request, bytes in fifo) */
	buf		= pkt->buf    + pkt->actual;
	len		= pkt->length - pkt->actual;
	len		= min(len, rcv_len);
	total_len	= len;

	/*
	 * update actual length first here to decide disable pipe.
	 * if this pipe keeps BUF status and all data were popped,
	 * then, next interrupt/token will be issued again
	 */
	pkt->actual += total_len;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (total_len < maxp)) {		/* short packet */
		*is_done = 1;
		usbhsf_rx_irq_ctrl(pipe, 0);
		usbhs_pipe_disable(pipe);	/* disable pipe first */
	}

	/*
	 * Buffer clear if Zero-Length packet
	 *
	 * see
	 * "Operation" - "FIFO Buffer Memory" - "FIFO Port Function"
	 */
	if (0 == rcv_len) {
		usbhsf_fifo_clear(pipe, fifo);
		goto usbhs_fifo_read_end;
	}

	/*
	 * FIXME
	 *
	 * 32-bit access only
	 */
	if (len >= 4 && !((unsigned long)buf & 0x03)) {
		ioread32_rep(addr, buf, len / 4);
		len %= 4;	/* 0..3 tail bytes remain */
		buf += total_len - len;
	}

	/* the rest operation */
	/* tail bytes: read one 32-bit word and unpack it little-end first */
	for (i = 0; i < len; i++) {
		if (!(i & 0x03))
			data = ioread32(addr);

		buf[i] = (data >> ((i & 0x03) * 8)) & 0xff;
	}

usbhs_fifo_read_end:
	dev_dbg(dev, "  recv %d (%d/ %d/ %d/ %d)\n",
		usbhs_pipe_number(pipe),
		pkt->length, pkt->actual, *is_done, pkt->zero);

usbhs_fifo_read_busy:
	usbhsf_fifo_unselect(pipe, fifo);

	return ret;
}
677 
/* PIO pop: prepare arms the pipe, try_run drains the fifo */
struct usbhs_pkt_handle usbhs_fifo_pio_pop_handler = {
	.prepare = usbhsf_prepare_pop,
	.try_run = usbhsf_pio_try_pop,
};
682 
683 /*
684  *		DCP ctrol statge handler
685  */
686 static int usbhsf_ctrl_stage_end(struct usbhs_pkt *pkt, int *is_done)
687 {
688 	usbhs_dcp_control_transfer_done(pkt->pipe);
689 
690 	*is_done = 1;
691 
692 	return 0;
693 }
694 
/* control-stage terminator: same worker for prepare and try_run */
struct usbhs_pkt_handle usbhs_ctrl_stage_end_handler = {
	.prepare = usbhsf_ctrl_stage_end,
	.try_run = usbhsf_ctrl_stage_end,
};
699 
700 /*
701  *		DMA fifo functions
702  */
703 static struct dma_chan *usbhsf_dma_chan_get(struct usbhs_fifo *fifo,
704 					    struct usbhs_pkt *pkt)
705 {
706 	if (&usbhs_fifo_dma_push_handler == pkt->handler)
707 		return fifo->tx_chan;
708 
709 	if (&usbhs_fifo_dma_pop_handler == pkt->handler)
710 		return fifo->rx_chan;
711 
712 	return NULL;
713 }
714 
715 static struct usbhs_fifo *usbhsf_get_dma_fifo(struct usbhs_priv *priv,
716 					      struct usbhs_pkt *pkt)
717 {
718 	struct usbhs_fifo *fifo;
719 
720 	/* DMA :: D0FIFO */
721 	fifo = usbhsf_get_d0fifo(priv);
722 	if (usbhsf_dma_chan_get(fifo, pkt) &&
723 	    !usbhsf_fifo_is_busy(fifo))
724 		return fifo;
725 
726 	/* DMA :: D1FIFO */
727 	fifo = usbhsf_get_d1fifo(priv);
728 	if (usbhsf_dma_chan_get(fifo, pkt) &&
729 	    !usbhsf_fifo_is_busy(fifo))
730 		return fifo;
731 
732 	return NULL;
733 }
734 
/* gate the fifo's DMA request line by setting/clearing DREQE in SEL */
#define usbhsf_dma_start(p, f)	__usbhsf_dma_ctrl(p, f, DREQE)
#define usbhsf_dma_stop(p, f)	__usbhsf_dma_ctrl(p, f, 0)
static void __usbhsf_dma_ctrl(struct usbhs_pipe *pipe,
			      struct usbhs_fifo *fifo,
			      u16 dreqe)
{
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);

	usbhs_bset(priv, fifo->sel, DREQE, dreqe);
}
745 
/* (un)map pkt->buf for DMA through the platform's dma_map_ctrl hook */
#define usbhsf_dma_map(p)	__usbhsf_dma_map_ctrl(p, 1)
#define usbhsf_dma_unmap(p)	__usbhsf_dma_map_ctrl(p, 0)
static int __usbhsf_dma_map_ctrl(struct usbhs_pkt *pkt, int map)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct usbhs_pipe_info *info = usbhs_priv_to_pipeinfo(priv);

	return info->dma_map_ctrl(pkt, map);
}
756 
static void usbhsf_dma_complete(void *arg);
/*
 * tasklet: build a single-entry scatterlist covering pkt->trans bytes
 * and submit it to the dmaengine channel chosen by the packet handler,
 * then raise DREQE and kick the engine.
 */
static void usbhsf_dma_prepare_tasklet(unsigned long data)
{
	struct usbhs_pkt *pkt = (struct usbhs_pkt *)data;
	struct usbhs_pipe *pipe = pkt->pipe;
	struct usbhs_fifo *fifo = usbhs_pipe_to_fifo(pipe);
	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
	struct scatterlist sg;
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
	struct device *dev = usbhs_priv_to_dev(priv);
	enum dma_data_direction dir;
	dma_cookie_t cookie;

	dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	sg_init_table(&sg, 1);
	/*
	 * NOTE(review): pkt->dma is a dma_addr_t, yet it is fed to
	 * virt_to_page()/offset_in_page() which expect a virtual address.
	 * The sg_dma_address/sg_dma_len overrides below are what the slave
	 * prep appears to consume — confirm against the dmaengine backend.
	 */
	sg_set_page(&sg, virt_to_page(pkt->dma),
		    pkt->length, offset_in_page(pkt->dma));
	sg_dma_address(&sg) = pkt->dma + pkt->actual;
	sg_dma_len(&sg) = pkt->trans;

	desc = chan->device->device_prep_slave_sg(chan, &sg, 1, dir,
						  DMA_PREP_INTERRUPT |
						  DMA_CTRL_ACK);
	if (!desc)
		return;	/* NOTE(review): silent failure leaves the packet stalled */

	desc->callback		= usbhsf_dma_complete;
	desc->callback_param	= pipe;

	cookie = desc->tx_submit(desc);
	if (cookie < 0) {
		dev_err(dev, "Failed to submit dma descriptor\n");
		return;
	}

	dev_dbg(dev, "  %s %d (%d/ %d)\n",
		fifo->name, usbhs_pipe_number(pipe), pkt->length, pkt->zero);

	usbhsf_dma_start(pipe, fifo);
	dma_async_issue_pending(chan);
}
800 
801 /*
802  *		DMA push handler
803  */
804 static int usbhsf_dma_prepare_push(struct usbhs_pkt *pkt, int *is_done)
805 {
806 	struct usbhs_pipe *pipe = pkt->pipe;
807 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
808 	struct usbhs_fifo *fifo;
809 	int len = pkt->length - pkt->actual;
810 	int ret;
811 
812 	if (usbhs_pipe_is_busy(pipe))
813 		return 0;
814 
815 	/* use PIO if packet is less than pio_dma_border or pipe is DCP */
816 	if ((len < usbhs_get_dparam(priv, pio_dma_border)) ||
817 	    usbhs_pipe_is_dcp(pipe))
818 		goto usbhsf_pio_prepare_push;
819 
820 	if (len % 4) /* 32bit alignment */
821 		goto usbhsf_pio_prepare_push;
822 
823 	if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
824 		goto usbhsf_pio_prepare_push;
825 
826 	/* get enable DMA fifo */
827 	fifo = usbhsf_get_dma_fifo(priv, pkt);
828 	if (!fifo)
829 		goto usbhsf_pio_prepare_push;
830 
831 	if (usbhsf_dma_map(pkt) < 0)
832 		goto usbhsf_pio_prepare_push;
833 
834 	ret = usbhsf_fifo_select(pipe, fifo, 0);
835 	if (ret < 0)
836 		goto usbhsf_pio_prepare_push_unmap;
837 
838 	pkt->trans = len;
839 
840 	tasklet_init(&fifo->tasklet,
841 		     usbhsf_dma_prepare_tasklet,
842 		     (unsigned long)pkt);
843 
844 	tasklet_schedule(&fifo->tasklet);
845 
846 	return 0;
847 
848 usbhsf_pio_prepare_push_unmap:
849 	usbhsf_dma_unmap(pkt);
850 usbhsf_pio_prepare_push:
851 	/*
852 	 * change handler to PIO
853 	 */
854 	pkt->handler = &usbhs_fifo_pio_push_handler;
855 
856 	return pkt->handler->prepare(pkt, is_done);
857 }
858 
/*
 * ->dma_done of the push handler: account the DMA'd bytes, release the
 * fifo and the DMA mapping.  Completion stays pending (is_done = 0)
 * when a trailing zero-length packet was requested.
 */
static int usbhsf_dma_push_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;

	pkt->actual = pkt->trans;

	*is_done = !pkt->zero;	/* send zero packet ? */

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	return 0;
}
873 
/* DMA push: prepare submits the transfer, dma_done finalizes it */
struct usbhs_pkt_handle usbhs_fifo_dma_push_handler = {
	.prepare	= usbhsf_dma_prepare_push,
	.dma_done	= usbhsf_dma_push_done,
};
878 
879 /*
880  *		DMA pop handler
881  */
882 static int usbhsf_dma_try_pop(struct usbhs_pkt *pkt, int *is_done)
883 {
884 	struct usbhs_pipe *pipe = pkt->pipe;
885 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
886 	struct usbhs_fifo *fifo;
887 	int len, ret;
888 
889 	if (usbhs_pipe_is_busy(pipe))
890 		return 0;
891 
892 	if (usbhs_pipe_is_dcp(pipe))
893 		goto usbhsf_pio_prepare_pop;
894 
895 	/* get enable DMA fifo */
896 	fifo = usbhsf_get_dma_fifo(priv, pkt);
897 	if (!fifo)
898 		goto usbhsf_pio_prepare_pop;
899 
900 	if ((*(u32 *) pkt->buf + pkt->actual) & 0x7) /* 8byte alignment */
901 		goto usbhsf_pio_prepare_pop;
902 
903 	ret = usbhsf_fifo_select(pipe, fifo, 0);
904 	if (ret < 0)
905 		goto usbhsf_pio_prepare_pop;
906 
907 	/* use PIO if packet is less than pio_dma_border */
908 	len = usbhsf_fifo_rcv_len(priv, fifo);
909 	len = min(pkt->length - pkt->actual, len);
910 	if (len % 4) /* 32bit alignment */
911 		goto usbhsf_pio_prepare_pop_unselect;
912 
913 	if (len < usbhs_get_dparam(priv, pio_dma_border))
914 		goto usbhsf_pio_prepare_pop_unselect;
915 
916 	ret = usbhsf_fifo_barrier(priv, fifo);
917 	if (ret < 0)
918 		goto usbhsf_pio_prepare_pop_unselect;
919 
920 	if (usbhsf_dma_map(pkt) < 0)
921 		goto usbhsf_pio_prepare_pop_unselect;
922 
923 	/* DMA */
924 
925 	/*
926 	 * usbhs_fifo_dma_pop_handler :: prepare
927 	 * enabled irq to come here.
928 	 * but it is no longer needed for DMA. disable it.
929 	 */
930 	usbhsf_rx_irq_ctrl(pipe, 0);
931 
932 	pkt->trans = len;
933 
934 	tasklet_init(&fifo->tasklet,
935 		     usbhsf_dma_prepare_tasklet,
936 		     (unsigned long)pkt);
937 
938 	tasklet_schedule(&fifo->tasklet);
939 
940 	return 0;
941 
942 usbhsf_pio_prepare_pop_unselect:
943 	usbhsf_fifo_unselect(pipe, fifo);
944 usbhsf_pio_prepare_pop:
945 
946 	/*
947 	 * change handler to PIO
948 	 */
949 	pkt->handler = &usbhs_fifo_pio_pop_handler;
950 
951 	return pkt->handler->try_run(pkt, is_done);
952 }
953 
/*
 * ->dma_done of the pop handler: release fifo/mapping, account the
 * received bytes, and either complete the packet or re-arm the pipe
 * for more data.
 */
static int usbhsf_dma_pop_done(struct usbhs_pkt *pkt, int *is_done)
{
	struct usbhs_pipe *pipe = pkt->pipe;
	int maxp = usbhs_pipe_get_maxpacket(pipe);

	usbhsf_dma_stop(pipe, pipe->fifo);
	usbhsf_dma_unmap(pkt);
	usbhsf_fifo_unselect(pipe, pipe->fifo);

	pkt->actual += pkt->trans;

	if ((pkt->actual == pkt->length) ||	/* receive all data */
	    (pkt->trans < maxp)) {		/* short packet */
		*is_done = 1;
	} else {
		/* re-enable */
		usbhsf_prepare_pop(pkt, is_done);
	}

	return 0;
}
975 
/* DMA pop: prepare arms the pipe, try_run submits DMA, dma_done finalizes */
struct usbhs_pkt_handle usbhs_fifo_dma_pop_handler = {
	.prepare	= usbhsf_prepare_pop,
	.try_run	= usbhsf_dma_try_pop,
	.dma_done	= usbhsf_dma_pop_done
};
981 
982 /*
983  *		DMA setting
984  */
985 static bool usbhsf_dma_filter(struct dma_chan *chan, void *param)
986 {
987 	struct sh_dmae_slave *slave = param;
988 
989 	/*
990 	 * FIXME
991 	 *
992 	 * usbhs doesn't recognize id = 0 as valid DMA
993 	 */
994 	if (0 == slave->slave_id)
995 		return false;
996 
997 	chan->private = slave;
998 
999 	return true;
1000 }
1001 
1002 static void usbhsf_dma_quit(struct usbhs_priv *priv, struct usbhs_fifo *fifo)
1003 {
1004 	if (fifo->tx_chan)
1005 		dma_release_channel(fifo->tx_chan);
1006 	if (fifo->rx_chan)
1007 		dma_release_channel(fifo->rx_chan);
1008 
1009 	fifo->tx_chan = NULL;
1010 	fifo->rx_chan = NULL;
1011 }
1012 
1013 static void usbhsf_dma_init(struct usbhs_priv *priv,
1014 			    struct usbhs_fifo *fifo)
1015 {
1016 	struct device *dev = usbhs_priv_to_dev(priv);
1017 	dma_cap_mask_t mask;
1018 
1019 	dma_cap_zero(mask);
1020 	dma_cap_set(DMA_SLAVE, mask);
1021 	fifo->tx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1022 					    &fifo->tx_slave);
1023 
1024 	dma_cap_zero(mask);
1025 	dma_cap_set(DMA_SLAVE, mask);
1026 	fifo->rx_chan = dma_request_channel(mask, usbhsf_dma_filter,
1027 					    &fifo->rx_slave);
1028 
1029 	if (fifo->tx_chan || fifo->rx_chan)
1030 		dev_dbg(dev, "enable DMAEngine (%s%s%s)\n",
1031 			 fifo->name,
1032 			 fifo->tx_chan ? "[TX]" : "    ",
1033 			 fifo->rx_chan ? "[RX]" : "    ");
1034 }
1035 
1036 /*
1037  *		irq functions
1038  */
/*
 * BEMP irq: run ->try_run for every pipe whose bit is set in bempsts.
 */
static int usbhsf_irq_empty(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->bempsts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq empty [0x%04x]\n", irq_state->bempsts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->bempsts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_empty run_error %d : %d\n", i, ret);
	}

	return 0;
}
1068 
/*
 * BRDY irq: run ->try_run for every pipe whose bit is set in brdysts.
 * (mirrors usbhsf_irq_empty for the "ready" status)
 */
static int usbhsf_irq_ready(struct usbhs_priv *priv,
			    struct usbhs_irq_state *irq_state)
{
	struct usbhs_pipe *pipe;
	struct device *dev = usbhs_priv_to_dev(priv);
	int i, ret;

	if (!irq_state->brdysts) {
		dev_err(dev, "debug %s !!\n", __func__);
		return -EIO;
	}

	dev_dbg(dev, "irq ready [0x%04x]\n", irq_state->brdysts);

	/*
	 * search interrupted "pipe"
	 * not "uep".
	 */
	usbhs_for_each_pipe_with_dcp(pipe, priv, i) {
		if (!(irq_state->brdysts & (1 << i)))
			continue;

		ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_TRY_RUN);
		if (ret < 0)
			dev_err(dev, "irq_ready run_error %d : %d\n", i, ret);
	}

	return 0;
}
1098 
1099 static void usbhsf_dma_complete(void *arg)
1100 {
1101 	struct usbhs_pipe *pipe = arg;
1102 	struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
1103 	struct device *dev = usbhs_priv_to_dev(priv);
1104 	int ret;
1105 
1106 	ret = usbhsf_pkt_handler(pipe, USBHSF_PKT_DMA_DONE);
1107 	if (ret < 0)
1108 		dev_err(dev, "dma_complete run_error %d : %d\n",
1109 			usbhs_pipe_number(pipe), ret);
1110 }
1111 
1112 /*
1113  *		fifo init
1114  */
1115 void usbhs_fifo_init(struct usbhs_priv *priv)
1116 {
1117 	struct usbhs_mod *mod = usbhs_mod_get_current(priv);
1118 	struct usbhs_fifo *cfifo = usbhsf_get_cfifo(priv);
1119 	struct usbhs_fifo *d0fifo = usbhsf_get_d0fifo(priv);
1120 	struct usbhs_fifo *d1fifo = usbhsf_get_d1fifo(priv);
1121 
1122 	mod->irq_empty		= usbhsf_irq_empty;
1123 	mod->irq_ready		= usbhsf_irq_ready;
1124 	mod->irq_bempsts	= 0;
1125 	mod->irq_brdysts	= 0;
1126 
1127 	cfifo->pipe	= NULL;
1128 	cfifo->tx_chan	= NULL;
1129 	cfifo->rx_chan	= NULL;
1130 
1131 	d0fifo->pipe	= NULL;
1132 	d0fifo->tx_chan	= NULL;
1133 	d0fifo->rx_chan	= NULL;
1134 
1135 	d1fifo->pipe	= NULL;
1136 	d1fifo->tx_chan	= NULL;
1137 	d1fifo->rx_chan	= NULL;
1138 
1139 	usbhsf_dma_init(priv, usbhsf_get_d0fifo(priv));
1140 	usbhsf_dma_init(priv, usbhsf_get_d1fifo(priv));
1141 }
1142 
/* detach fifo irq handling and release the D0/D1 DMA channels */
void usbhs_fifo_quit(struct usbhs_priv *priv)
{
	struct usbhs_mod *mod = usbhs_mod_get_current(priv);

	mod->irq_empty		= NULL;
	mod->irq_ready		= NULL;
	mod->irq_bempsts	= 0;
	mod->irq_brdysts	= 0;

	usbhsf_dma_quit(priv, usbhsf_get_d0fifo(priv));
	usbhsf_dma_quit(priv, usbhsf_get_d1fifo(priv));
}
1155 
/*
 * fill in register offsets (port/sel/ctr) for the three fifos and the
 * DMA slave ids for D0/D1 from the platform data.  Always succeeds.
 */
int usbhs_fifo_probe(struct usbhs_priv *priv)
{
	struct usbhs_fifo *fifo;

	/* CFIFO */
	fifo = usbhsf_get_cfifo(priv);
	fifo->name	= "CFIFO";
	fifo->port	= CFIFO;
	fifo->sel	= CFIFOSEL;
	fifo->ctr	= CFIFOCTR;

	/* D0FIFO */
	fifo = usbhsf_get_d0fifo(priv);
	fifo->name	= "D0FIFO";
	fifo->port	= D0FIFO;
	fifo->sel	= D0FIFOSEL;
	fifo->ctr	= D0FIFOCTR;
	fifo->tx_slave.slave_id	= usbhs_get_dparam(priv, d0_tx_id);
	fifo->rx_slave.slave_id	= usbhs_get_dparam(priv, d0_rx_id);

	/* D1FIFO */
	fifo = usbhsf_get_d1fifo(priv);
	fifo->name	= "D1FIFO";
	fifo->port	= D1FIFO;
	fifo->sel	= D1FIFOSEL;
	fifo->ctr	= D1FIFOCTR;
	fifo->tx_slave.slave_id	= usbhs_get_dparam(priv, d1_tx_id);
	fifo->rx_slave.slave_id	= usbhs_get_dparam(priv, d1_rx_id);

	return 0;
}
1187 
/* nothing to tear down here; channels are released in usbhs_fifo_quit() */
void usbhs_fifo_remove(struct usbhs_priv *priv)
{
}
1191