1 /*
2  * drivers/s390/net/ctcm_fsms.c
3  *
4  * Copyright IBM Corp. 2001, 2007
5  * Authors:	Fritz Elfert (felfert@millenux.com)
6  * 		Peter Tiedemann (ptiedem@de.ibm.com)
7  *	MPC additions :
8  *		Belinda Thompson (belindat@us.ibm.com)
9  *		Andy Richter (richtera@us.ibm.com)
10  */
11 
12 #undef DEBUG
13 #undef DEBUGDATA
14 #undef DEBUGCCW
15 
16 #define KMSG_COMPONENT "ctcm"
17 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
18 
19 #include <linux/module.h>
20 #include <linux/init.h>
21 #include <linux/kernel.h>
22 #include <linux/slab.h>
23 #include <linux/errno.h>
24 #include <linux/types.h>
25 #include <linux/interrupt.h>
26 #include <linux/timer.h>
27 #include <linux/bitops.h>
28 
29 #include <linux/signal.h>
30 #include <linux/string.h>
31 
32 #include <linux/ip.h>
33 #include <linux/if_arp.h>
34 #include <linux/tcp.h>
35 #include <linux/skbuff.h>
36 #include <linux/ctype.h>
37 #include <net/dst.h>
38 
39 #include <linux/io.h>
40 #include <asm/ccwdev.h>
41 #include <asm/ccwgroup.h>
42 #include <linux/uaccess.h>
43 
44 #include <asm/idals.h>
45 
46 #include "fsm.h"
47 #include "cu3088.h"
48 
49 #include "ctcm_dbug.h"
50 #include "ctcm_main.h"
51 #include "ctcm_fsms.h"
52 
53 const char *dev_state_names[] = {
54 	[DEV_STATE_STOPPED]		= "Stopped",
55 	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
56 	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
57 	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
58 	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
59 	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
60 	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
61 	[DEV_STATE_RUNNING]		= "Running",
62 };
63 
64 const char *dev_event_names[] = {
65 	[DEV_EVENT_START]	= "Start",
66 	[DEV_EVENT_STOP]	= "Stop",
67 	[DEV_EVENT_RXUP]	= "RX up",
68 	[DEV_EVENT_TXUP]	= "TX up",
69 	[DEV_EVENT_RXDOWN]	= "RX down",
70 	[DEV_EVENT_TXDOWN]	= "TX down",
71 	[DEV_EVENT_RESTART]	= "Restart",
72 };
73 
74 const char *ctc_ch_event_names[] = {
75 	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
76 	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
77 	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
78 	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
79 	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
80 	[CTC_EVENT_ATTN]	= "Status ATTN",
81 	[CTC_EVENT_BUSY]	= "Status BUSY",
82 	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
83 	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
84 	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
85 	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
86 	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
87 	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
88 	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
89 	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
90 	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
91 	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
92 	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
93 	[CTC_EVENT_IRQ]		= "IRQ normal",
94 	[CTC_EVENT_FINSTAT]	= "IRQ final",
95 	[CTC_EVENT_TIMER]	= "Timer",
96 	[CTC_EVENT_START]	= "Start",
97 	[CTC_EVENT_STOP]	= "Stop",
98 	/*
99 	* additional MPC events
100 	*/
101 	[CTC_EVENT_SEND_XID]	= "XID Exchange",
102 	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
103 };
104 
105 const char *ctc_ch_state_names[] = {
106 	[CTC_STATE_IDLE]	= "Idle",
107 	[CTC_STATE_STOPPED]	= "Stopped",
108 	[CTC_STATE_STARTWAIT]	= "StartWait",
109 	[CTC_STATE_STARTRETRY]	= "StartRetry",
110 	[CTC_STATE_SETUPWAIT]	= "SetupWait",
111 	[CTC_STATE_RXINIT]	= "RX init",
112 	[CTC_STATE_TXINIT]	= "TX init",
113 	[CTC_STATE_RX]		= "RX",
114 	[CTC_STATE_TX]		= "TX",
115 	[CTC_STATE_RXIDLE]	= "RX idle",
116 	[CTC_STATE_TXIDLE]	= "TX idle",
117 	[CTC_STATE_RXERR]	= "RX error",
118 	[CTC_STATE_TXERR]	= "TX error",
119 	[CTC_STATE_TERM]	= "Terminating",
120 	[CTC_STATE_DTERM]	= "Restarting",
121 	[CTC_STATE_NOTOP]	= "Not operational",
122 	/*
123 	* additional MPC states
124 	*/
125 	[CH_XID0_PENDING]	= "Pending XID0 Start",
126 	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
127 	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
128 	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
129 	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
130 	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
131 	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
132 };
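/*
 * The name arrays above are indexed by the corresponding state/event
 * enums; they are only used for trace/debug output (fsm_getstate_str()
 * and the DBF messages below), not for any control flow.
 */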
133 
134 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
135 
136 /*
137  * ----- static ctcm actions for channel statemachine -----
138  *
139 */
140 static void chx_txdone(fsm_instance *fi, int event, void *arg);
141 static void chx_rx(fsm_instance *fi, int event, void *arg);
142 static void chx_rxidle(fsm_instance *fi, int event, void *arg);
143 static void chx_firstio(fsm_instance *fi, int event, void *arg);
144 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
145 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
146 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
147 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
148 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
149 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
150 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
151 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
152 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
153 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
154 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
155 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
156 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
157 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
158 
159 /*
160  * ----- static ctcmpc actions for ctcmpc channel statemachine -----
161  *
162 */
163 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
164 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
165 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
166 /* shared :
167 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
168 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
169 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
170 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
171 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
172 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
173 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
174 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
175 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
176 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
177 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
178 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
179 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
180 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
181 */
182 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
183 static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
184 static void ctcmpc_chx_resend(fsm_instance *, int, void *);
185 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
186 
187 /**
188  * Check the return code of a preceding ccw_device call, halt_IO etc.
189  *
190  * ch	:	The channel the error belongs to.
191  * rc	:	The error code (!= 0) to inspect.
192  */
193 void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
194 {
195 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
196 		"%s(%s): %s: %04x\n",
197 		CTCM_FUNTAIL, ch->id, msg, rc);
198 	switch (rc) {
199 	case -EBUSY:
200 		pr_info("%s: The communication peer is busy\n",
201 			ch->id);
202 		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
203 		break;
204 	case -ENODEV:
205 		pr_err("%s: The specified target device is not valid\n",
206 		       ch->id);
207 		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
208 		break;
209 	default:
210 		pr_err("An I/O operation resulted in error %04x\n",
211 		       rc);
212 		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
213 	}
214 }
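/*
 * Typical call pattern (a summary of the call sites below, not new
 * behaviour): an action routine starts I/O and, if the return code is
 * nonzero, reports it here so that rc gets mapped onto one of the
 * CTC_EVENT_IO_* events for the channel statemachine:
 *
 *	rc = ccw_device_start(ch->cdev, &ch->ccw[0], (unsigned long)ch, 0xff, 0);
 *	if (rc != 0)
 *		ctcm_ccw_check_rc(ch, rc, "normal RX");
 */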
215 
216 void ctcm_purge_skb_queue(struct sk_buff_head *q)
217 {
218 	struct sk_buff *skb;
219 
220 	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
221 
222 	while ((skb = skb_dequeue(q))) {
223 		atomic_dec(&skb->users);
224 		dev_kfree_skb_any(skb);
225 	}
226 }
227 
228 /**
229  * NOP action for statemachines
230  */
231 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
232 {
233 }
234 
235 /*
236  * Actions for channel - statemachines.
237  */
238 
239 /**
240  * Normal data has been sent. Free the corresponding
241  * skb (it's in io_queue), reset dev->tbusy and
242  * revert to idle state.
243  *
244  * fi		An instance of a channel statemachine.
245  * event	The event that just happened.
246  * arg		Generic pointer, cast from channel * upon call.
247  */
248 static void chx_txdone(fsm_instance *fi, int event, void *arg)
249 {
250 	struct channel *ch = arg;
251 	struct net_device *dev = ch->netdev;
252 	struct ctcm_priv *priv = dev->ml_priv;
253 	struct sk_buff *skb;
254 	int first = 1;
255 	int i;
256 	unsigned long duration;
257 	struct timespec done_stamp = current_kernel_time(); /* xtime */
258 
259 	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
260 
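	/* elapsed time since the write was started, in microseconds */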
261 	duration =
262 	    (done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
263 	    (done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
264 	if (duration > ch->prof.tx_time)
265 		ch->prof.tx_time = duration;
266 
267 	if (ch->irb->scsw.cmd.count != 0)
268 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
269 			"%s(%s): TX not complete, remaining %d bytes",
270 			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
271 	fsm_deltimer(&ch->timer);
272 	while ((skb = skb_dequeue(&ch->io_queue))) {
273 		priv->stats.tx_packets++;
274 		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
275 		if (first) {
276 			priv->stats.tx_bytes += 2;
277 			first = 0;
278 		}
279 		atomic_dec(&skb->users);
280 		dev_kfree_skb_irq(skb);
281 	}
282 	spin_lock(&ch->collect_lock);
283 	clear_normalized_cda(&ch->ccw[4]);
284 	if (ch->collect_len > 0) {
285 		int rc;
286 
287 		if (ctcm_checkalloc_buffer(ch)) {
288 			spin_unlock(&ch->collect_lock);
289 			return;
290 		}
291 		ch->trans_skb->data = ch->trans_skb_data;
292 		skb_reset_tail_pointer(ch->trans_skb);
293 		ch->trans_skb->len = 0;
294 		if (ch->prof.maxmulti < (ch->collect_len + 2))
295 			ch->prof.maxmulti = ch->collect_len + 2;
296 		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
297 			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
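		/* 2-byte block header: length of the block including the header itself */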
298 		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
299 		i = 0;
300 		while ((skb = skb_dequeue(&ch->collect_queue))) {
301 			skb_copy_from_linear_data(skb,
302 				skb_put(ch->trans_skb, skb->len), skb->len);
303 			priv->stats.tx_packets++;
304 			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
305 			atomic_dec(&skb->users);
306 			dev_kfree_skb_irq(skb);
307 			i++;
308 		}
309 		ch->collect_len = 0;
310 		spin_unlock(&ch->collect_lock);
311 		ch->ccw[1].count = ch->trans_skb->len;
312 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
313 		ch->prof.send_stamp = current_kernel_time(); /* xtime */
314 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
315 						(unsigned long)ch, 0xff, 0);
316 		ch->prof.doios_multi++;
317 		if (rc != 0) {
318 			priv->stats.tx_dropped += i;
319 			priv->stats.tx_errors += i;
320 			fsm_deltimer(&ch->timer);
321 			ctcm_ccw_check_rc(ch, rc, "chained TX");
322 		}
323 	} else {
324 		spin_unlock(&ch->collect_lock);
325 		fsm_newstate(fi, CTC_STATE_TXIDLE);
326 	}
327 	ctcm_clear_busy_do(dev);
328 }
329 
330 /**
331  * Initial data is sent.
332  * Notify device statemachine that we are up and
333  * running.
334  *
335  * fi		An instance of a channel statemachine.
336  * event	The event that just happened.
337  * arg		Generic pointer, cast from channel * upon call.
338  */
339 void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
340 {
341 	struct channel *ch = arg;
342 	struct net_device *dev = ch->netdev;
343 	struct ctcm_priv *priv = dev->ml_priv;
344 
345 	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
346 
347 	fsm_deltimer(&ch->timer);
348 	fsm_newstate(fi, CTC_STATE_TXIDLE);
349 	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
350 }
351 
352 /**
353  * Got normal data, check it for sanity, queue it up, allocate a new buffer,
354  * trigger the bottom half, and initiate the next read.
355  *
356  * fi		An instance of a channel statemachine.
357  * event	The event that just happened.
358  * arg		Generic pointer, cast from channel * upon call.
359  */
360 static void chx_rx(fsm_instance *fi, int event, void *arg)
361 {
362 	struct channel *ch = arg;
363 	struct net_device *dev = ch->netdev;
364 	struct ctcm_priv *priv = dev->ml_priv;
365 	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
366 	struct sk_buff *skb = ch->trans_skb;
367 	__u16 block_len = *((__u16 *)skb->data);
368 	int check_len;
369 	int rc;
370 
371 	fsm_deltimer(&ch->timer);
372 	if (len < 8) {
373 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
374 			"%s(%s): got packet with length %d < 8\n",
375 					CTCM_FUNTAIL, dev->name, len);
376 		priv->stats.rx_dropped++;
377 		priv->stats.rx_length_errors++;
378 						goto again;
379 	}
380 	if (len > ch->max_bufsize) {
381 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
382 			"%s(%s): got packet with length %d > %d\n",
383 				CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
384 		priv->stats.rx_dropped++;
385 		priv->stats.rx_length_errors++;
386 						goto again;
387 	}
388 
389 	/*
390 	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
391 	 */
392 	switch (ch->protocol) {
393 	case CTCM_PROTO_S390:
394 	case CTCM_PROTO_OS390:
395 		check_len = block_len + 2;
396 		break;
397 	default:
398 		check_len = block_len;
399 		break;
400 	}
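	/*
	 * Sanity check: the received length must match the announced block
	 * length, allowing for the two trailing garbage bytes tolerated above.
	 */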
401 	if ((len < block_len) || (len > check_len)) {
402 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
403 			"%s(%s): got block length %d != rx length %d\n",
404 				CTCM_FUNTAIL, dev->name, block_len, len);
405 		if (do_debug)
406 			ctcmpc_dump_skb(skb, 0);
407 
408 		*((__u16 *)skb->data) = len;
409 		priv->stats.rx_dropped++;
410 		priv->stats.rx_length_errors++;
411 						goto again;
412 	}
413 	if (block_len > 2) {
414 		*((__u16 *)skb->data) = block_len - 2;
415 		ctcm_unpack_skb(ch, skb);
416 	}
417  again:
418 	skb->data = ch->trans_skb_data;
419 	skb_reset_tail_pointer(skb);
420 	skb->len = 0;
421 	if (ctcm_checkalloc_buffer(ch))
422 		return;
423 	ch->ccw[1].count = ch->max_bufsize;
424 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
425 					(unsigned long)ch, 0xff, 0);
426 	if (rc != 0)
427 		ctcm_ccw_check_rc(ch, rc, "normal RX");
428 }
429 
430 /**
431  * Initialize connection by sending a __u16 of value 0.
432  *
433  * fi		An instance of a channel statemachine.
434  * event	The event that just happened.
435  * arg		Generic pointer, cast from channel * upon call.
436  */
437 static void chx_firstio(fsm_instance *fi, int event, void *arg)
438 {
439 	int rc;
440 	struct channel *ch = arg;
441 	int fsmstate = fsm_getstate(fi);
442 
443 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
444 		"%s(%s) : %02x",
445 		CTCM_FUNTAIL, ch->id, fsmstate);
446 
447 	ch->sense_rc = 0;	/* reset unit check report control */
448 	if (fsmstate == CTC_STATE_TXIDLE)
449 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
450 			"%s(%s): remote side issued READ?, init.\n",
451 				CTCM_FUNTAIL, ch->id);
452 	fsm_deltimer(&ch->timer);
453 	if (ctcm_checkalloc_buffer(ch))
454 		return;
455 	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
456 	    (ch->protocol == CTCM_PROTO_OS390)) {
457 		/* OS/390 resp. z/OS */
458 		if (CHANNEL_DIRECTION(ch->flags) == READ) {
459 			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
460 			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
461 				     CTC_EVENT_TIMER, ch);
462 			chx_rxidle(fi, event, arg);
463 		} else {
464 			struct net_device *dev = ch->netdev;
465 			struct ctcm_priv *priv = dev->ml_priv;
466 			fsm_newstate(fi, CTC_STATE_TXIDLE);
467 			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
468 		}
469 		return;
470 	}
471 	/*
472 	 * Don't setup a timer for receiving the initial RX frame
473 	 * if in compatibility mode, since VM TCP delays the initial
474 	 * frame until it has some data to send.
475 	 */
476 	if ((CHANNEL_DIRECTION(ch->flags) == WRITE) ||
477 	    (ch->protocol != CTCM_PROTO_S390))
478 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
479 
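	/* handshake frame: transfer only the 2-byte initial block length, no payload */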
480 	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
481 	ch->ccw[1].count = 2;	/* Transfer only length */
482 
483 	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
484 		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
485 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
486 					(unsigned long)ch, 0xff, 0);
487 	if (rc != 0) {
488 		fsm_deltimer(&ch->timer);
489 		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
490 		ctcm_ccw_check_rc(ch, rc, "init IO");
491 	}
492 	/*
493 	 * If in compatibility mode since we don't setup a timer, we
494 	 * also signal RX channel up immediately. This enables us
495 	 * to send packets early which in turn usually triggers some
496 	 * reply from VM TCP which brings up the RX channel to it's
497 	 * final state.
498 	 */
499 	if ((CHANNEL_DIRECTION(ch->flags) == READ) &&
500 	    (ch->protocol == CTCM_PROTO_S390)) {
501 		struct net_device *dev = ch->netdev;
502 		struct ctcm_priv *priv = dev->ml_priv;
503 		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
504 	}
505 }
506 
507 /**
508  * Got initial data, check it. If OK,
509  * notify device statemachine that we are up and
510  * running.
511  *
512  * fi		An instance of a channel statemachine.
513  * event	The event that just happened.
514  * arg		Generic pointer, cast from channel * upon call.
515  */
516 static void chx_rxidle(fsm_instance *fi, int event, void *arg)
517 {
518 	struct channel *ch = arg;
519 	struct net_device *dev = ch->netdev;
520 	struct ctcm_priv *priv = dev->ml_priv;
521 	__u16 buflen;
522 	int rc;
523 
524 	fsm_deltimer(&ch->timer);
525 	buflen = *((__u16 *)ch->trans_skb->data);
526 	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
527 			__func__, dev->name, buflen);
528 
529 	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
530 		if (ctcm_checkalloc_buffer(ch))
531 			return;
532 		ch->ccw[1].count = ch->max_bufsize;
533 		fsm_newstate(fi, CTC_STATE_RXIDLE);
534 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
535 						(unsigned long)ch, 0xff, 0);
536 		if (rc != 0) {
537 			fsm_newstate(fi, CTC_STATE_RXINIT);
538 			ctcm_ccw_check_rc(ch, rc, "initial RX");
539 		} else
540 			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
541 	} else {
542 		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
543 				__func__, dev->name,
544 					buflen, CTCM_INITIAL_BLOCKLEN);
545 		chx_firstio(fi, event, arg);
546 	}
547 }
548 
549 /**
550  * Set channel into extended mode.
551  *
552  * fi		An instance of a channel statemachine.
553  * event	The event that just happened.
554  * arg		Generic pointer, cast from channel * upon call.
555  */
556 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
557 {
558 	struct channel *ch = arg;
559 	int rc;
560 	unsigned long saveflags = 0;
561 	int timeout = CTCM_TIME_5_SEC;
562 
563 	fsm_deltimer(&ch->timer);
564 	if (IS_MPC(ch)) {
565 		timeout = 1500;
566 		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
567 				__func__, smp_processor_id(), ch, ch->id);
568 	}
569 	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
570 	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
571 	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
572 
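	/* ch->ccw[6..7] hold the set-extended-mode channel program; it is not
	 * built here but reused as prepared during channel setup outside this file */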
573 	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
574 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
575 			/* Such conditional locking is nondeterministic in
576 			 * static view. => ignore sparse warnings here. */
577 
578 	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
579 					(unsigned long)ch, 0xff, 0);
580 	if (event == CTC_EVENT_TIMER)	/* see above comments */
581 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
582 	if (rc != 0) {
583 		fsm_deltimer(&ch->timer);
584 		fsm_newstate(fi, CTC_STATE_STARTWAIT);
585 		ctcm_ccw_check_rc(ch, rc, "set Mode");
586 	} else
587 		ch->retry = 0;
588 }
589 
590 /**
591  * Setup channel.
592  *
593  * fi		An instance of a channel statemachine.
594  * event	The event that just happened.
595  * arg		Generic pointer, cast from channel * upon call.
596  */
597 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
598 {
599 	struct channel *ch	= arg;
600 	unsigned long saveflags;
601 	int rc;
602 
603 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
604 			CTCM_FUNTAIL, ch->id,
605 			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
606 
607 	if (ch->trans_skb != NULL) {
608 		clear_normalized_cda(&ch->ccw[1]);
609 		dev_kfree_skb(ch->trans_skb);
610 		ch->trans_skb = NULL;
611 	}
612 	if (CHANNEL_DIRECTION(ch->flags) == READ) {
613 		ch->ccw[1].cmd_code = CCW_CMD_READ;
614 		ch->ccw[1].flags = CCW_FLAG_SLI;
615 		ch->ccw[1].count = 0;
616 	} else {
617 		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
618 		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
619 		ch->ccw[1].count = 0;
620 	}
621 	if (ctcm_checkalloc_buffer(ch)) {
622 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
623 			"%s(%s): %s trans_skb alloc delayed "
624 			"until first transfer",
625 			CTCM_FUNTAIL, ch->id,
626 			(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
627 	}
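	/*
	 * Build the basic channel program: ccw[0] prepare, ccw[1] read/write,
	 * ccw[2] nop (joint CE+DE).  ccw[3..5] get a copy of ccw[0..2] below;
	 * that copy is used by the TX retry path, which points ccw[4] at the
	 * skb data (see ctcm_chx_txretry()).
	 */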
628 	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
629 	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
630 	ch->ccw[0].count = 0;
631 	ch->ccw[0].cda = 0;
632 	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
633 	ch->ccw[2].flags = CCW_FLAG_SLI;
634 	ch->ccw[2].count = 0;
635 	ch->ccw[2].cda = 0;
636 	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
637 	ch->ccw[4].cda = 0;
638 	ch->ccw[4].flags &= ~CCW_FLAG_IDA;
639 
640 	fsm_newstate(fi, CTC_STATE_STARTWAIT);
641 	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
642 	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
643 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
644 	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
645 	if (rc != 0) {
646 		if (rc != -EBUSY)
647 			fsm_deltimer(&ch->timer);
648 		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
649 	}
650 }
651 
652 /**
653  * Shutdown a channel.
654  *
655  * fi		An instance of a channel statemachine.
656  * event	The event that just happened.
657  * arg		Generic pointer, cast from channel * upon call.
658  */
659 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
660 {
661 	struct channel *ch = arg;
662 	unsigned long saveflags = 0;
663 	int rc;
664 	int oldstate;
665 
666 	fsm_deltimer(&ch->timer);
667 	if (IS_MPC(ch))
668 		fsm_deltimer(&ch->sweep_timer);
669 
670 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
671 
672 	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
673 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
674 			/* Such conditional locking is nondeterministic in
675 			 * static view. => ignore sparse warnings here. */
676 	oldstate = fsm_getstate(fi);
677 	fsm_newstate(fi, CTC_STATE_TERM);
678 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
679 
680 	if (event == CTC_EVENT_STOP)
681 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
682 			/* see remark above about conditional locking */
683 
684 	if (rc != 0 && rc != -EBUSY) {
685 		fsm_deltimer(&ch->timer);
686 		if (event != CTC_EVENT_STOP) {
687 			fsm_newstate(fi, oldstate);
688 			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
689 		}
690 	}
691 }
692 
693 /**
694  * Cleanup helper for chx_fail and chx_stopped:
695  * clean up the channel's queues and notify the interface statemachine.
696  *
697  * fi		An instance of a channel statemachine.
698  * state	The next state (depending on caller).
699  * ch		The channel to operate on.
700  */
701 static void ctcm_chx_cleanup(fsm_instance *fi, int state,
702 		struct channel *ch)
703 {
704 	struct net_device *dev = ch->netdev;
705 	struct ctcm_priv *priv = dev->ml_priv;
706 
707 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
708 			"%s(%s): %s[%d]\n",
709 			CTCM_FUNTAIL, dev->name, ch->id, state);
710 
711 	fsm_deltimer(&ch->timer);
712 	if (IS_MPC(ch))
713 		fsm_deltimer(&ch->sweep_timer);
714 
715 	fsm_newstate(fi, state);
716 	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
717 		clear_normalized_cda(&ch->ccw[1]);
718 		dev_kfree_skb_any(ch->trans_skb);
719 		ch->trans_skb = NULL;
720 	}
721 
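	/* reset the MPC TH sequence numbering for a clean restart */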
722 	ch->th_seg = 0x00;
723 	ch->th_seq_num = 0x00;
724 	if (CHANNEL_DIRECTION(ch->flags) == READ) {
725 		skb_queue_purge(&ch->io_queue);
726 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
727 	} else {
728 		ctcm_purge_skb_queue(&ch->io_queue);
729 		if (IS_MPC(ch))
730 			ctcm_purge_skb_queue(&ch->sweep_queue);
731 		spin_lock(&ch->collect_lock);
732 		ctcm_purge_skb_queue(&ch->collect_queue);
733 		ch->collect_len = 0;
734 		spin_unlock(&ch->collect_lock);
735 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
736 	}
737 }
738 
739 /**
740  * A channel has successfully been halted.
741  * Clean up its queue and notify the interface statemachine.
742  *
743  * fi		An instance of a channel statemachine.
744  * event	The event that just happened.
745  * arg		Generic pointer, cast from channel * upon call.
746  */
747 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
748 {
749 	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
750 }
751 
752 /**
753  * A stop command from the device statemachine arrived while we are in
754  * the not-operational state. Set the state to stopped.
755  *
756  * fi		An instance of a channel statemachine.
757  * event	The event that just happened.
758  * arg		Generic pointer, cast from channel * upon call.
759  */
760 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
761 {
762 	fsm_newstate(fi, CTC_STATE_STOPPED);
763 }
764 
765 /**
766  * A machine check for no path, not-operational status or a gone device
767  * has happened.
768  * Clean up the queue and notify the interface statemachine.
769  *
770  * fi		An instance of a channel statemachine.
771  * event	The event that just happened.
772  * arg		Generic pointer, cast from channel * upon call.
773  */
774 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
775 {
776 	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
777 }
778 
779 /**
780  * Handle error during setup of channel.
781  *
782  * fi		An instance of a channel statemachine.
783  * event	The event that just happened.
784  * arg		Generic pointer, cast from channel * upon call.
785  */
786 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
787 {
788 	struct channel *ch = arg;
789 	struct net_device *dev = ch->netdev;
790 	struct ctcm_priv *priv = dev->ml_priv;
791 
792 	/*
793 	 * Special case: Got UC_RCRESET on setmode.
794 	 * This means that remote side isn't setup. In this case
795 	 * simply retry after some 10 secs...
796 	 */
797 	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
798 	    ((event == CTC_EVENT_UC_RCRESET) ||
799 	     (event == CTC_EVENT_UC_RSRESET))) {
800 		fsm_newstate(fi, CTC_STATE_STARTRETRY);
801 		fsm_deltimer(&ch->timer);
802 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
803 		if (!IS_MPC(ch) && (CHANNEL_DIRECTION(ch->flags) == READ)) {
804 			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
805 			if (rc != 0)
806 				ctcm_ccw_check_rc(ch, rc,
807 					"HaltIO in chx_setuperr");
808 		}
809 		return;
810 	}
811 
812 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
813 		"%s(%s) : %s error during %s channel setup state=%s\n",
814 		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
815 		(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX",
816 		fsm_getstate_str(fi));
817 
818 	if (CHANNEL_DIRECTION(ch->flags) == READ) {
819 		fsm_newstate(fi, CTC_STATE_RXERR);
820 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
821 	} else {
822 		fsm_newstate(fi, CTC_STATE_TXERR);
823 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
824 	}
825 }
826 
827 /**
828  * Restart a channel after an error.
829  *
830  * fi		An instance of a channel statemachine.
831  * event	The event that just happened.
832  * arg		Generic pointer, cast from channel * upon call.
833  */
834 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
835 {
836 	struct channel *ch = arg;
837 	struct net_device *dev = ch->netdev;
838 	unsigned long saveflags = 0;
839 	int oldstate;
840 	int rc;
841 
842 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
843 		"%s: %s[%d] of %s\n",
844 			CTCM_FUNTAIL, ch->id, event, dev->name);
845 
846 	fsm_deltimer(&ch->timer);
847 
848 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
849 	oldstate = fsm_getstate(fi);
850 	fsm_newstate(fi, CTC_STATE_STARTWAIT);
851 	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
852 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
853 			/* Such conditional locking is a known problem for
854 			 * sparse because it is nondeterministic in static view.
855 			 * Warnings should be ignored here. */
856 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
857 	if (event == CTC_EVENT_TIMER)
858 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
859 	if (rc != 0) {
860 		if (rc != -EBUSY) {
861 			fsm_deltimer(&ch->timer);
862 			fsm_newstate(fi, oldstate);
863 		}
864 		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
865 	}
866 }
867 
868 /**
869  * Handle error during RX initial handshake (exchange of
870  * 0-length block header)
871  *
872  * fi		An instance of a channel statemachine.
873  * event	The event that just happened.
874  * arg		Generic pointer, cast from channel * upon call.
875  */
876 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
877 {
878 	struct channel *ch = arg;
879 	struct net_device *dev = ch->netdev;
880 	struct ctcm_priv *priv = dev->ml_priv;
881 
882 	if (event == CTC_EVENT_TIMER) {
883 		if (!IS_MPCDEV(dev))
884 			/* TODO : check if MPC deletes timer somewhere */
885 			fsm_deltimer(&ch->timer);
886 		if (ch->retry++ < 3)
887 			ctcm_chx_restart(fi, event, arg);
888 		else {
889 			fsm_newstate(fi, CTC_STATE_RXERR);
890 			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
891 		}
892 	} else {
893 		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
894 			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
895 			ctc_ch_event_names[event], fsm_getstate_str(fi));
896 
897 		dev_warn(&dev->dev,
898 			"Initialization failed with RX/TX init handshake "
899 			"error %s\n", ctc_ch_event_names[event]);
900 	}
901 }
902 
903 /**
904  * Notify device statemachine if we gave up initialization
905  * of RX channel.
906  *
907  * fi		An instance of a channel statemachine.
908  * event	The event that just happened.
909  * arg		Generic pointer, cast from channel * upon call.
910  */
911 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
912 {
913 	struct channel *ch = arg;
914 	struct net_device *dev = ch->netdev;
915 	struct ctcm_priv *priv = dev->ml_priv;
916 
917 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
918 			"%s(%s): RX %s busy, init. fail",
919 				CTCM_FUNTAIL, dev->name, ch->id);
920 	fsm_newstate(fi, CTC_STATE_RXERR);
921 	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
922 }
923 
924 /**
925  * Handle RX Unit check remote reset (remote disconnected)
926  *
927  * fi		An instance of a channel statemachine.
928  * event	The event that just happened.
929  * arg		Generic pointer, cast from channel * upon call.
930  */
931 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
932 {
933 	struct channel *ch = arg;
934 	struct channel *ch2;
935 	struct net_device *dev = ch->netdev;
936 	struct ctcm_priv *priv = dev->ml_priv;
937 
938 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
939 			"%s: %s: remote disconnect - re-init ...",
940 				CTCM_FUNTAIL, dev->name);
941 	fsm_deltimer(&ch->timer);
942 	/*
943 	 * Notify device statemachine
944 	 */
945 	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
946 	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
947 
948 	fsm_newstate(fi, CTC_STATE_DTERM);
949 	ch2 = priv->channel[WRITE];
950 	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
951 
952 	ccw_device_halt(ch->cdev, (unsigned long)ch);
953 	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
954 }
955 
956 /**
957  * Handle error during TX channel initialization.
958  *
959  * fi		An instance of a channel statemachine.
960  * event	The event that just happened.
961  * arg		Generic pointer, cast from channel * upon call.
962  */
963 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
964 {
965 	struct channel *ch = arg;
966 	struct net_device *dev = ch->netdev;
967 	struct ctcm_priv *priv = dev->ml_priv;
968 
969 	if (event == CTC_EVENT_TIMER) {
970 		fsm_deltimer(&ch->timer);
971 		if (ch->retry++ < 3)
972 			ctcm_chx_restart(fi, event, arg);
973 		else {
974 			fsm_newstate(fi, CTC_STATE_TXERR);
975 			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
976 		}
977 	} else {
978 		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
979 			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
980 			ctc_ch_event_names[event], fsm_getstate_str(fi));
981 
982 		dev_warn(&dev->dev,
983 			"Initialization failed with RX/TX init handshake "
984 			"error %s\n", ctc_ch_event_names[event]);
985 	}
986 }
987 
988 /**
989  * Handle TX timeout by retrying operation.
990  *
991  * fi		An instance of a channel statemachine.
992  * event	The event that just happened.
993  * arg		Generic pointer, cast from channel * upon call.
994  */
995 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
996 {
997 	struct channel *ch = arg;
998 	struct net_device *dev = ch->netdev;
999 	struct ctcm_priv *priv = dev->ml_priv;
1000 	struct sk_buff *skb;
1001 
1002 	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
1003 			__func__, smp_processor_id(), ch, ch->id);
1004 
1005 	fsm_deltimer(&ch->timer);
1006 	if (ch->retry++ > 3) {
1007 		struct mpc_group *gptr = priv->mpcg;
1008 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1009 				"%s: %s: retries exceeded",
1010 					CTCM_FUNTAIL, ch->id);
1011 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1012 		/* call restart if not MPC or if MPC and mpcg fsm is ready.
1013 			use gptr as mpc indicator */
1014 		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
1015 			ctcm_chx_restart(fi, event, arg);
1016 				goto done;
1017 	}
1018 
1019 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1020 			"%s : %s: retry %d",
1021 				CTCM_FUNTAIL, ch->id, ch->retry);
1022 	skb = skb_peek(&ch->io_queue);
1023 	if (skb) {
1024 		int rc = 0;
1025 		unsigned long saveflags = 0;
1026 		clear_normalized_cda(&ch->ccw[4]);
1027 		ch->ccw[4].count = skb->len;
1028 		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1029 			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1030 				"%s: %s: IDAL alloc failed",
1031 						CTCM_FUNTAIL, ch->id);
1032 			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1033 			ctcm_chx_restart(fi, event, arg);
1034 				goto done;
1035 		}
1036 		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
1037 		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
1038 			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1039 			/* Such conditional locking is a known problem for
1040 			 * sparse because it is nondeterministic in static view.
1041 			 * Warnings should be ignored here. */
1042 		if (do_debug_ccw)
1043 			ctcmpc_dumpit((char *)&ch->ccw[3],
1044 					sizeof(struct ccw1) * 3);
1045 
1046 		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1047 						(unsigned long)ch, 0xff, 0);
1048 		if (event == CTC_EVENT_TIMER)
1049 			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1050 					saveflags);
1051 		if (rc != 0) {
1052 			fsm_deltimer(&ch->timer);
1053 			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
1054 			ctcm_purge_skb_queue(&ch->io_queue);
1055 		}
1056 	}
1057 done:
1058 	return;
1059 }
1060 
1061 /**
1062  * Handle fatal errors during an I/O command.
1063  *
1064  * fi		An instance of a channel statemachine.
1065  * event	The event that just happened.
1066  * arg		Generic pointer, cast from channel * upon call.
1067  */
1068 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
1069 {
1070 	struct channel *ch = arg;
1071 	struct net_device *dev = ch->netdev;
1072 	struct ctcm_priv *priv = dev->ml_priv;
1073 	int rd = CHANNEL_DIRECTION(ch->flags);
1074 
1075 	fsm_deltimer(&ch->timer);
1076 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
1077 		"%s: %s: %s unrecoverable channel error",
1078 			CTCM_FUNTAIL, ch->id, rd == READ ? "RX" : "TX");
1079 
1080 	if (IS_MPC(ch)) {
1081 		priv->stats.tx_dropped++;
1082 		priv->stats.tx_errors++;
1083 	}
1084 	if (rd == READ) {
1085 		fsm_newstate(fi, CTC_STATE_RXERR);
1086 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
1087 	} else {
1088 		fsm_newstate(fi, CTC_STATE_TXERR);
1089 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1090 	}
1091 }
1092 
1093 /*
1094  * The ctcm statemachine for a channel.
1095  */
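/*
 * Each entry maps a (state, event) pair to the action routine that the
 * generic fsm dispatcher calls when that event arrives in that state.
 */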
1096 const fsm_node ch_fsm[] = {
1097 	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
1098 	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
1099 	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1100 	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1101 
1102 	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
1103 	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
1104 	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1105 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1106 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
1107 
1108 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1109 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1110 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1111 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
1112 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1113 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1114 
1115 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1116 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1117 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1118 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1119 
1120 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1121 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1122 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	chx_firstio  },
1123 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1124 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1125 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1126 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1127 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1128 
1129 	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1130 	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1131 	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	chx_rxidle  },
1132 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
1133 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
1134 	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
1135 	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
1136 	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1137 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	chx_firstio  },
1138 	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1139 
1140 	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1141 	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1142 	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	chx_rx  },
1143 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
1144 	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1145 	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1146 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	chx_rx  },
1147 
1148 	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1149 	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1150 	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
1151 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
1152 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
1153 	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
1154 	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1155 	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1156 
1157 	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1158 	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1159 	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	chx_firstio  },
1160 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1161 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1162 	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1163 	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1164 
1165 	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
1166 	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
1167 	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
1168 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1169 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1170 	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1171 
1172 	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1173 	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
1174 	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1175 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1176 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1177 	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1178 
1179 	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
1180 	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
1181 	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	chx_txdone  },
1182 	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_txretry  },
1183 	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_txretry  },
1184 	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
1185 	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1186 	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1187 
1188 	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1189 	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1190 	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1191 	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1192 };
1193 
1194 int ch_fsm_len = ARRAY_SIZE(ch_fsm);
1195 
1196 /*
1197  * MPC actions for mpc channel statemachine
1198  * handling of MPC protocol requires extra
1199  * statemachine and actions which are prefixed ctcmpc_ .
1200  * The ctc_ch_states and ctc_ch_state_names,
1201  * ctc_ch_events and ctc_ch_event_names share the ctcm definitions
1202  * which are expanded by some elements.
1203  */
1204 
1205 /*
1206  * Actions for mpc channel statemachine.
1207  */
1208 
1209 /**
1210  * Normal data has been sent. Free the corresponding
1211  * skb (it's in io_queue), reset dev->tbusy and
1212  * revert to idle state.
1213  *
1214  * fi		An instance of a channel statemachine.
1215  * event	The event that just happened.
1216  * arg		Generic pointer, cast from channel * upon call.
1217  */
1218 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1219 {
1220 	struct channel		*ch = arg;
1221 	struct net_device	*dev = ch->netdev;
1222 	struct ctcm_priv	*priv = dev->ml_priv;
1223 	struct mpc_group	*grp = priv->mpcg;
1224 	struct sk_buff		*skb;
1225 	int		first = 1;
1226 	int		i;
1227 	__u32		data_space;
1228 	unsigned long	duration;
1229 	struct sk_buff	*peekskb;
1230 	int		rc;
1231 	struct th_header *header;
1232 	struct pdu	*p_header;
1233 	struct timespec done_stamp = current_kernel_time(); /* xtime */
1234 
1235 	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
1236 			__func__, dev->name, smp_processor_id());
1237 
1238 	duration =
1239 		(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
1240 		(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
1241 	if (duration > ch->prof.tx_time)
1242 		ch->prof.tx_time = duration;
1243 
1244 	if (ch->irb->scsw.cmd.count != 0)
1245 		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
1246 			"%s(%s): TX not complete, remaining %d bytes",
1247 			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
1248 	fsm_deltimer(&ch->timer);
1249 	while ((skb = skb_dequeue(&ch->io_queue))) {
1250 		priv->stats.tx_packets++;
1251 		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
1252 		if (first) {
1253 			priv->stats.tx_bytes += 2;
1254 			first = 0;
1255 		}
1256 		atomic_dec(&skb->users);
1257 		dev_kfree_skb_irq(skb);
1258 	}
1259 	spin_lock(&ch->collect_lock);
1260 	clear_normalized_cda(&ch->ccw[4]);
1261 	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1262 		spin_unlock(&ch->collect_lock);
1263 		fsm_newstate(fi, CTC_STATE_TXIDLE);
1264 				goto done;
1265 	}
1266 
1267 	if (ctcm_checkalloc_buffer(ch)) {
1268 		spin_unlock(&ch->collect_lock);
1269 				goto done;
1270 	}
1271 	ch->trans_skb->data = ch->trans_skb_data;
1272 	skb_reset_tail_pointer(ch->trans_skb);
1273 	ch->trans_skb->len = 0;
1274 	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1275 		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1276 	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1277 		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1278 	i = 0;
1279 	p_header = NULL;
1280 	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
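	/* room left in the outgoing block once the TH header is accounted for */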
1281 
1282 	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
1283 		       " data_space:%04x\n",
1284 		       __func__, data_space);
1285 
1286 	while ((skb = skb_dequeue(&ch->collect_queue))) {
1287 		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
1288 		p_header = (struct pdu *)
1289 			(skb_tail_pointer(ch->trans_skb) - skb->len);
1290 		p_header->pdu_flag = 0x00;
1291 		if (skb->protocol == ntohs(ETH_P_SNAP))
1292 			p_header->pdu_flag |= 0x60;
1293 		else
1294 			p_header->pdu_flag |= 0x20;
1295 
1296 		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1297 				__func__, ch->trans_skb->len);
1298 		CTCM_PR_DBGDATA("%s: pdu header and data for up"
1299 				" to 32 bytes sent to vtam\n", __func__);
1300 		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
1301 
1302 		ch->collect_len -= skb->len;
1303 		data_space -= skb->len;
1304 		priv->stats.tx_packets++;
1305 		priv->stats.tx_bytes += skb->len;
1306 		atomic_dec(&skb->users);
1307 		dev_kfree_skb_any(skb);
1308 		peekskb = skb_peek(&ch->collect_queue);
1309 		if (!peekskb || peekskb->len > data_space)	/* drained or next won't fit */
1310 			break;
1311 		i++;
1312 	}
1313 	/* p_header points to the last one we handled */
1314 	if (p_header)
1315 		p_header->pdu_flag |= PDU_LAST;	/*Say it's the last one*/
1316 	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
1317 	if (!header) {
1318 		spin_unlock(&ch->collect_lock);
1319 		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1320 				goto done;
1321 	}
1322 	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
1323 	ch->th_seq_num++;
1324 	header->th_seq_num = ch->th_seq_num;
1325 
1326 	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
1327 					__func__, ch->th_seq_num);
1328 
1329 	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
1330 		TH_HEADER_LENGTH);	/* put the TH on the packet */
1331 
1332 	kfree(header);
1333 
1334 	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1335 		       __func__, ch->trans_skb->len);
1336 	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
1337 			"data to vtam from collect_q\n", __func__);
1338 	CTCM_D3_DUMP((char *)ch->trans_skb->data,
1339 				min_t(int, ch->trans_skb->len, 50));
1340 
1341 	spin_unlock(&ch->collect_lock);
1342 	clear_normalized_cda(&ch->ccw[1]);
1343 	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1344 		dev_kfree_skb_any(ch->trans_skb);
1345 		ch->trans_skb = NULL;
1346 		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
1347 			"%s: %s: IDAL alloc failed",
1348 				CTCM_FUNTAIL, ch->id);
1349 		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1350 		return;
1351 	}
1352 	ch->ccw[1].count = ch->trans_skb->len;
1353 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1354 	ch->prof.send_stamp = current_kernel_time(); /* xtime */
1355 	if (do_debug_ccw)
1356 		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1357 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1358 					(unsigned long)ch, 0xff, 0);
1359 	ch->prof.doios_multi++;
1360 	if (rc != 0) {
1361 		priv->stats.tx_dropped += i;
1362 		priv->stats.tx_errors += i;
1363 		fsm_deltimer(&ch->timer);
1364 		ctcm_ccw_check_rc(ch, rc, "chained TX");
1365 	}
1366 done:
1367 	ctcm_clear_busy(dev);
1368 	return;
1369 }
1370 
1371 /**
1372  * Got normal data, check it for sanity, queue it up, allocate a new buffer,
1373  * trigger the bottom half, and initiate the next read.
1374  *
1375  * fi		An instance of a channel statemachine.
1376  * event	The event that just happened.
1377  * arg		Generic pointer, cast from channel * upon call.
1378  */
1379 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1380 {
1381 	struct channel		*ch = arg;
1382 	struct net_device	*dev = ch->netdev;
1383 	struct ctcm_priv	*priv = dev->ml_priv;
1384 	struct mpc_group	*grp = priv->mpcg;
1385 	struct sk_buff		*skb = ch->trans_skb;
1386 	struct sk_buff		*new_skb;
1387 	unsigned long		saveflags = 0;	/* avoids compiler warning */
1388 	int len	= ch->max_bufsize - ch->irb->scsw.cmd.count;
1389 
1390 	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
1391 			CTCM_FUNTAIL, dev->name, smp_processor_id(),
1392 				ch->id, ch->max_bufsize, len);
1393 	fsm_deltimer(&ch->timer);
1394 
1395 	if (skb == NULL) {
1396 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1397 			"%s(%s): TRANS_SKB = NULL",
1398 				CTCM_FUNTAIL, dev->name);
1399 			goto again;
1400 	}
1401 
1402 	if (len < TH_HEADER_LENGTH) {
1403 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1404 				"%s(%s): packet length %d too short",
1405 					CTCM_FUNTAIL, dev->name, len);
1406 		priv->stats.rx_dropped++;
1407 		priv->stats.rx_length_errors++;
1408 	} else {
1409 		/* must have valid th header or game over */
1410 		__u32	block_len = len;
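		/*
		 * Outside the data-transfer group states only the XID exchange
		 * data (TH header + XID2 + 4 bytes) is copied out; see the
		 * default branch of the switch below.
		 */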
1411 		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
1412 		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1413 
1414 		if (new_skb == NULL) {
1415 			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1416 				"%s(%s): skb allocation failed",
1417 						CTCM_FUNTAIL, dev->name);
1418 			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1419 					goto again;
1420 		}
1421 		switch (fsm_getstate(grp->fsm)) {
1422 		case MPCG_STATE_RESET:
1423 		case MPCG_STATE_INOP:
1424 			dev_kfree_skb_any(new_skb);
1425 			break;
1426 		case MPCG_STATE_FLOWC:
1427 		case MPCG_STATE_READY:
1428 			memcpy(skb_put(new_skb, block_len),
1429 					       skb->data, block_len);
1430 			skb_queue_tail(&ch->io_queue, new_skb);
1431 			tasklet_schedule(&ch->ch_tasklet);
1432 			break;
1433 		default:
1434 			memcpy(skb_put(new_skb, len), skb->data, len);
1435 			skb_queue_tail(&ch->io_queue, new_skb);
1436 			tasklet_hi_schedule(&ch->ch_tasklet);
1437 			break;
1438 		}
1439 	}
1440 
1441 again:
1442 	switch (fsm_getstate(grp->fsm)) {
1443 	int rc, dolock;
1444 	case MPCG_STATE_FLOWC:
1445 	case MPCG_STATE_READY:
1446 		if (ctcm_checkalloc_buffer(ch))
1447 			break;
1448 		ch->trans_skb->data = ch->trans_skb_data;
1449 		skb_reset_tail_pointer(ch->trans_skb);
1450 		ch->trans_skb->len = 0;
1451 		ch->ccw[1].count = ch->max_bufsize;
1452 			if (do_debug_ccw)
1453 			ctcmpc_dumpit((char *)&ch->ccw[0],
1454 					sizeof(struct ccw1) * 3);
1455 		dolock = !in_irq();
1456 		if (dolock)
1457 			spin_lock_irqsave(
1458 				get_ccwdev_lock(ch->cdev), saveflags);
1459 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1460 						(unsigned long)ch, 0xff, 0);
1461 		if (dolock) /* see remark about conditional locking */
1462 			spin_unlock_irqrestore(
1463 				get_ccwdev_lock(ch->cdev), saveflags);
1464 		if (rc != 0)
1465 			ctcm_ccw_check_rc(ch, rc, "normal RX");
1466 	default:
1467 		break;
1468 	}
1469 
1470 	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
1471 			__func__, dev->name, ch, ch->id);
1472 
1473 }
1474 
1475 /**
1476  * Initialize connection by sending a __u16 of value 0.
1477  *
1478  * fi		An instance of a channel statemachine.
1479  * event	The event that just happened.
1480  * arg		Generic pointer, cast from channel * upon call.
1481  */
1482 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1483 {
1484 	struct channel		*ch = arg;
1485 	struct net_device	*dev = ch->netdev;
1486 	struct ctcm_priv	*priv = dev->ml_priv;
1487 	struct mpc_group	*gptr = priv->mpcg;
1488 
1489 	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
1490 				__func__, ch->id, ch);
1491 
1492 	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
1493 			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
1494 			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
1495 			fsm_getstate(gptr->fsm), ch->protocol);
1496 
1497 	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
1498 		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
1499 
1500 	fsm_deltimer(&ch->timer);
1501 	if (ctcm_checkalloc_buffer(ch))
1502 				goto done;
1503 
1504 	switch (fsm_getstate(fi)) {
1505 	case CTC_STATE_STARTRETRY:
1506 	case CTC_STATE_SETUPWAIT:
1507 		if (CHANNEL_DIRECTION(ch->flags) == READ) {
1508 			ctcmpc_chx_rxidle(fi, event, arg);
1509 		} else {
1510 			fsm_newstate(fi, CTC_STATE_TXIDLE);
1511 			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
1512 		}
1513 				goto done;
1514 	default:
1515 		break;
1516 	}
1517 
1518 	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == READ)
1519 		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1520 
1521 done:
1522 	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
1523 				__func__, ch->id, ch);
1524 	return;
1525 }
1526 
1527 /**
1528  * Got initial data, check it. If OK,
1529  * notify device statemachine that we are up and
1530  * running.
1531  *
1532  * fi		An instance of a channel statemachine.
1533  * event	The event that just happened.
1534  * arg		Generic pointer, cast from channel * upon call.
1535  */
1536 void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
1537 {
1538 	struct channel *ch = arg;
1539 	struct net_device *dev = ch->netdev;
1540 	struct ctcm_priv  *priv = dev->ml_priv;
1541 	struct mpc_group  *grp = priv->mpcg;
1542 	int rc;
1543 	unsigned long saveflags = 0;	/* avoids compiler warning */
1544 
1545 	fsm_deltimer(&ch->timer);
1546 	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
1547 			__func__, ch->id, dev->name, smp_processor_id(),
1548 				fsm_getstate(fi), fsm_getstate(grp->fsm));
1549 
1550 	fsm_newstate(fi, CTC_STATE_RXIDLE);
1551 	/* XID processing complete */
1552 
1553 	switch (fsm_getstate(grp->fsm)) {
1554 	case MPCG_STATE_FLOWC:
1555 	case MPCG_STATE_READY:
1556 		if (ctcm_checkalloc_buffer(ch))
1557 				goto done;
1558 		ch->trans_skb->data = ch->trans_skb_data;
1559 		skb_reset_tail_pointer(ch->trans_skb);
1560 		ch->trans_skb->len = 0;
1561 		ch->ccw[1].count = ch->max_bufsize;
1562 		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1563 		if (event == CTC_EVENT_START)
1564 			/* see remark about conditional locking */
1565 			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1566 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1567 						(unsigned long)ch, 0xff, 0);
1568 		if (event == CTC_EVENT_START)
1569 			spin_unlock_irqrestore(
1570 					get_ccwdev_lock(ch->cdev), saveflags);
1571 		if (rc != 0) {
1572 			fsm_newstate(fi, CTC_STATE_RXINIT);
1573 			ctcm_ccw_check_rc(ch, rc, "initial RX");
1574 				goto done;
1575 		}
1576 		break;
1577 	default:
1578 		break;
1579 	}
1580 
1581 	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
1582 done:
1583 	return;
1584 }
1585 
1586 /*
1587  * ctcmpc channel FSM action
1588  * called from several points in ctcmpc_ch_fsm
1589  * ctcmpc only
1590  */
1591 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
1592 {
1593 	struct channel	  *ch     = arg;
1594 	struct net_device *dev    = ch->netdev;
1595 	struct ctcm_priv  *priv   = dev->ml_priv;
1596 	struct mpc_group  *grp = priv->mpcg;
1597 
1598 	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
1599 		__func__, dev->name, ch->id, ch, smp_processor_id(),
1600 			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1601 
1602 	switch (fsm_getstate(grp->fsm)) {
1603 	case MPCG_STATE_XID2INITW:
1604 		/* ok..start yside xid exchanges */
1605 		if (!ch->in_mpcgroup)
1606 			break;
1607 		if (fsm_getstate(ch->fsm) ==  CH_XID0_PENDING) {
1608 			fsm_deltimer(&grp->timer);
1609 			fsm_addtimer(&grp->timer,
1610 				MPC_XID_TIMEOUT_VALUE,
1611 				MPCG_EVENT_TIMER, dev);
1612 			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1613 
1614 		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1615 			/* attn rcvd before xid0 processed via bh */
1616 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1617 		break;
1618 	case MPCG_STATE_XID2INITX:
1619 	case MPCG_STATE_XID0IOWAIT:
1620 	case MPCG_STATE_XID0IOWAIX:
1621 		/* attn received before xid0 was processed on this channel,
1622 		 * but xid0 processing for the group is already under way */
1623 		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1624 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1625 		break;
1626 	case MPCG_STATE_XID7INITW:
1627 	case MPCG_STATE_XID7INITX:
1628 	case MPCG_STATE_XID7INITI:
1629 	case MPCG_STATE_XID7INITZ:
1630 		switch (fsm_getstate(ch->fsm)) {
1631 		case CH_XID7_PENDING:
1632 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1633 			break;
1634 		case CH_XID7_PENDING2:
1635 			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
1636 			break;
1637 		}
1638 		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
1639 		break;
1640 	}
1641 
1642 	return;
1643 }
1644 
1645 /*
1646  * ctcmpc channel FSM action
1647  * called from one point in ctcmpc_ch_fsm
1648  * ctcmpc only
1649  */
1650 static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
1651 {
1652 	struct channel	  *ch     = arg;
1653 	struct net_device *dev    = ch->netdev;
1654 	struct ctcm_priv  *priv   = dev->ml_priv;
1655 	struct mpc_group  *grp    = priv->mpcg;
1656 
1657 	CTCM_PR_DEBUG("%s(%s): %s\n  ChState:%s GrpState:%s\n",
1658 			__func__, dev->name, ch->id,
1659 			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1660 
1661 	fsm_deltimer(&ch->timer);
1662 
1663 	switch (fsm_getstate(grp->fsm)) {
1664 	case MPCG_STATE_XID0IOWAIT:
1665 		/* VTAM wants to be primary. Start yside XID exchanges. */
1666 		/* Only one attn-busy is received at a time, so the	 */
1667 		/* state must not be changed on each one.		 */
1668 		grp->changed_side = 1;
1669 		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1670 		break;
1671 	case MPCG_STATE_XID2INITW:
1672 		if (grp->changed_side == 1) {
1673 			grp->changed_side = 2;
1674 			break;
1675 		}
1676 		/* process began via call to establish_conn	 */
1677 		/* so must report failure instead of reverting	 */
1678 		/* back to ready-for-xid passive state		 */
1679 		if (grp->estconnfunc)
1680 			goto done;
1681 		/* this attnbusy is NOT the result of xside xid  */
1682 		/* collisions so yside must have been triggered  */
1683 		/* by an ATTN that was not intended to start XID */
1684 		/* processing. Revert back to ready-for-xid and  */
1685 		/* wait for ATTN interrupt to signal xid start	 */
1686 		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
1687 			fsm_newstate(ch->fsm, CH_XID0_PENDING);
1688 			fsm_deltimer(&grp->timer);
1689 			goto done;
1690 		}
1691 		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1692 		goto done;
1693 	case MPCG_STATE_XID2INITX:
1694 		/* XID2 was received before ATTN Busy for the second
1695 		 * channel. Send yside XID for the second channel; if the
1696 		 * side has not changed, fall through to the cases below. */
1697 		if (grp->changed_side == 1) {
1698 			grp->changed_side = 2;
1699 			break;
1700 		}
1701 	case MPCG_STATE_XID0IOWAIX:
1702 	case MPCG_STATE_XID7INITW:
1703 	case MPCG_STATE_XID7INITX:
1704 	case MPCG_STATE_XID7INITI:
1705 	case MPCG_STATE_XID7INITZ:
1706 	default:
1707 		/* Multiple attn-busy indicates the sides are too far	  */
1708 		/* out of sync and these are certainly not being	  */
1709 		/* received as part of valid MPC group negotiations.	  */
1710 		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1711 		goto done;
1712 	}
1713 
1714 	if (grp->changed_side == 1) {
1715 		fsm_deltimer(&grp->timer);
1716 		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
1717 			     MPCG_EVENT_TIMER, dev);
1718 	}
1719 	if (ch->in_mpcgroup)
1720 		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1721 	else
1722 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1723 			"%s(%s): channel %s not added to group",
1724 				CTCM_FUNTAIL, dev->name, ch->id);
1725 
1726 done:
1727 	return;
1728 }
1729 
1730 /*
1731  * ctcmpc channel FSM action
1732  * called from several points in ctcmpc_ch_fsm
1733  * ctcmpc only
1734  */
1735 static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
1736 {
1737 	struct channel	   *ch	   = arg;
1738 	struct net_device  *dev    = ch->netdev;
1739 	struct ctcm_priv   *priv   = dev->ml_priv;
1740 	struct mpc_group   *grp    = priv->mpcg;
1741 
1742 	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1743 	return;
1744 }
1745 
1746 /*
1747  * ctcmpc channel FSM action
1748  * called from several points in ctcmpc_ch_fsm
1749  * ctcmpc only
1750  */
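/*
 * Presumably the TH "sweep" exchange resynchronizes the send/receive
 * sequence numbers with VTAM: queued sweep frames are transmitted one at
 * a time on the write channel, and once no sweep requests or responses
 * remain outstanding both channels' sequence numbers are reset and normal
 * transmission resumes.
 */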
1751 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1752 {
1753 	struct channel *ach = arg;
1754 	struct net_device *dev = ach->netdev;
1755 	struct ctcm_priv *priv = dev->ml_priv;
1756 	struct mpc_group *grp = priv->mpcg;
1757 	struct channel *wch = priv->channel[WRITE];
1758 	struct channel *rch = priv->channel[READ];
1759 	struct sk_buff *skb;
1760 	struct th_sweep *header;
1761 	int rc = 0;
1762 	unsigned long saveflags = 0;
1763 
1764 	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1765 			__func__, smp_processor_id(), ach, ach->id);
1766 
1767 	if (grp->in_sweep == 0)
1768 		goto done;
1769 
1770 	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n",
1771 				__func__, wch->th_seq_num);
1772 	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n",
1773 				__func__, rch->th_seq_num);
1774 
1775 	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
1776 		/* give the previous IO time to complete */
1777 		fsm_addtimer(&wch->sweep_timer,
1778 			200, CTC_EVENT_RSWEEP_TIMER, wch);
1779 		goto done;
1780 	}
1781 
1782 	skb = skb_dequeue(&wch->sweep_queue);
1783 	if (!skb)
1784 		goto done;
1785 
1786 	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
1787 		grp->in_sweep = 0;
1788 		ctcm_clear_busy_do(dev);
1789 		dev_kfree_skb_any(skb);
1790 		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1791 		goto done;
1792 	} else {
1793 		atomic_inc(&skb->users);
1794 		skb_queue_tail(&wch->io_queue, skb);
1795 	}
1796 
1797 	/* send out the sweep */
1798 	wch->ccw[4].count = skb->len;
1799 
1800 	header = (struct th_sweep *)skb->data;
1801 	switch (header->th.th_ch_flag) {
1802 	case TH_SWEEP_REQ:
1803 		grp->sweep_req_pend_num--;
1804 		break;
1805 	case TH_SWEEP_RESP:
1806 		grp->sweep_rsp_pend_num--;
1807 		break;
1808 	}
1809 
1810 	header->sw.th_last_seq = wch->th_seq_num;
1811 
1812 	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
1813 	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
1814 	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
1815 
1816 	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
1817 	fsm_newstate(wch->fsm, CTC_STATE_TX);
1818 
1819 	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
1820 	wch->prof.send_stamp = current_kernel_time(); /* xtime */
1821 	rc = ccw_device_start(wch->cdev, &wch->ccw[3],
1822 					(unsigned long) wch, 0xff, 0);
1823 	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
1824 
1825 	if ((grp->sweep_req_pend_num == 0) &&
1826 	   (grp->sweep_rsp_pend_num == 0)) {
1827 		grp->in_sweep = 0;
1828 		rch->th_seq_num = 0x00;
1829 		wch->th_seq_num = 0x00;
1830 		ctcm_clear_busy_do(dev);
1831 	}
1832 
1833 	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n",
1834 			__func__, wch->th_seq_num, rch->th_seq_num);
1835 
1836 	if (rc != 0)
1837 		ctcm_ccw_check_rc(wch, rc, "send sweep");
1838 
1839 done:
1840 	return;
1841 }
1842 
1843 
1844 /*
1845  * The ctcmpc statemachine for a channel.
1846  */
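/*
 * Each fsm_node maps a (state, event) pair to the action to be run.
 * The table, together with the state and event name arrays above, is
 * presumably passed to init_fsm() when an MPC channel is set up, and
 * fsm_event() then dispatches through it.
 */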
1847 
1848 const fsm_node ctcmpc_ch_fsm[] = {
1849 	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
1850 	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
1851 	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1852 	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1853 	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1854 
1855 	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
1856 	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
1857 	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1858 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1859 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
1860 	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop  },
1861 	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop  },
1862 	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1863 
1864 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1865 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1866 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1867 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
1868 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1869 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1870 
1871 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1872 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1873 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1874 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1875 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1876 
1877 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1878 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1879 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
1880 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1881 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1882 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1883 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1884 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1885 
1886 	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1887 	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1888 	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle  },
1889 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
1890 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
1891 	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
1892 	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
1893 	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1894 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio  },
1895 	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1896 
1897 	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1898 	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1899 	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1900 	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
1901 	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1902 	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1903 	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1904 	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1906 	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1907 
1908 	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1909 	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1910 	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1911 	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop  },
1912 	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1913 	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1914 	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1915 	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
1916 	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy  },
1917 	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1918 	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1919 
1920 	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1921 	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1922 	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1923 	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
1924 	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1925 	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1926 	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1927 	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1928 	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1930 	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1931 	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1932 	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1933 
1934 	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1935 	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1936 	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1937 	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop  },
1938 	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1939 	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1940 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1941 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1942 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1943 	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1944 	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1945 	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1946 
1947 	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1948 	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1949 	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1950 	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop  },
1951 	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1952 	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1953 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1954 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1955 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1956 	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1957 	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1958 	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1959 
1960 	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1961 	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1962 	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1963 	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop  },
1964 	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1965 	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1966 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1967 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1968 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1969 	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1970 	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1971 	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1972 
1973 	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1974 	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1975 	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1976 	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop  },
1977 	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1978 	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1979 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1980 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1981 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1982 	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1983 	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1984 	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1985 
1986 	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1987 	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1988 	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1989 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
1990 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
1991 	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1992 	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1993 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1994 
1995 	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1996 	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1997 	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
1998 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
1999 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
2000 	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
2001 	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2002 	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2003 	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2004 
2005 	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2006 	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
2007 	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
2008 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
2009 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
2010 	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2011 	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2012 	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2013 
2014 	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
2015 	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
2016 	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
2017 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
2018 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
2019 	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2020 	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
2021 	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2022 
2023 	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2024 	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
2025 	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
2026 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
2027 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
2028 	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2029 	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2030 
2031 	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
2032 	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
2033 	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone  },
2034 	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
2035 	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
2036 	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
2037 	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2038 	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2039 	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2040 	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
2041 
2042 	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2043 	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2044 	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2045 	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2046 	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2047 };
2048 
2049 int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
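/*
 * Sketch of the intended use (an assumption about ctcm_main.c, not code
 * in this file); nr_states/nr_events stand for whatever counts the
 * driver actually defines:
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
 *			   nr_states, nr_events,
 *			   ctcmpc_ch_fsm, mpc_ch_fsm_len, GFP_KERNEL);
 *
 * After that, the ccw IRQ handler only needs to post CTC_EVENT_* events.
 */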
2050 
2051 /*
2052  * Actions for the interface statemachine.
2053  */
2054 
2055 /**
2056  * Start up the channels by sending CTC_EVENT_START to each of them.
2057  *
2058  * fi		An instance of an interface statemachine.
2059  * event	The event that just happened.
2060  * arg		Generic pointer, cast from struct net_device * upon call.
2061  */
2062 static void dev_action_start(fsm_instance *fi, int event, void *arg)
2063 {
2064 	struct net_device *dev = arg;
2065 	struct ctcm_priv *priv = dev->ml_priv;
2066 	int direction;
2067 
2068 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2069 
2070 	fsm_deltimer(&priv->restart_timer);
2071 	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2072 	if (IS_MPC(priv))
2073 		priv->mpcg->channels_terminating = 0;
2074 	for (direction = READ; direction <= WRITE; direction++) {
2075 		struct channel *ch = priv->channel[direction];
2076 		fsm_event(ch->fsm, CTC_EVENT_START, ch);
2077 	}
2078 }
2079 
2080 /**
2081  * Shut down the channels by sending CTC_EVENT_STOP to each of them.
2082  *
2083  * fi		An instance of an interface statemachine.
2084  * event	The event that just happened.
2085  * arg		Generic pointer, cast from struct net_device * upon call.
2086  */
2087 static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2088 {
2089 	int direction;
2090 	struct net_device *dev = arg;
2091 	struct ctcm_priv *priv = dev->ml_priv;
2092 
2093 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2094 
2095 	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2096 	for (direction = READ; direction <= WRITE; direction++) {
2097 		struct channel *ch = priv->channel[direction];
2098 		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2099 		ch->th_seq_num = 0x00;
2100 		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
2101 				__func__, ch->th_seq_num);
2102 	}
2103 	if (IS_MPC(priv))
2104 		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2105 }
2106 
2107 static void dev_action_restart(fsm_instance *fi, int event, void *arg)
2108 {
2109 	int restart_timer;
2110 	struct net_device *dev = arg;
2111 	struct ctcm_priv *priv = dev->ml_priv;
2112 
2113 	CTCMY_DBF_DEV_NAME(TRACE, dev, "");
2114 
2115 	if (IS_MPC(priv)) {
2116 		restart_timer = CTCM_TIME_1_SEC;
2117 	} else {
2118 		restart_timer = CTCM_TIME_5_SEC;
2119 	}
2120 	dev_info(&dev->dev, "Restarting device\n");
2121 
2122 	dev_action_stop(fi, event, arg);
2123 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
2124 	if (IS_MPC(priv))
2125 		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2126 
2127 	/* Going back into the start sequence too quickly can	  */
2128 	/* result in the other side becoming unreachable, due	  */
2129 	/* to sense data reported when the I/O is aborted.	  */
2130 	fsm_addtimer(&priv->restart_timer, restart_timer,
2131 			DEV_EVENT_START, dev);
2132 }
2133 
2134 /**
2135  * Called from channel statemachine
2136  * when a channel is up and running.
2137  *
2138  * fi		An instance of an interface statemachine.
2139  * event	The event that just happened.
2140  * arg		Generic pointer, cast from struct net_device * upon call.
2141  */
2142 static void dev_action_chup(fsm_instance *fi, int event, void *arg)
2143 {
2144 	struct net_device *dev = arg;
2145 	struct ctcm_priv *priv = dev->ml_priv;
2146 	int dev_stat = fsm_getstate(fi);
2147 
2148 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
2149 			"%s(%s): priv = %p [%d,%d]\n ",	CTCM_FUNTAIL,
2150 				dev->name, dev->ml_priv, dev_stat, event);
2151 
2152 	switch (fsm_getstate(fi)) {
2153 	case DEV_STATE_STARTWAIT_RXTX:
2154 		if (event == DEV_EVENT_RXUP)
2155 			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2156 		else
2157 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2158 		break;
2159 	case DEV_STATE_STARTWAIT_RX:
2160 		if (event == DEV_EVENT_RXUP) {
2161 			fsm_newstate(fi, DEV_STATE_RUNNING);
2162 			dev_info(&dev->dev,
2163 				"Connected with remote side\n");
2164 			ctcm_clear_busy(dev);
2165 		}
2166 		break;
2167 	case DEV_STATE_STARTWAIT_TX:
2168 		if (event == DEV_EVENT_TXUP) {
2169 			fsm_newstate(fi, DEV_STATE_RUNNING);
2170 			dev_info(&dev->dev,
2171 				"Connected with remote side\n");
2172 			ctcm_clear_busy(dev);
2173 		}
2174 		break;
2175 	case DEV_STATE_STOPWAIT_TX:
2176 		if (event == DEV_EVENT_RXUP)
2177 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2178 		break;
2179 	case DEV_STATE_STOPWAIT_RX:
2180 		if (event == DEV_EVENT_TXUP)
2181 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2182 		break;
2183 	}
2184 
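	/*
	 * For MPC, the channel that just came up is also registered with
	 * the MPC group (MPC_CHANNEL_ADD), presumably so that the group's
	 * XID negotiation can include it.
	 */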
2185 	if (IS_MPC(priv)) {
2186 		if (event == DEV_EVENT_RXUP)
2187 			mpc_channel_action(priv->channel[READ],
2188 				READ, MPC_CHANNEL_ADD);
2189 		else
2190 			mpc_channel_action(priv->channel[WRITE],
2191 				WRITE, MPC_CHANNEL_ADD);
2192 	}
2193 }
2194 
2195 /**
2196  * Called from channel statemachine
2197  * when a channel has been shut down.
2198  *
2199  * fi		An instance of an interface statemachine.
2200  * event	The event that just happened.
2201  * arg		Generic pointer, cast from struct net_device * upon call.
2202  */
2203 static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
2204 {
2205 
2206 	struct net_device *dev = arg;
2207 	struct ctcm_priv *priv = dev->ml_priv;
2208 
2209 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2210 
2211 	switch (fsm_getstate(fi)) {
2212 	case DEV_STATE_RUNNING:
2213 		if (event == DEV_EVENT_TXDOWN)
2214 			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2215 		else
2216 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2217 		break;
2218 	case DEV_STATE_STARTWAIT_RX:
2219 		if (event == DEV_EVENT_TXDOWN)
2220 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2221 		break;
2222 	case DEV_STATE_STARTWAIT_TX:
2223 		if (event == DEV_EVENT_RXDOWN)
2224 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2225 		break;
2226 	case DEV_STATE_STOPWAIT_RXTX:
2227 		if (event == DEV_EVENT_TXDOWN)
2228 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2229 		else
2230 			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2231 		break;
2232 	case DEV_STATE_STOPWAIT_RX:
2233 		if (event == DEV_EVENT_RXDOWN)
2234 			fsm_newstate(fi, DEV_STATE_STOPPED);
2235 		break;
2236 	case DEV_STATE_STOPWAIT_TX:
2237 		if (event == DEV_EVENT_TXDOWN)
2238 			fsm_newstate(fi, DEV_STATE_STOPPED);
2239 		break;
2240 	}
2241 	if (IS_MPC(priv)) {
2242 		if (event == DEV_EVENT_RXDOWN)
2243 			mpc_channel_action(priv->channel[READ],
2244 				READ, MPC_CHANNEL_REMOVE);
2245 		else
2246 			mpc_channel_action(priv->channel[WRITE],
2247 				WRITE, MPC_CHANNEL_REMOVE);
2248 	}
2249 }
2250 
2251 const fsm_node dev_fsm[] = {
2252 	{ DEV_STATE_STOPPED,        DEV_EVENT_START,   dev_action_start   },
2253 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_START,   dev_action_start   },
2254 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RXDOWN,  dev_action_chdown  },
2255 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_TXDOWN,  dev_action_chdown  },
2256 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RESTART, dev_action_restart },
2257 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_START,   dev_action_start   },
2258 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXUP,    dev_action_chup    },
2259 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_TXUP,    dev_action_chup    },
2260 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXDOWN,  dev_action_chdown  },
2261 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RESTART, dev_action_restart },
2262 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_START,   dev_action_start   },
2263 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RXUP,    dev_action_chup    },
2264 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXUP,    dev_action_chup    },
2265 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXDOWN,  dev_action_chdown  },
2266 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RESTART, dev_action_restart },
2267 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP,    dev_action_stop    },
2268 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP,    dev_action_chup    },
2269 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP,    dev_action_chup    },
2270 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN,  dev_action_chdown  },
2271 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN,  dev_action_chdown  },
2272 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2273 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_STOP,    dev_action_stop    },
2274 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXUP,    dev_action_chup    },
2275 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_TXUP,    dev_action_chup    },
2276 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXDOWN,  dev_action_chdown  },
2277 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RESTART, dev_action_restart },
2278 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_STOP,    dev_action_stop    },
2279 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RXUP,    dev_action_chup    },
2280 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXUP,    dev_action_chup    },
2281 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXDOWN,  dev_action_chdown  },
2282 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RESTART, dev_action_restart },
2283 	{ DEV_STATE_RUNNING,        DEV_EVENT_STOP,    dev_action_stop    },
2284 	{ DEV_STATE_RUNNING,        DEV_EVENT_RXDOWN,  dev_action_chdown  },
2285 	{ DEV_STATE_RUNNING,        DEV_EVENT_TXDOWN,  dev_action_chdown  },
2286 	{ DEV_STATE_RUNNING,        DEV_EVENT_TXUP,    ctcm_action_nop    },
2287 	{ DEV_STATE_RUNNING,        DEV_EVENT_RXUP,    ctcm_action_nop    },
2288 	{ DEV_STATE_RUNNING,        DEV_EVENT_RESTART, dev_action_restart },
2289 };
2290 
2291 int dev_fsm_len = ARRAY_SIZE(dev_fsm);
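/*
 * Like the channel table above, dev_fsm and dev_fsm_len are presumably fed
 * to init_fsm() when the network device is created.  The device only
 * reaches DEV_STATE_RUNNING after both the RX and the TX channel have
 * reported up (DEV_EVENT_RXUP/DEV_EVENT_TXUP, see dev_action_chup()).
 */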
2292 
2293 /* --- This is the END my friend --- */
2294 
2295