xref: /linux/drivers/s390/net/ctcm_fsms.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
1 /*
2  * Copyright IBM Corp. 2001, 2007
3  * Authors:	Fritz Elfert (felfert@millenux.com)
4  * 		Peter Tiedemann (ptiedem@de.ibm.com)
5  *	MPC additions :
6  *		Belinda Thompson (belindat@us.ibm.com)
7  *		Andy Richter (richtera@us.ibm.com)
8  */
9 
10 #undef DEBUG
11 #undef DEBUGDATA
12 #undef DEBUGCCW
13 
14 #define KMSG_COMPONENT "ctcm"
15 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
16 
17 #include <linux/module.h>
18 #include <linux/init.h>
19 #include <linux/kernel.h>
20 #include <linux/slab.h>
21 #include <linux/errno.h>
22 #include <linux/types.h>
23 #include <linux/interrupt.h>
24 #include <linux/timer.h>
25 #include <linux/bitops.h>
26 
27 #include <linux/signal.h>
28 #include <linux/string.h>
29 
30 #include <linux/ip.h>
31 #include <linux/if_arp.h>
32 #include <linux/tcp.h>
33 #include <linux/skbuff.h>
34 #include <linux/ctype.h>
35 #include <net/dst.h>
36 
37 #include <linux/io.h>
38 #include <asm/ccwdev.h>
39 #include <asm/ccwgroup.h>
40 #include <linux/uaccess.h>
41 
42 #include <asm/idals.h>
43 
44 #include "fsm.h"
45 
46 #include "ctcm_dbug.h"
47 #include "ctcm_main.h"
48 #include "ctcm_fsms.h"
49 
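/*
 * Printable names for the device and channel statemachine states and events
 * below.  The arrays are indexed by the corresponding enum values from
 * ctcm_fsms.h and are passed to the generic fsm code so that debug output
 * can show symbolic names instead of raw numbers.
 */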
50 const char *dev_state_names[] = {
51 	[DEV_STATE_STOPPED]		= "Stopped",
52 	[DEV_STATE_STARTWAIT_RXTX]	= "StartWait RXTX",
53 	[DEV_STATE_STARTWAIT_RX]	= "StartWait RX",
54 	[DEV_STATE_STARTWAIT_TX]	= "StartWait TX",
55 	[DEV_STATE_STOPWAIT_RXTX]	= "StopWait RXTX",
56 	[DEV_STATE_STOPWAIT_RX]		= "StopWait RX",
57 	[DEV_STATE_STOPWAIT_TX]		= "StopWait TX",
58 	[DEV_STATE_RUNNING]		= "Running",
59 };
60 
61 const char *dev_event_names[] = {
62 	[DEV_EVENT_START]	= "Start",
63 	[DEV_EVENT_STOP]	= "Stop",
64 	[DEV_EVENT_RXUP]	= "RX up",
65 	[DEV_EVENT_TXUP]	= "TX up",
66 	[DEV_EVENT_RXDOWN]	= "RX down",
67 	[DEV_EVENT_TXDOWN]	= "TX down",
68 	[DEV_EVENT_RESTART]	= "Restart",
69 };
70 
71 const char *ctc_ch_event_names[] = {
72 	[CTC_EVENT_IO_SUCCESS]	= "ccw_device success",
73 	[CTC_EVENT_IO_EBUSY]	= "ccw_device busy",
74 	[CTC_EVENT_IO_ENODEV]	= "ccw_device enodev",
75 	[CTC_EVENT_IO_UNKNOWN]	= "ccw_device unknown",
76 	[CTC_EVENT_ATTNBUSY]	= "Status ATTN & BUSY",
77 	[CTC_EVENT_ATTN]	= "Status ATTN",
78 	[CTC_EVENT_BUSY]	= "Status BUSY",
79 	[CTC_EVENT_UC_RCRESET]	= "Unit check remote reset",
80 	[CTC_EVENT_UC_RSRESET]	= "Unit check remote system reset",
81 	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
82 	[CTC_EVENT_UC_TXPARITY]	= "Unit check TX parity",
83 	[CTC_EVENT_UC_HWFAIL]	= "Unit check Hardware failure",
84 	[CTC_EVENT_UC_RXPARITY]	= "Unit check RX parity",
85 	[CTC_EVENT_UC_ZERO]	= "Unit check ZERO",
86 	[CTC_EVENT_UC_UNKNOWN]	= "Unit check Unknown",
87 	[CTC_EVENT_SC_UNKNOWN]	= "SubChannel check Unknown",
88 	[CTC_EVENT_MC_FAIL]	= "Machine check failure",
89 	[CTC_EVENT_MC_GOOD]	= "Machine check operational",
90 	[CTC_EVENT_IRQ]		= "IRQ normal",
91 	[CTC_EVENT_FINSTAT]	= "IRQ final",
92 	[CTC_EVENT_TIMER]	= "Timer",
93 	[CTC_EVENT_START]	= "Start",
94 	[CTC_EVENT_STOP]	= "Stop",
95 	/*
96 	 * additional MPC events
97 	 */
98 	[CTC_EVENT_SEND_XID]	= "XID Exchange",
99 	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
100 };
101 
102 const char *ctc_ch_state_names[] = {
103 	[CTC_STATE_IDLE]	= "Idle",
104 	[CTC_STATE_STOPPED]	= "Stopped",
105 	[CTC_STATE_STARTWAIT]	= "StartWait",
106 	[CTC_STATE_STARTRETRY]	= "StartRetry",
107 	[CTC_STATE_SETUPWAIT]	= "SetupWait",
108 	[CTC_STATE_RXINIT]	= "RX init",
109 	[CTC_STATE_TXINIT]	= "TX init",
110 	[CTC_STATE_RX]		= "RX",
111 	[CTC_STATE_TX]		= "TX",
112 	[CTC_STATE_RXIDLE]	= "RX idle",
113 	[CTC_STATE_TXIDLE]	= "TX idle",
114 	[CTC_STATE_RXERR]	= "RX error",
115 	[CTC_STATE_TXERR]	= "TX error",
116 	[CTC_STATE_TERM]	= "Terminating",
117 	[CTC_STATE_DTERM]	= "Restarting",
118 	[CTC_STATE_NOTOP]	= "Not operational",
119 	/*
120 	 * additional MPC states
121 	 */
122 	[CH_XID0_PENDING]	= "Pending XID0 Start",
123 	[CH_XID0_INPROGRESS]	= "In XID0 Negotiations ",
124 	[CH_XID7_PENDING]	= "Pending XID7 P1 Start",
125 	[CH_XID7_PENDING1]	= "Active XID7 P1 Exchange ",
126 	[CH_XID7_PENDING2]	= "Pending XID7 P2 Start ",
127 	[CH_XID7_PENDING3]	= "Active XID7 P2 Exchange ",
128 	[CH_XID7_PENDING4]	= "XID7 Complete - Pending READY ",
129 };
130 
131 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);
132 
133 /*
134  * ----- static ctcm actions for channel statemachine -----
135  *
136  */
137 static void chx_txdone(fsm_instance *fi, int event, void *arg);
138 static void chx_rx(fsm_instance *fi, int event, void *arg);
139 static void chx_rxidle(fsm_instance *fi, int event, void *arg);
140 static void chx_firstio(fsm_instance *fi, int event, void *arg);
141 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
142 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
143 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
144 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
145 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
146 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
147 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
148 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
149 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
150 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
151 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
152 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
153 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
154 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
155 
156 /*
157  * ----- static ctcmpc actions for ctcmpc channel statemachine -----
158  *
159  */
160 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
161 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
162 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
163 /* shared :
164 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
165 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
166 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
167 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
168 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
169 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
170 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
171 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
172 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
173 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
174 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
175 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
176 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
177 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
178 */
179 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
180 static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
181 static void ctcmpc_chx_resend(fsm_instance *, int, void *);
182 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);
183 
184 /**
185  * Check the return code of a preceding ccw_device call (halt_IO etc.).
186  *
187  * ch	:	The channel the error belongs to.
188  * rc	:	The error code (!= 0) to inspect; msg describes the operation.
189  */
190 void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
191 {
192 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
193 		"%s(%s): %s: %04x\n",
194 		CTCM_FUNTAIL, ch->id, msg, rc);
195 	switch (rc) {
196 	case -EBUSY:
197 		pr_info("%s: The communication peer is busy\n",
198 			ch->id);
199 		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
200 		break;
201 	case -ENODEV:
202 		pr_err("%s: The specified target device is not valid\n",
203 		       ch->id);
204 		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
205 		break;
206 	default:
207 		pr_err("An I/O operation resulted in error %04x\n",
208 		       rc);
209 		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
210 	}
211 }
212 
213 void ctcm_purge_skb_queue(struct sk_buff_head *q)
214 {
215 	struct sk_buff *skb;
216 
217 	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);
218 
219 	while ((skb = skb_dequeue(q))) {
220 		atomic_dec(&skb->users);
221 		dev_kfree_skb_any(skb);
222 	}
223 }
224 
225 /**
226  * NOP action for statemachines
227  */
228 static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
229 {
230 }
231 
232 /*
233  * Actions for channel - statemachines.
234  */
235 
236 /**
237  * Normal data has been sent. Free the corresponding
238  * skb (it's in io_queue), reset dev->tbusy and
239  * revert to idle state.
240  *
241  * fi		An instance of a channel statemachine.
242  * event	The event, just happened.
243  * arg		Generic pointer, casted from channel * upon call.
244  */
245 static void chx_txdone(fsm_instance *fi, int event, void *arg)
246 {
247 	struct channel *ch = arg;
248 	struct net_device *dev = ch->netdev;
249 	struct ctcm_priv *priv = dev->ml_priv;
250 	struct sk_buff *skb;
251 	int first = 1;
252 	int i;
253 	unsigned long duration;
254 	unsigned long done_stamp = jiffies;
255 
256 	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
257 
258 	duration = done_stamp - ch->prof.send_stamp;
259 	if (duration > ch->prof.tx_time)
260 		ch->prof.tx_time = duration;
261 
262 	if (ch->irb->scsw.cmd.count != 0)
263 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
264 			"%s(%s): TX not complete, remaining %d bytes",
265 			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
266 	fsm_deltimer(&ch->timer);
267 	while ((skb = skb_dequeue(&ch->io_queue))) {
268 		priv->stats.tx_packets++;
269 		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
270 		if (first) {
271 			priv->stats.tx_bytes += 2;
272 			first = 0;
273 		}
274 		atomic_dec(&skb->users);
275 		dev_kfree_skb_irq(skb);
276 	}
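	/*
	 * All queued skbs for this write are transmitted.  If more output
	 * piled up in the collect queue meanwhile, coalesce it into
	 * trans_skb behind a 2-byte block-length header and start a single
	 * chained write for all of it; otherwise go back to TX idle.
	 */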
277 	spin_lock(&ch->collect_lock);
278 	clear_normalized_cda(&ch->ccw[4]);
279 	if (ch->collect_len > 0) {
280 		int rc;
281 
282 		if (ctcm_checkalloc_buffer(ch)) {
283 			spin_unlock(&ch->collect_lock);
284 			return;
285 		}
286 		ch->trans_skb->data = ch->trans_skb_data;
287 		skb_reset_tail_pointer(ch->trans_skb);
288 		ch->trans_skb->len = 0;
289 		if (ch->prof.maxmulti < (ch->collect_len + 2))
290 			ch->prof.maxmulti = ch->collect_len + 2;
291 		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
292 			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
293 		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
294 		i = 0;
295 		while ((skb = skb_dequeue(&ch->collect_queue))) {
296 			skb_copy_from_linear_data(skb,
297 				skb_put(ch->trans_skb, skb->len), skb->len);
298 			priv->stats.tx_packets++;
299 			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
300 			atomic_dec(&skb->users);
301 			dev_kfree_skb_irq(skb);
302 			i++;
303 		}
304 		ch->collect_len = 0;
305 		spin_unlock(&ch->collect_lock);
306 		ch->ccw[1].count = ch->trans_skb->len;
307 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
308 		ch->prof.send_stamp = jiffies;
309 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
310 						(unsigned long)ch, 0xff, 0);
311 		ch->prof.doios_multi++;
312 		if (rc != 0) {
313 			priv->stats.tx_dropped += i;
314 			priv->stats.tx_errors += i;
315 			fsm_deltimer(&ch->timer);
316 			ctcm_ccw_check_rc(ch, rc, "chained TX");
317 		}
318 	} else {
319 		spin_unlock(&ch->collect_lock);
320 		fsm_newstate(fi, CTC_STATE_TXIDLE);
321 	}
322 	ctcm_clear_busy_do(dev);
323 }
324 
325 /**
326  * Initial data is sent.
327  * Notify device statemachine that we are up and
328  * running.
329  *
330  * fi		An instance of a channel statemachine.
331  * event	The event, just happened.
332  * arg		Generic pointer, casted from channel * upon call.
333  */
334 void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
335 {
336 	struct channel *ch = arg;
337 	struct net_device *dev = ch->netdev;
338 	struct ctcm_priv *priv = dev->ml_priv;
339 
340 	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);
341 
342 	fsm_deltimer(&ch->timer);
343 	fsm_newstate(fi, CTC_STATE_TXIDLE);
344 	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
345 }
346 
347 /**
348  * Got normal data, check it for sanity, queue it up, allocate a new buffer,
349  * trigger the bottom half, and initiate the next read.
350  *
351  * fi		An instance of a channel statemachine.
352  * event	The event, just happened.
353  * arg		Generic pointer, casted from channel * upon call.
354  */
355 static void chx_rx(fsm_instance *fi, int event, void *arg)
356 {
357 	struct channel *ch = arg;
358 	struct net_device *dev = ch->netdev;
359 	struct ctcm_priv *priv = dev->ml_priv;
360 	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
361 	struct sk_buff *skb = ch->trans_skb;
362 	__u16 block_len = *((__u16 *)skb->data);
363 	int check_len;
364 	int rc;
365 
366 	fsm_deltimer(&ch->timer);
367 	if (len < 8) {
368 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
369 			"%s(%s): got packet with length %d < 8\n",
370 					CTCM_FUNTAIL, dev->name, len);
371 		priv->stats.rx_dropped++;
372 		priv->stats.rx_length_errors++;
373 						goto again;
374 	}
375 	if (len > ch->max_bufsize) {
376 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
377 			"%s(%s): got packet with length %d > %d\n",
378 				CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
379 		priv->stats.rx_dropped++;
380 		priv->stats.rx_length_errors++;
381 						goto again;
382 	}
383 
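	/*
	 * The first two bytes of the buffer carry the block length
	 * (including the 2-byte header itself).  Sanity-check it against
	 * the number of bytes the channel actually transferred before the
	 * block is handed to ctcm_unpack_skb().
	 */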
384 	/*
385 	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
386 	 */
387 	switch (ch->protocol) {
388 	case CTCM_PROTO_S390:
389 	case CTCM_PROTO_OS390:
390 		check_len = block_len + 2;
391 		break;
392 	default:
393 		check_len = block_len;
394 		break;
395 	}
396 	if ((len < block_len) || (len > check_len)) {
397 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
398 			"%s(%s): got block length %d != rx length %d\n",
399 				CTCM_FUNTAIL, dev->name, block_len, len);
400 		if (do_debug)
401 			ctcmpc_dump_skb(skb, 0);
402 
403 		*((__u16 *)skb->data) = len;
404 		priv->stats.rx_dropped++;
405 		priv->stats.rx_length_errors++;
406 						goto again;
407 	}
408 	if (block_len > 2) {
409 		*((__u16 *)skb->data) = block_len - 2;
410 		ctcm_unpack_skb(ch, skb);
411 	}
412  again:
413 	skb->data = ch->trans_skb_data;
414 	skb_reset_tail_pointer(skb);
415 	skb->len = 0;
416 	if (ctcm_checkalloc_buffer(ch))
417 		return;
418 	ch->ccw[1].count = ch->max_bufsize;
419 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
420 					(unsigned long)ch, 0xff, 0);
421 	if (rc != 0)
422 		ctcm_ccw_check_rc(ch, rc, "normal RX");
423 }
424 
425 /**
426  * Initialize the connection by sending a __u16 initial block length.
427  *
428  * fi		An instance of a channel statemachine.
429  * event	The event, just happened.
430  * arg		Generic pointer, casted from channel * upon call.
431  */
432 static void chx_firstio(fsm_instance *fi, int event, void *arg)
433 {
434 	int rc;
435 	struct channel *ch = arg;
436 	int fsmstate = fsm_getstate(fi);
437 
438 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
439 		"%s(%s) : %02x",
440 		CTCM_FUNTAIL, ch->id, fsmstate);
441 
442 	ch->sense_rc = 0;	/* reset unit check report control */
443 	if (fsmstate == CTC_STATE_TXIDLE)
444 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
445 			"%s(%s): remote side issued READ?, init.\n",
446 				CTCM_FUNTAIL, ch->id);
447 	fsm_deltimer(&ch->timer);
448 	if (ctcm_checkalloc_buffer(ch))
449 		return;
450 	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
451 	    (ch->protocol == CTCM_PROTO_OS390)) {
452 		/* OS/390 resp. z/OS */
453 		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
454 			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
455 			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
456 				     CTC_EVENT_TIMER, ch);
457 			chx_rxidle(fi, event, arg);
458 		} else {
459 			struct net_device *dev = ch->netdev;
460 			struct ctcm_priv *priv = dev->ml_priv;
461 			fsm_newstate(fi, CTC_STATE_TXIDLE);
462 			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
463 		}
464 		return;
465 	}
466 	/*
467 	 * Don't setup a timer for receiving the initial RX frame
468 	 * if in compatibility mode, since VM TCP delays the initial
469 	 * frame until it has some data to send.
470 	 */
471 	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
472 	    (ch->protocol != CTCM_PROTO_S390))
473 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
474 
475 	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
476 	ch->ccw[1].count = 2;	/* Transfer only length */
477 
478 	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
479 		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
480 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
481 					(unsigned long)ch, 0xff, 0);
482 	if (rc != 0) {
483 		fsm_deltimer(&ch->timer);
484 		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
485 		ctcm_ccw_check_rc(ch, rc, "init IO");
486 	}
487 	/*
488 	 * If in compatibility mode since we don't setup a timer, we
489 	 * also signal RX channel up immediately. This enables us
490 	 * to send packets early which in turn usually triggers some
491  * reply from VM TCP which brings up the RX channel to its
492 	 * final state.
493 	 */
494 	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
495 	    (ch->protocol == CTCM_PROTO_S390)) {
496 		struct net_device *dev = ch->netdev;
497 		struct ctcm_priv *priv = dev->ml_priv;
498 		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
499 	}
500 }
501 
502 /**
503  * Got initial data, check it. If OK,
504  * notify device statemachine that we are up and
505  * running.
506  *
507  * fi		An instance of a channel statemachine.
508  * event	The event, just happened.
509  * arg		Generic pointer, casted from channel * upon call.
510  */
511 static void chx_rxidle(fsm_instance *fi, int event, void *arg)
512 {
513 	struct channel *ch = arg;
514 	struct net_device *dev = ch->netdev;
515 	struct ctcm_priv *priv = dev->ml_priv;
516 	__u16 buflen;
517 	int rc;
518 
519 	fsm_deltimer(&ch->timer);
520 	buflen = *((__u16 *)ch->trans_skb->data);
521 	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
522 			__func__, dev->name, buflen);
523 
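	/*
	 * The peer answered the handshake with its own initial block
	 * header.  If it holds at least CTCM_INITIAL_BLOCKLEN the link is
	 * considered up and a full-sized read is started; otherwise the
	 * initial I/O is redone via chx_firstio().
	 */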
524 	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
525 		if (ctcm_checkalloc_buffer(ch))
526 			return;
527 		ch->ccw[1].count = ch->max_bufsize;
528 		fsm_newstate(fi, CTC_STATE_RXIDLE);
529 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
530 						(unsigned long)ch, 0xff, 0);
531 		if (rc != 0) {
532 			fsm_newstate(fi, CTC_STATE_RXINIT);
533 			ctcm_ccw_check_rc(ch, rc, "initial RX");
534 		} else
535 			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
536 	} else {
537 		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
538 				__func__, dev->name,
539 					buflen, CTCM_INITIAL_BLOCKLEN);
540 		chx_firstio(fi, event, arg);
541 	}
542 }
543 
544 /**
545  * Set channel into extended mode.
546  *
547  * fi		An instance of a channel statemachine.
548  * event	The event, just happened.
549  * arg		Generic pointer, casted from channel * upon call.
550  */
551 static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
552 {
553 	struct channel *ch = arg;
554 	int rc;
555 	unsigned long saveflags = 0;
556 	int timeout = CTCM_TIME_5_SEC;
557 
558 	fsm_deltimer(&ch->timer);
559 	if (IS_MPC(ch)) {
560 		timeout = 1500;
561 		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
562 				__func__, smp_processor_id(), ch, ch->id);
563 	}
564 	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
565 	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
566 	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);
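	/*
	 * ch->ccw[6..7] hold the set-(extended-)mode channel program
	 * prepared during channel setup; it is (re)started here.
	 */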
567 
568 	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
569 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
570 			/* Such conditional locking is nondeterministic in
571 			 * static view. => ignore sparse warnings here. */
572 
573 	rc = ccw_device_start(ch->cdev, &ch->ccw[6],
574 					(unsigned long)ch, 0xff, 0);
575 	if (event == CTC_EVENT_TIMER)	/* see above comments */
576 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
577 	if (rc != 0) {
578 		fsm_deltimer(&ch->timer);
579 		fsm_newstate(fi, CTC_STATE_STARTWAIT);
580 		ctcm_ccw_check_rc(ch, rc, "set Mode");
581 	} else
582 		ch->retry = 0;
583 }
584 
585 /**
586  * Set up the channel.
587  *
588  * fi		An instance of a channel statemachine.
589  * event	The event, just happened.
590  * arg		Generic pointer, casted from channel * upon call.
591  */
592 static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
593 {
594 	struct channel *ch	= arg;
595 	unsigned long saveflags;
596 	int rc;
597 
598 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
599 		CTCM_FUNTAIL, ch->id,
600 		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");
601 
602 	if (ch->trans_skb != NULL) {
603 		clear_normalized_cda(&ch->ccw[1]);
604 		dev_kfree_skb(ch->trans_skb);
605 		ch->trans_skb = NULL;
606 	}
607 	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
608 		ch->ccw[1].cmd_code = CCW_CMD_READ;
609 		ch->ccw[1].flags = CCW_FLAG_SLI;
610 		ch->ccw[1].count = 0;
611 	} else {
612 		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
613 		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
614 		ch->ccw[1].count = 0;
615 	}
616 	if (ctcm_checkalloc_buffer(ch)) {
617 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
618 			"%s(%s): %s trans_skb alloc delayed "
619 			"until first transfer",
620 			CTCM_FUNTAIL, ch->id,
621 			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
622 				"RX" : "TX");
623 	}
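	/*
	 * Build the basic channel program: ccw[0] PREPARE, ccw[1] READ or
	 * WRITE (set above), ccw[2] NOOP.  ccw[3..5] receive a copy so the
	 * program can be restarted later (e.g. on TX retry) without being
	 * rebuilt.
	 */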
624 	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
625 	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
626 	ch->ccw[0].count = 0;
627 	ch->ccw[0].cda = 0;
628 	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* combined CE + DE */
629 	ch->ccw[2].flags = CCW_FLAG_SLI;
630 	ch->ccw[2].count = 0;
631 	ch->ccw[2].cda = 0;
632 	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
633 	ch->ccw[4].cda = 0;
634 	ch->ccw[4].flags &= ~CCW_FLAG_IDA;
635 
636 	fsm_newstate(fi, CTC_STATE_STARTWAIT);
637 	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
638 	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
639 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
640 	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
641 	if (rc != 0) {
642 		if (rc != -EBUSY)
643 			fsm_deltimer(&ch->timer);
644 		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
645 	}
646 }
647 
648 /**
649  * Shut down a channel.
650  *
651  * fi		An instance of a channel statemachine.
652  * event	The event, just happened.
653  * arg		Generic pointer, casted from channel * upon call.
654  */
655 static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
656 {
657 	struct channel *ch = arg;
658 	unsigned long saveflags = 0;
659 	int rc;
660 	int oldstate;
661 
662 	fsm_deltimer(&ch->timer);
663 	if (IS_MPC(ch))
664 		fsm_deltimer(&ch->sweep_timer);
665 
666 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
667 
668 	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
669 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
670 			/* Such conditional locking is nondeterministic in
671 			 * static view. => ignore sparse warnings here. */
672 	oldstate = fsm_getstate(fi);
673 	fsm_newstate(fi, CTC_STATE_TERM);
674 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
675 
676 	if (event == CTC_EVENT_STOP)
677 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
678 			/* see remark above about conditional locking */
679 
680 	if (rc != 0 && rc != -EBUSY) {
681 		fsm_deltimer(&ch->timer);
682 		if (event != CTC_EVENT_STOP) {
683 			fsm_newstate(fi, oldstate);
684 			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
685 		}
686 	}
687 }
688 
689 /**
690  * Cleanup helper for ctcm_chx_fail and ctcm_chx_stopped:
691  * clean up the channel's queue and notify the interface statemachine.
692  *
693  * fi		An instance of a channel statemachine.
694  * state	The next state (depending on caller).
695  * ch		The channel to operate on.
696  */
697 static void ctcm_chx_cleanup(fsm_instance *fi, int state,
698 		struct channel *ch)
699 {
700 	struct net_device *dev = ch->netdev;
701 	struct ctcm_priv *priv = dev->ml_priv;
702 
703 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
704 			"%s(%s): %s[%d]\n",
705 			CTCM_FUNTAIL, dev->name, ch->id, state);
706 
707 	fsm_deltimer(&ch->timer);
708 	if (IS_MPC(ch))
709 		fsm_deltimer(&ch->sweep_timer);
710 
711 	fsm_newstate(fi, state);
712 	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
713 		clear_normalized_cda(&ch->ccw[1]);
714 		dev_kfree_skb_any(ch->trans_skb);
715 		ch->trans_skb = NULL;
716 	}
717 
718 	ch->th_seg = 0x00;
719 	ch->th_seq_num = 0x00;
720 	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
721 		skb_queue_purge(&ch->io_queue);
722 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
723 	} else {
724 		ctcm_purge_skb_queue(&ch->io_queue);
725 		if (IS_MPC(ch))
726 			ctcm_purge_skb_queue(&ch->sweep_queue);
727 		spin_lock(&ch->collect_lock);
728 		ctcm_purge_skb_queue(&ch->collect_queue);
729 		ch->collect_len = 0;
730 		spin_unlock(&ch->collect_lock);
731 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
732 	}
733 }
734 
735 /**
736  * A channel has successfully been halted.
737  * Clean up its queue and notify the interface statemachine.
738  *
739  * fi		An instance of a channel statemachine.
740  * event	The event, just happened.
741  * arg		Generic pointer, casted from channel * upon call.
742  */
743 static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
744 {
745 	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
746 }
747 
748 /**
749  * A stop command from the device statemachine arrived while we are in
750  * the not-operational state. Set the state to stopped.
751  *
752  * fi		An instance of a channel statemachine.
753  * event	The event, just happened.
754  * arg		Generic pointer, casted from channel * upon call.
755  */
756 static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
757 {
758 	fsm_newstate(fi, CTC_STATE_STOPPED);
759 }
760 
761 /**
762  * A machine check has occurred: no path, not-operational status, or the
763  * device is gone.
764  * Clean up the queue and notify the interface statemachine.
765  *
766  * fi		An instance of a channel statemachine.
767  * event	The event, just happened.
768  * arg		Generic pointer, casted from channel * upon call.
769  */
770 static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
771 {
772 	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
773 }
774 
775 /**
776  * Handle error during setup of channel.
777  *
778  * fi		An instance of a channel statemachine.
779  * event	The event, just happened.
780  * arg		Generic pointer, casted from channel * upon call.
781  */
782 static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
783 {
784 	struct channel *ch = arg;
785 	struct net_device *dev = ch->netdev;
786 	struct ctcm_priv *priv = dev->ml_priv;
787 
788 	/*
789 	 * Special case: Got UC_RCRESET on setmode.
790 	 * This means that the remote side isn't set up yet. In this case
791 	 * simply retry after a short delay (CTCM_TIME_5_SEC)...
792 	 */
793 	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
794 	    ((event == CTC_EVENT_UC_RCRESET) ||
795 	     (event == CTC_EVENT_UC_RSRESET))) {
796 		fsm_newstate(fi, CTC_STATE_STARTRETRY);
797 		fsm_deltimer(&ch->timer);
798 		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
799 		if (!IS_MPC(ch) &&
800 		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
801 			int rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
802 			if (rc != 0)
803 				ctcm_ccw_check_rc(ch, rc,
804 					"HaltIO in chx_setuperr");
805 		}
806 		return;
807 	}
808 
809 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
810 		"%s(%s) : %s error during %s channel setup state=%s\n",
811 		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
812 		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
813 		fsm_getstate_str(fi));
814 
815 	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
816 		fsm_newstate(fi, CTC_STATE_RXERR);
817 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
818 	} else {
819 		fsm_newstate(fi, CTC_STATE_TXERR);
820 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
821 	}
822 }
823 
824 /**
825  * Restart a channel after an error.
826  *
827  * fi		An instance of a channel statemachine.
828  * event	The event, just happened.
829  * arg		Generic pointer, casted from channel * upon call.
830  */
831 static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
832 {
833 	struct channel *ch = arg;
834 	struct net_device *dev = ch->netdev;
835 	unsigned long saveflags = 0;
836 	int oldstate;
837 	int rc;
838 
839 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
840 		"%s: %s[%d] of %s\n",
841 			CTCM_FUNTAIL, ch->id, event, dev->name);
842 
843 	fsm_deltimer(&ch->timer);
844 
845 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
846 	oldstate = fsm_getstate(fi);
847 	fsm_newstate(fi, CTC_STATE_STARTWAIT);
848 	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
849 		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
850 			/* Such conditional locking is a known problem for
851 			 * sparse because it's nondeterministic in static view.
852 			 * Warnings should be ignored here. */
853 	rc = ccw_device_halt(ch->cdev, (unsigned long)ch);
854 	if (event == CTC_EVENT_TIMER)
855 		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
856 	if (rc != 0) {
857 		if (rc != -EBUSY) {
858 			fsm_deltimer(&ch->timer);
859 			fsm_newstate(fi, oldstate);
860 		}
861 		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
862 	}
863 }
864 
865 /**
866  * Handle error during RX initial handshake (exchange of
867  * 0-length block header)
868  *
869  * fi		An instance of a channel statemachine.
870  * event	The event, just happened.
871  * arg		Generic pointer, casted from channel * upon call.
872  */
873 static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
874 {
875 	struct channel *ch = arg;
876 	struct net_device *dev = ch->netdev;
877 	struct ctcm_priv *priv = dev->ml_priv;
878 
879 	if (event == CTC_EVENT_TIMER) {
880 		if (!IS_MPCDEV(dev))
881 			/* TODO : check if MPC deletes timer somewhere */
882 			fsm_deltimer(&ch->timer);
883 		if (ch->retry++ < 3)
884 			ctcm_chx_restart(fi, event, arg);
885 		else {
886 			fsm_newstate(fi, CTC_STATE_RXERR);
887 			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
888 		}
889 	} else {
890 		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
891 			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
892 			ctc_ch_event_names[event], fsm_getstate_str(fi));
893 
894 		dev_warn(&dev->dev,
895 			"Initialization failed with RX/TX init handshake "
896 			"error %s\n", ctc_ch_event_names[event]);
897 	}
898 }
899 
900 /**
901  * Notify device statemachine if we gave up initialization
902  * of RX channel.
903  *
904  * fi		An instance of a channel statemachine.
905  * event	The event, just happened.
906  * arg		Generic pointer, casted from channel * upon call.
907  */
908 static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
909 {
910 	struct channel *ch = arg;
911 	struct net_device *dev = ch->netdev;
912 	struct ctcm_priv *priv = dev->ml_priv;
913 
914 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
915 			"%s(%s): RX %s busy, init. fail",
916 				CTCM_FUNTAIL, dev->name, ch->id);
917 	fsm_newstate(fi, CTC_STATE_RXERR);
918 	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
919 }
920 
921 /**
922  * Handle RX Unit check remote reset (remote disconnected)
923  *
924  * fi		An instance of a channel statemachine.
925  * event	The event, just happened.
926  * arg		Generic pointer, casted from channel * upon call.
927  */
928 static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
929 {
930 	struct channel *ch = arg;
931 	struct channel *ch2;
932 	struct net_device *dev = ch->netdev;
933 	struct ctcm_priv *priv = dev->ml_priv;
934 
935 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
936 			"%s: %s: remote disconnect - re-init ...",
937 				CTCM_FUNTAIL, dev->name);
938 	fsm_deltimer(&ch->timer);
939 	/*
940 	 * Notify device statemachine
941 	 */
942 	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
943 	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
944 
945 	fsm_newstate(fi, CTC_STATE_DTERM);
946 	ch2 = priv->channel[CTCM_WRITE];
947 	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);
948 
949 	ccw_device_halt(ch->cdev, (unsigned long)ch);
950 	ccw_device_halt(ch2->cdev, (unsigned long)ch2);
951 }
952 
953 /**
954  * Handle error during TX channel initialization.
955  *
956  * fi		An instance of a channel statemachine.
957  * event	The event, just happened.
958  * arg		Generic pointer, casted from channel * upon call.
959  */
960 static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
961 {
962 	struct channel *ch = arg;
963 	struct net_device *dev = ch->netdev;
964 	struct ctcm_priv *priv = dev->ml_priv;
965 
966 	if (event == CTC_EVENT_TIMER) {
967 		fsm_deltimer(&ch->timer);
968 		if (ch->retry++ < 3)
969 			ctcm_chx_restart(fi, event, arg);
970 		else {
971 			fsm_newstate(fi, CTC_STATE_TXERR);
972 			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
973 		}
974 	} else {
975 		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
976 			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
977 			ctc_ch_event_names[event], fsm_getstate_str(fi));
978 
979 		dev_warn(&dev->dev,
980 			"Initialization failed with RX/TX init handshake "
981 			"error %s\n", ctc_ch_event_names[event]);
982 	}
983 }
984 
985 /**
986  * Handle TX timeout by retrying operation.
987  *
988  * fi		An instance of a channel statemachine.
989  * event	The event, just happened.
990  * arg		Generic pointer, casted from channel * upon call.
991  */
992 static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
993 {
994 	struct channel *ch = arg;
995 	struct net_device *dev = ch->netdev;
996 	struct ctcm_priv *priv = dev->ml_priv;
997 	struct sk_buff *skb;
998 
999 	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
1000 			__func__, smp_processor_id(), ch, ch->id);
1001 
1002 	fsm_deltimer(&ch->timer);
1003 	if (ch->retry++ > 3) {
1004 		struct mpc_group *gptr = priv->mpcg;
1005 		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1006 				"%s: %s: retries exceeded",
1007 					CTCM_FUNTAIL, ch->id);
1008 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1009 		/* call restart if not MPC or if MPC and mpcg fsm is ready.
1010 			use gptr as mpc indicator */
1011 		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
1012 			ctcm_chx_restart(fi, event, arg);
1013 				goto done;
1014 	}
1015 
1016 	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
1017 			"%s : %s: retry %d",
1018 				CTCM_FUNTAIL, ch->id, ch->retry);
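	/*
	 * Retry the transmission: re-map the first queued skb through an
	 * IDAL into ccw[4] and restart the copied write channel program at
	 * ccw[3].
	 */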
1019 	skb = skb_peek(&ch->io_queue);
1020 	if (skb) {
1021 		int rc = 0;
1022 		unsigned long saveflags = 0;
1023 		clear_normalized_cda(&ch->ccw[4]);
1024 		ch->ccw[4].count = skb->len;
1025 		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
1026 			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
1027 				"%s: %s: IDAL alloc failed",
1028 						CTCM_FUNTAIL, ch->id);
1029 			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1030 			ctcm_chx_restart(fi, event, arg);
1031 				goto done;
1032 		}
1033 		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
1034 		if (event == CTC_EVENT_TIMER) /* for TIMER not yet locked */
1035 			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1036 			/* Such conditional locking is a known problem for
1037 			 * sparse because it's nondeterministic in static view.
1038 			 * Warnings should be ignored here. */
1039 		if (do_debug_ccw)
1040 			ctcmpc_dumpit((char *)&ch->ccw[3],
1041 					sizeof(struct ccw1) * 3);
1042 
1043 		rc = ccw_device_start(ch->cdev, &ch->ccw[3],
1044 						(unsigned long)ch, 0xff, 0);
1045 		if (event == CTC_EVENT_TIMER)
1046 			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
1047 					saveflags);
1048 		if (rc != 0) {
1049 			fsm_deltimer(&ch->timer);
1050 			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
1051 			ctcm_purge_skb_queue(&ch->io_queue);
1052 		}
1053 	}
1054 done:
1055 	return;
1056 }
1057 
1058 /**
1059  * Handle fatal errors during an I/O command.
1060  *
1061  * fi		An instance of a channel statemachine.
1062  * event	The event, just happened.
1063  * arg		Generic pointer, casted from channel * upon call.
1064  */
1065 static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
1066 {
1067 	struct channel *ch = arg;
1068 	struct net_device *dev = ch->netdev;
1069 	struct ctcm_priv *priv = dev->ml_priv;
1070 	int rd = CHANNEL_DIRECTION(ch->flags);
1071 
1072 	fsm_deltimer(&ch->timer);
1073 	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
1074 		"%s: %s: %s unrecoverable channel error",
1075 			CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");
1076 
1077 	if (IS_MPC(ch)) {
1078 		priv->stats.tx_dropped++;
1079 		priv->stats.tx_errors++;
1080 	}
1081 	if (rd == CTCM_READ) {
1082 		fsm_newstate(fi, CTC_STATE_RXERR);
1083 		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
1084 	} else {
1085 		fsm_newstate(fi, CTC_STATE_TXERR);
1086 		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
1087 	}
1088 }
1089 
1090 /*
1091  * The ctcm statemachine for a channel.
1092  */
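/*
 * Each fsm_node is a (state, event, action) triple.  fsm_event() looks up
 * the entry matching the channel's current state and the posted event and
 * runs the action with the channel as argument; events without an entry
 * are ignored.
 */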
1093 const fsm_node ch_fsm[] = {
1094 	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
1095 	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
1096 	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1097 	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1098 
1099 	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
1100 	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
1101 	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1102 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1103 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
1104 
1105 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1106 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1107 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1108 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
1109 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1110 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1111 
1112 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1113 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1114 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1115 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1116 
1117 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1118 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1119 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	chx_firstio  },
1120 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1121 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1122 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1123 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1124 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1125 
1126 	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1127 	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1128 	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	chx_rxidle  },
1129 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
1130 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
1131 	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
1132 	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
1133 	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1134 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	chx_firstio  },
1135 	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1136 
1137 	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1138 	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1139 	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	chx_rx  },
1140 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
1141 	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1142 	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1143 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	chx_rx  },
1144 
1145 	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1146 	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1147 	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
1148 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
1149 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
1150 	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
1151 	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1152 	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1153 
1154 	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1155 	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1156 	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	chx_firstio  },
1157 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1158 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1159 	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1160 	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1161 
1162 	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
1163 	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
1164 	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
1165 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1166 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1167 	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1168 
1169 	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1170 	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
1171 	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1172 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
1173 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
1174 	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1175 
1176 	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
1177 	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
1178 	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	chx_txdone  },
1179 	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_txretry  },
1180 	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_txretry  },
1181 	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
1182 	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1183 	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1184 
1185 	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1186 	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1187 	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1188 	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1189 };
1190 
1191 int ch_fsm_len = ARRAY_SIZE(ch_fsm);
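
/*
 * A minimal sketch of how this table is wired up (the authoritative call
 * lives in ctcm_main.c; constant names there may differ slightly):
 *
 *	ch->fsm = init_fsm(ch->id, ctc_ch_state_names, ctc_ch_event_names,
 *			   CTC_NR_STATES, CTC_NR_EVENTS,
 *			   ch_fsm, ch_fsm_len, GFP_KERNEL);
 *
 * Interrupt and device-layer code then drives the channel purely through
 * fsm_event(ch->fsm, CTC_EVENT_..., ch).
 */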
1192 
1193 /*
1194  * MPC actions for the mpc channel statemachine.
1195  * Handling of the MPC protocol requires an extra statemachine and
1196  * extra actions, which are prefixed ctcmpc_.
1197  * The ctc_ch_states and ctc_ch_state_names,
1198  * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
1199  * which are extended by some elements.
1200  */
1201 
1202 /*
1203  * Actions for mpc channel statemachine.
1204  */
1205 
1206 /**
1207  * Normal data has been sent. Free the corresponding
1208  * skb (it's in io_queue), reset dev->tbusy and
1209  * revert to idle state.
1210  *
1211  * fi		An instance of a channel statemachine.
1212  * event	The event, just happened.
1213  * arg		Generic pointer, casted from channel * upon call.
1214  */
1215 static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
1216 {
1217 	struct channel		*ch = arg;
1218 	struct net_device	*dev = ch->netdev;
1219 	struct ctcm_priv	*priv = dev->ml_priv;
1220 	struct mpc_group	*grp = priv->mpcg;
1221 	struct sk_buff		*skb;
1222 	int		first = 1;
1223 	int		i;
1224 	__u32		data_space;
1225 	unsigned long	duration;
1226 	struct sk_buff	*peekskb;
1227 	int		rc;
1228 	struct th_header *header;
1229 	struct pdu	*p_header;
1230 	unsigned long done_stamp = jiffies;
1231 
1232 	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
1233 			__func__, dev->name, smp_processor_id());
1234 
1235 	duration = done_stamp - ch->prof.send_stamp;
1236 	if (duration > ch->prof.tx_time)
1237 		ch->prof.tx_time = duration;
1238 
1239 	if (ch->irb->scsw.cmd.count != 0)
1240 		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
1241 			"%s(%s): TX not complete, remaining %d bytes",
1242 			     CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
1243 	fsm_deltimer(&ch->timer);
1244 	while ((skb = skb_dequeue(&ch->io_queue))) {
1245 		priv->stats.tx_packets++;
1246 		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
1247 		if (first) {
1248 			priv->stats.tx_bytes += 2;
1249 			first = 0;
1250 		}
1251 		atomic_dec(&skb->users);
1252 		dev_kfree_skb_irq(skb);
1253 	}
1254 	spin_lock(&ch->collect_lock);
1255 	clear_normalized_cda(&ch->ccw[4]);
1256 	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
1257 		spin_unlock(&ch->collect_lock);
1258 		fsm_newstate(fi, CTC_STATE_TXIDLE);
1259 				goto done;
1260 	}
1261 
1262 	if (ctcm_checkalloc_buffer(ch)) {
1263 		spin_unlock(&ch->collect_lock);
1264 				goto done;
1265 	}
1266 	ch->trans_skb->data = ch->trans_skb_data;
1267 	skb_reset_tail_pointer(ch->trans_skb);
1268 	ch->trans_skb->len = 0;
1269 	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
1270 		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
1271 	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
1272 		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
1273 	i = 0;
1274 	p_header = NULL;
1275 	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;
1276 
1277 	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
1278 		       " data_space:%04x\n",
1279 		       __func__, data_space);
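	/*
	 * Coalesce the collect queue into trans_skb: each queued skb
	 * already starts with a PDU header whose flag byte is (re)written
	 * here (0x60 for SNAP frames, 0x20 otherwise, PDU_LAST on the
	 * final one); afterwards a single TH header carrying the next
	 * sequence number is pushed in front of the whole block.
	 */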
1280 
1281 	while ((skb = skb_dequeue(&ch->collect_queue))) {
1282 		memcpy(skb_put(ch->trans_skb, skb->len), skb->data, skb->len);
1283 		p_header = (struct pdu *)
1284 			(skb_tail_pointer(ch->trans_skb) - skb->len);
1285 		p_header->pdu_flag = 0x00;
1286 		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
1287 			p_header->pdu_flag |= 0x60;
1288 		else
1289 			p_header->pdu_flag |= 0x20;
1290 
1291 		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1292 				__func__, ch->trans_skb->len);
1293 		CTCM_PR_DBGDATA("%s: pdu header and data for up"
1294 				" to 32 bytes sent to vtam\n", __func__);
1295 		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));
1296 
1297 		ch->collect_len -= skb->len;
1298 		data_space -= skb->len;
1299 		priv->stats.tx_packets++;
1300 		priv->stats.tx_bytes += skb->len;
1301 		atomic_dec(&skb->users);
1302 		dev_kfree_skb_any(skb);
1303 		peekskb = skb_peek(&ch->collect_queue);
1304 		if (!peekskb || peekskb->len > data_space)
1305 			break;
1306 		i++;
1307 	}
1308 	/* p_header points to the last one we handled */
1309 	if (p_header)
1310 		p_header->pdu_flag |= PDU_LAST;	/*Say it's the last one*/
1311 	header = kzalloc(TH_HEADER_LENGTH, gfp_type());
1312 	if (!header) {
1313 		spin_unlock(&ch->collect_lock);
1314 		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1315 				goto done;
1316 	}
1317 	header->th_ch_flag = TH_HAS_PDU;  /* Normal data */
1318 	ch->th_seq_num++;
1319 	header->th_seq_num = ch->th_seq_num;
1320 
1321 	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
1322 					__func__, ch->th_seq_num);
1323 
1324 	memcpy(skb_push(ch->trans_skb, TH_HEADER_LENGTH), header,
1325 		TH_HEADER_LENGTH);	/* put the TH on the packet */
1326 
1327 	kfree(header);
1328 
1329 	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
1330 		       __func__, ch->trans_skb->len);
1331 	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
1332 			"data to vtam from collect_q\n", __func__);
1333 	CTCM_D3_DUMP((char *)ch->trans_skb->data,
1334 				min_t(int, ch->trans_skb->len, 50));
1335 
1336 	spin_unlock(&ch->collect_lock);
1337 	clear_normalized_cda(&ch->ccw[1]);
1338 
1339 	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
1340 			(void *)(unsigned long)ch->ccw[1].cda,
1341 			ch->trans_skb->data);
1342 	ch->ccw[1].count = ch->max_bufsize;
1343 
1344 	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
1345 		dev_kfree_skb_any(ch->trans_skb);
1346 		ch->trans_skb = NULL;
1347 		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
1348 			"%s: %s: IDAL alloc failed",
1349 				CTCM_FUNTAIL, ch->id);
1350 		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1351 		return;
1352 	}
1353 
1354 	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
1355 			(void *)(unsigned long)ch->ccw[1].cda,
1356 			ch->trans_skb->data);
1357 
1358 	ch->ccw[1].count = ch->trans_skb->len;
1359 	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
1360 	ch->prof.send_stamp = jiffies;
1361 	if (do_debug_ccw)
1362 		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1363 	rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1364 					(unsigned long)ch, 0xff, 0);
1365 	ch->prof.doios_multi++;
1366 	if (rc != 0) {
1367 		priv->stats.tx_dropped += i;
1368 		priv->stats.tx_errors += i;
1369 		fsm_deltimer(&ch->timer);
1370 		ctcm_ccw_check_rc(ch, rc, "chained TX");
1371 	}
1372 done:
1373 	ctcm_clear_busy(dev);
1374 	return;
1375 }
1376 
1377 /**
1378  * Got normal data, check it for sanity, queue it up, allocate a new buffer,
1379  * trigger the bottom half, and initiate the next read.
1380  *
1381  * fi		An instance of a channel statemachine.
1382  * event	The event, just happened.
1383  * arg		Generic pointer, casted from channel * upon call.
1384  */
1385 static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
1386 {
1387 	struct channel		*ch = arg;
1388 	struct net_device	*dev = ch->netdev;
1389 	struct ctcm_priv	*priv = dev->ml_priv;
1390 	struct mpc_group	*grp = priv->mpcg;
1391 	struct sk_buff		*skb = ch->trans_skb;
1392 	struct sk_buff		*new_skb;
1393 	unsigned long		saveflags = 0;	/* avoids compiler warning */
1394 	int len	= ch->max_bufsize - ch->irb->scsw.cmd.count;
1395 
1396 	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
1397 			CTCM_FUNTAIL, dev->name, smp_processor_id(),
1398 				ch->id, ch->max_bufsize, len);
1399 	fsm_deltimer(&ch->timer);
1400 
1401 	if (skb == NULL) {
1402 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1403 			"%s(%s): TRANS_SKB = NULL",
1404 				CTCM_FUNTAIL, dev->name);
1405 			goto again;
1406 	}
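	/*
	 * Blocks shorter than a TH header are dropped.  Otherwise the data
	 * is copied into a fresh skb and queued for the channel tasklet,
	 * unless the MPC group is resetting/inoperative: FLOWC/READY pass
	 * the whole block on, the XID negotiation states only the
	 * TH + XID2 portion.  The read is then restarted below.
	 */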
1407 
1408 	if (len < TH_HEADER_LENGTH) {
1409 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1410 				"%s(%s): packet length %d too short",
1411 					CTCM_FUNTAIL, dev->name, len);
1412 		priv->stats.rx_dropped++;
1413 		priv->stats.rx_length_errors++;
1414 	} else {
1415 		/* must have valid th header or game over */
1416 		__u32	block_len = len;
1417 		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
1418 		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);
1419 
1420 		if (new_skb == NULL) {
1421 			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1422 				"%s(%s): skb allocation failed",
1423 						CTCM_FUNTAIL, dev->name);
1424 			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
1425 					goto again;
1426 		}
1427 		switch (fsm_getstate(grp->fsm)) {
1428 		case MPCG_STATE_RESET:
1429 		case MPCG_STATE_INOP:
1430 			dev_kfree_skb_any(new_skb);
1431 			break;
1432 		case MPCG_STATE_FLOWC:
1433 		case MPCG_STATE_READY:
1434 			memcpy(skb_put(new_skb, block_len),
1435 					       skb->data, block_len);
1436 			skb_queue_tail(&ch->io_queue, new_skb);
1437 			tasklet_schedule(&ch->ch_tasklet);
1438 			break;
1439 		default:
1440 			memcpy(skb_put(new_skb, len), skb->data, len);
1441 			skb_queue_tail(&ch->io_queue, new_skb);
1442 			tasklet_hi_schedule(&ch->ch_tasklet);
1443 			break;
1444 		}
1445 	}
1446 
1447 again:
1448 	switch (fsm_getstate(grp->fsm)) {
1449 	int rc, dolock;
1450 	case MPCG_STATE_FLOWC:
1451 	case MPCG_STATE_READY:
1452 		if (ctcm_checkalloc_buffer(ch))
1453 			break;
1454 		ch->trans_skb->data = ch->trans_skb_data;
1455 		skb_reset_tail_pointer(ch->trans_skb);
1456 		ch->trans_skb->len = 0;
1457 		ch->ccw[1].count = ch->max_bufsize;
1458 		if (do_debug_ccw)
1459 			ctcmpc_dumpit((char *)&ch->ccw[0],
1460 					sizeof(struct ccw1) * 3);
1461 		dolock = !in_irq();
1462 		if (dolock)
1463 			spin_lock_irqsave(
1464 				get_ccwdev_lock(ch->cdev), saveflags);
1465 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1466 						(unsigned long)ch, 0xff, 0);
1467 		if (dolock) /* see remark about conditional locking */
1468 			spin_unlock_irqrestore(
1469 				get_ccwdev_lock(ch->cdev), saveflags);
1470 		if (rc != 0)
1471 			ctcm_ccw_check_rc(ch, rc, "normal RX");
1472 	default:
1473 		break;
1474 	}
1475 
1476 	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
1477 			__func__, dev->name, ch, ch->id);
1478 
1479 }
1480 
1481 /**
1482  * Initialize the connection (MPC variant of chx_firstio).
1483  *
1484  * fi		An instance of a channel statemachine.
1485  * event	The event, just happened.
1486  * arg		Generic pointer, casted from channel * upon call.
1487  */
1488 static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
1489 {
1490 	struct channel		*ch = arg;
1491 	struct net_device	*dev = ch->netdev;
1492 	struct ctcm_priv	*priv = dev->ml_priv;
1493 	struct mpc_group	*gptr = priv->mpcg;
1494 
1495 	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
1496 				__func__, ch->id, ch);
1497 
1498 	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
1499 			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
1500 			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
1501 			fsm_getstate(gptr->fsm), ch->protocol);
1502 
1503 	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
1504 		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");
1505 
1506 	fsm_deltimer(&ch->timer);
1507 	if (ctcm_checkalloc_buffer(ch))
1508 				goto done;
1509 
1510 	switch (fsm_getstate(fi)) {
1511 	case CTC_STATE_STARTRETRY:
1512 	case CTC_STATE_SETUPWAIT:
1513 		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
1514 			ctcmpc_chx_rxidle(fi, event, arg);
1515 		} else {
1516 			fsm_newstate(fi, CTC_STATE_TXIDLE);
1517 			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
1518 		}
1519 				goto done;
1520 	default:
1521 		break;
1522 	}
1523 
1524 	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
1525 		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
1526 
1527 done:
1528 	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
1529 				__func__, ch->id, ch);
1530 	return;
1531 }
1532 
1533 /**
1534  * Got initial data, check it. If OK,
1535  * notify device statemachine that we are up and
1536  * running.
1537  *
1538  * fi		An instance of a channel statemachine.
1539  * event	The event, just happened.
1540  * arg		Generic pointer, casted from channel * upon call.
1541  */
1542 void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
1543 {
1544 	struct channel *ch = arg;
1545 	struct net_device *dev = ch->netdev;
1546 	struct ctcm_priv  *priv = dev->ml_priv;
1547 	struct mpc_group  *grp = priv->mpcg;
1548 	int rc;
1549 	unsigned long saveflags = 0;	/* avoids compiler warning */
1550 
1551 	fsm_deltimer(&ch->timer);
1552 	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
1553 			__func__, ch->id, dev->name, smp_processor_id(),
1554 				fsm_getstate(fi), fsm_getstate(grp->fsm));
1555 
1556 	fsm_newstate(fi, CTC_STATE_RXIDLE);
1557 	/* XID processing complete */
1558 
1559 	switch (fsm_getstate(grp->fsm)) {
1560 	case MPCG_STATE_FLOWC:
1561 	case MPCG_STATE_READY:
1562 		if (ctcm_checkalloc_buffer(ch))
1563 				goto done;
1564 		ch->trans_skb->data = ch->trans_skb_data;
1565 		skb_reset_tail_pointer(ch->trans_skb);
1566 		ch->trans_skb->len = 0;
1567 		ch->ccw[1].count = ch->max_bufsize;
1568 		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
1569 		if (event == CTC_EVENT_START)
1570 			/* see remark about conditional locking */
1571 			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
1572 		rc = ccw_device_start(ch->cdev, &ch->ccw[0],
1573 						(unsigned long)ch, 0xff, 0);
1574 		if (event == CTC_EVENT_START)
1575 			spin_unlock_irqrestore(
1576 					get_ccwdev_lock(ch->cdev), saveflags);
1577 		if (rc != 0) {
1578 			fsm_newstate(fi, CTC_STATE_RXINIT);
1579 			ctcm_ccw_check_rc(ch, rc, "initial RX");
1580 				goto done;
1581 		}
1582 		break;
1583 	default:
1584 		break;
1585 	}
1586 
1587 	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
1588 done:
1589 	return;
1590 }
1591 
1592 /*
1593  * ctcmpc channel FSM action
1594  * called from several points in ctcmpc_ch_fsm
1595  * ctcmpc only
1596  */
1597 static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
1598 {
1599 	struct channel	  *ch     = arg;
1600 	struct net_device *dev    = ch->netdev;
1601 	struct ctcm_priv  *priv   = dev->ml_priv;
1602 	struct mpc_group  *grp = priv->mpcg;
1603 
1604 	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
1605 		__func__, dev->name, ch->id, ch, smp_processor_id(),
1606 			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1607 
1608 	switch (fsm_getstate(grp->fsm)) {
1609 	case MPCG_STATE_XID2INITW:
1610 		/* ok..start yside xid exchanges */
1611 		if (!ch->in_mpcgroup)
1612 			break;
1613 		if (fsm_getstate(ch->fsm) ==  CH_XID0_PENDING) {
1614 			fsm_deltimer(&grp->timer);
1615 			fsm_addtimer(&grp->timer,
1616 				MPC_XID_TIMEOUT_VALUE,
1617 				MPCG_EVENT_TIMER, dev);
1618 			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1619 
1620 		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1621 			/* attn rcvd before xid0 processed via bh */
1622 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1623 		break;
1624 	case MPCG_STATE_XID2INITX:
1625 	case MPCG_STATE_XID0IOWAIT:
1626 	case MPCG_STATE_XID0IOWAIX:
1627 		/* attn rcvd before xid0 was processed on this channel,
1628 		 * but the group is already mid-xid0 processing */
1629 		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
1630 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1631 		break;
1632 	case MPCG_STATE_XID7INITW:
1633 	case MPCG_STATE_XID7INITX:
1634 	case MPCG_STATE_XID7INITI:
1635 	case MPCG_STATE_XID7INITZ:
1636 		switch (fsm_getstate(ch->fsm)) {
1637 		case CH_XID7_PENDING:
1638 			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
1639 			break;
1640 		case CH_XID7_PENDING2:
1641 			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
1642 			break;
1643 		}
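		/* let the group FSM progress the XID7 exchange */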
1644 		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
1645 		break;
1646 	}
1647 
1648 	return;
1649 }
1650 
1651 /*
1652  * ctcmpc channel FSM action
1653  * called from one point in ctcmpc_ch_fsm
1654  * ctcmpc only
1655  */
1656 static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
1657 {
1658 	struct channel	  *ch     = arg;
1659 	struct net_device *dev    = ch->netdev;
1660 	struct ctcm_priv  *priv   = dev->ml_priv;
1661 	struct mpc_group  *grp    = priv->mpcg;
1662 
1663 	CTCM_PR_DEBUG("%s(%s): %s\n  ChState:%s GrpState:%s\n",
1664 			__func__, dev->name, ch->id,
1665 			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));
1666 
1667 	fsm_deltimer(&ch->timer);
1668 
1669 	switch (fsm_getstate(grp->fsm)) {
1670 	case MPCG_STATE_XID0IOWAIT:
1671 		/* vtam wants to be primary; start yside xid exchanges. */
1672 		/* only one attn-busy is received at a time, so do not	 */
1673 		/* change state each time				 */
1674 		grp->changed_side = 1;
1675 		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
1676 		break;
1677 	case MPCG_STATE_XID2INITW:
1678 		if (grp->changed_side == 1) {
1679 			grp->changed_side = 2;
1680 			break;
1681 		}
1682 		/* process began via call to establish_conn,	 */
1683 		/* so report failure instead of reverting to	 */
1684 		/* the ready-for-xid passive state		 */
1685 		if (grp->estconnfunc)
1686 			goto done;
1687 		/* this attnbusy is NOT the result of xside xid  */
1688 		/* collisions, so yside must have been triggered */
1689 		/* by an ATTN that was not intended to start XID */
1690 		/* processing. Revert to ready-for-xid and wait	 */
1691 		/* for an ATTN interrupt to signal xid start	 */
1692 		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
1693 			fsm_newstate(ch->fsm, CH_XID0_PENDING);
1694 			fsm_deltimer(&grp->timer);
1695 			goto done;
1696 		}
1697 		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1698 		goto done;
1699 	case MPCG_STATE_XID2INITX:
1700 		/* XID2 was received before ATTN Busy for second
1701 		   channel. Send yside xid for second channel.
1702 		*/
1703 		if (grp->changed_side == 1) {
1704 			grp->changed_side = 2;
1705 			break;
1706 		}
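		/* fall through */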
1707 	case MPCG_STATE_XID0IOWAIX:
1708 	case MPCG_STATE_XID7INITW:
1709 	case MPCG_STATE_XID7INITX:
1710 	case MPCG_STATE_XID7INITI:
1711 	case MPCG_STATE_XID7INITZ:
1712 	default:
1713 		/* multiple attn-busy interrupts indicate the sides   */
1714 		/* are too far out of sync and are certainly not part */
1715 		/* of valid mpc group negotiations.		       */
1716 		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1717 		goto done;
1718 	}
1719 
1720 	if (grp->changed_side == 1) {
1721 		fsm_deltimer(&grp->timer);
1722 		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
1723 			     MPCG_EVENT_TIMER, dev);
1724 	}
1725 	if (ch->in_mpcgroup)
1726 		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1727 	else
1728 		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
1729 			"%s(%s): channel %s not added to group",
1730 				CTCM_FUNTAIL, dev->name, ch->id);
1731 
1732 done:
1733 	return;
1734 }
1735 
1736 /*
1737  * ctcmpc channel FSM action
1738  * called from several points in ctcmpc_ch_fsm
1739  * ctcmpc only
1740  */
1741 static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
1742 {
1743 	struct channel	   *ch	   = arg;
1744 	struct net_device  *dev    = ch->netdev;
1745 	struct ctcm_priv   *priv   = dev->ml_priv;
1746 	struct mpc_group   *grp    = priv->mpcg;
1747 
1748 	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
1749 	return;
1750 }
1751 
1752 /*
1753  * ctcmpc channel FSM action
1754  * called from several points in ctcmpc_ch_fsm
1755  * ctcmpc only
1756  */
1757 static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
1758 {
1759 	struct channel *ach = arg;
1760 	struct net_device *dev = ach->netdev;
1761 	struct ctcm_priv *priv = dev->ml_priv;
1762 	struct mpc_group *grp = priv->mpcg;
1763 	struct channel *wch = priv->channel[CTCM_WRITE];
1764 	struct channel *rch = priv->channel[CTCM_READ];
1765 	struct sk_buff *skb;
1766 	struct th_sweep *header;
1767 	int rc = 0;
1768 	unsigned long saveflags = 0;
1769 
1770 	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
1771 			__func__, smp_processor_id(), ach, ach->id);
1772 
1773 	if (grp->in_sweep == 0)
1774 		goto done;
1775 
1776 	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
1777 				__func__, wch->th_seq_num);
1778 	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
1779 				__func__, rch->th_seq_num);
1780 
1781 	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
1782 		/* give the previous IO time to complete */
1783 		fsm_addtimer(&wch->sweep_timer,
1784 			200, CTC_EVENT_RSWEEP_TIMER, wch);
1785 		goto done;
1786 	}
1787 
1788 	skb = skb_dequeue(&wch->sweep_queue);
1789 	if (!skb)
1790 		goto done;
1791 
1792 	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
1793 		grp->in_sweep = 0;
1794 		ctcm_clear_busy_do(dev);
1795 		dev_kfree_skb_any(skb);
1796 		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
1797 		goto done;
1798 	} else {
1799 		atomic_inc(&skb->users);
1800 		skb_queue_tail(&wch->io_queue, skb);
1801 	}
1802 
1803 	/* send out the sweep */
1804 	wch->ccw[4].count = skb->len;
1805 
1806 	header = (struct th_sweep *)skb->data;
1807 	switch (header->th.th_ch_flag) {
1808 	case TH_SWEEP_REQ:
1809 		grp->sweep_req_pend_num--;
1810 		break;
1811 	case TH_SWEEP_RESP:
1812 		grp->sweep_rsp_pend_num--;
1813 		break;
1814 	}
1815 
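	/* record the last sent TH sequence number in the sweep frame */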
1816 	header->sw.th_last_seq = wch->th_seq_num;
1817 
1818 	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
1819 	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
1820 	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);
1821 
1822 	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
1823 	fsm_newstate(wch->fsm, CTC_STATE_TX);
1824 
1825 	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
1826 	wch->prof.send_stamp = jiffies;
1827 	rc = ccw_device_start(wch->cdev, &wch->ccw[3],
1828 					(unsigned long) wch, 0xff, 0);
1829 	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);
1830 
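	/* all sweep requests and responses accounted for: leave sweep mode */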
1831 	if ((grp->sweep_req_pend_num == 0) &&
1832 	    (grp->sweep_rsp_pend_num == 0)) {
1833 		grp->in_sweep = 0;
1834 		rch->th_seq_num = 0x00;
1835 		wch->th_seq_num = 0x00;
1836 		ctcm_clear_busy_do(dev);
1837 	}
1838 
1839 	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" ,
1840 			__func__, wch->th_seq_num, rch->th_seq_num);
1841 
1842 	if (rc != 0)
1843 		ctcm_ccw_check_rc(wch, rc, "send sweep");
1844 
1845 done:
1846 	return;
1847 }
1848 
1849 
1850 /*
1851  * The ctcmpc statemachine for a channel.
1852  */
1853 
1854 const fsm_node ctcmpc_ch_fsm[] = {
1855 	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop  },
1856 	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start  },
1857 	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1858 	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1859 	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1860 
1861 	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop  },
1862 	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop  },
1863 	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1864 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop  },
1865 	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start  },
1866 	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop  },
1867 	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop  },
1868 	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1869 
1870 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1871 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1872 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1873 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr  },
1874 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1875 	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1876 
1877 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1878 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1879 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
1880 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1881 	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1882 
1883 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1884 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop  },
1885 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
1886 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1887 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1888 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode  },
1889 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1890 	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1891 
1892 	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1893 	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
1894 	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle  },
1895 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr  },
1896 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr  },
1897 	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr  },
1898 	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail  },
1899 	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1900 	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio  },
1901 	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1902 
1903 	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop  },
1904 	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1905 	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1906 	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
1907 	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1908 	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1909 	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1910 	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1912 	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1913 
1914 	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1915 	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1916 	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1917 	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop  },
1918 	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1919 	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1920 	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1921 	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
1922 	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy  },
1923 	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1924 	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1925 
1926 	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1927 	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1928 	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1929 	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop  },
1930 	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1931 	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1932 	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1933 	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1934 	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1936 	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1937 	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1938 	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1939 
1940 	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1941 	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1942 	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1943 	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop  },
1944 	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1945 	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1946 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1947 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1948 	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1949 	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1950 	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1951 	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1952 
1953 	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1954 	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1955 	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1956 	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop  },
1957 	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1958 	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1959 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1960 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1961 	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1962 	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1963 	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1964 	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1965 
1966 	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1967 	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1968 	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1969 	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop  },
1970 	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1971 	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1972 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1973 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1974 	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1975 	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1976 	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1977 	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1978 
1979 	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1980 	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn  },
1981 	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1982 	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop  },
1983 	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1984 	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1985 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
1986 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr  },
1987 	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr  },
1988 	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal  },
1989 	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend  },
1990 	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
1991 
1992 	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
1993 	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
1994 	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx  },
1995 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc  },
1996 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
1997 	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
1998 	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
1999 	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx  },
2000 
2001 	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2002 	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop  },
2003 	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle  },
2004 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr  },
2005 	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr  },
2006 	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr  },
2007 	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2008 	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2009 	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2010 
2011 	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2012 	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop  },
2013 	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio  },
2014 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
2015 	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
2016 	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2017 	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2018 	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2019 
2020 	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop  },
2021 	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart  },
2022 	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped  },
2023 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
2024 	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
2025 	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2026 	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
2027 	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2028 
2029 	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2030 	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart  },
2031 	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode  },
2032 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop  },
2033 	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop  },
2034 	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2035 	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2036 
2037 	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio  },
2038 	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop  },
2039 	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone  },
2040 	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail  },
2041 	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail  },
2042 	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry  },
2043 	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2044 	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2045 	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
2046 	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail  },
2047 
2048 	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2049 	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio  },
2050 	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal  },
2051 	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2052 	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail  },
2053 };
2054 
2055 int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);
2056 
2057 /*
2058  * Actions for the interface statemachine.
2059  */
2060 
2061 /**
2062  * Start up channels by sending CTC_EVENT_START to each channel.
2063  *
2064  * fi		An instance of an interface statemachine.
2065  * event	The event that just happened.
2066  * arg		Generic pointer, cast from struct net_device * upon call.
2067  */
2068 static void dev_action_start(fsm_instance *fi, int event, void *arg)
2069 {
2070 	struct net_device *dev = arg;
2071 	struct ctcm_priv *priv = dev->ml_priv;
2072 	int direction;
2073 
2074 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2075 
2076 	fsm_deltimer(&priv->restart_timer);
2077 	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2078 	if (IS_MPC(priv))
2079 		priv->mpcg->channels_terminating = 0;
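	/* start both the read and the write channel */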
2080 	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2081 		struct channel *ch = priv->channel[direction];
2082 		fsm_event(ch->fsm, CTC_EVENT_START, ch);
2083 	}
2084 }
2085 
2086 /**
2087  * Shut down channels by sending CTC_EVENT_STOP to each channel.
2088  *
2089  * fi		An instance of an interface statemachine.
2090  * event	The event that just happened.
2091  * arg		Generic pointer, cast from struct net_device * upon call.
2092  */
2093 static void dev_action_stop(fsm_instance *fi, int event, void *arg)
2094 {
2095 	int direction;
2096 	struct net_device *dev = arg;
2097 	struct ctcm_priv *priv = dev->ml_priv;
2098 
2099 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2100 
2101 	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
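	/* stop both channels and reset their send sequence numbers */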
2102 	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
2103 		struct channel *ch = priv->channel[direction];
2104 		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
2105 		ch->th_seq_num = 0x00;
2106 		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
2107 				__func__, ch->th_seq_num);
2108 	}
2109 	if (IS_MPC(priv))
2110 		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2111 }
2112 
2113 static void dev_action_restart(fsm_instance *fi, int event, void *arg)
2114 {
2115 	int restart_timer;
2116 	struct net_device *dev = arg;
2117 	struct ctcm_priv *priv = dev->ml_priv;
2118 
2119 	CTCMY_DBF_DEV_NAME(TRACE, dev, "");
2120 
2121 	if (IS_MPC(priv))
2122 		restart_timer = CTCM_TIME_1_SEC;
2123 	else
2124 		restart_timer = CTCM_TIME_5_SEC;
2125 
2126 	dev_info(&dev->dev, "Restarting device\n");
2127 
2128 	dev_action_stop(fi, event, arg);
2129 	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
2130 	if (IS_MPC(priv))
2131 		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
2132 
2133 	/* going back into start sequence too quickly can	  */
2134 	/* result in the other side becoming unreachable due	  */
2135 	/* to sense reported when IO is aborted			  */
2136 	fsm_addtimer(&priv->restart_timer, restart_timer,
2137 			DEV_EVENT_START, dev);
2138 }
2139 
2140 /**
2141  * Called from channel statemachine
2142  * when a channel is up and running.
2143  *
2144  * fi		An instance of an interface statemachine.
2145  * event	The event that just happened.
2146  * arg		Generic pointer, cast from struct net_device * upon call.
2147  */
2148 static void dev_action_chup(fsm_instance *fi, int event, void *arg)
2149 {
2150 	struct net_device *dev = arg;
2151 	struct ctcm_priv *priv = dev->ml_priv;
2152 	int dev_stat = fsm_getstate(fi);
2153 
2154 	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
2155 			"%s(%s): priv = %p [%d,%d]\n ",	CTCM_FUNTAIL,
2156 				dev->name, dev->ml_priv, dev_stat, event);
2157 
2158 	switch (fsm_getstate(fi)) {
2159 	case DEV_STATE_STARTWAIT_RXTX:
2160 		if (event == DEV_EVENT_RXUP)
2161 			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2162 		else
2163 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2164 		break;
2165 	case DEV_STATE_STARTWAIT_RX:
2166 		if (event == DEV_EVENT_RXUP) {
2167 			fsm_newstate(fi, DEV_STATE_RUNNING);
2168 			dev_info(&dev->dev,
2169 				"Connected with remote side\n");
2170 			ctcm_clear_busy(dev);
2171 		}
2172 		break;
2173 	case DEV_STATE_STARTWAIT_TX:
2174 		if (event == DEV_EVENT_TXUP) {
2175 			fsm_newstate(fi, DEV_STATE_RUNNING);
2176 			dev_info(&dev->dev,
2177 				"Connected with remote side\n");
2178 			ctcm_clear_busy(dev);
2179 		}
2180 		break;
2181 	case DEV_STATE_STOPWAIT_TX:
2182 		if (event == DEV_EVENT_RXUP)
2183 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2184 		break;
2185 	case DEV_STATE_STOPWAIT_RX:
2186 		if (event == DEV_EVENT_TXUP)
2187 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
2188 		break;
2189 	}
2190 
2191 	if (IS_MPC(priv)) {
2192 		if (event == DEV_EVENT_RXUP)
2193 			mpc_channel_action(priv->channel[CTCM_READ],
2194 				CTCM_READ, MPC_CHANNEL_ADD);
2195 		else
2196 			mpc_channel_action(priv->channel[CTCM_WRITE],
2197 				CTCM_WRITE, MPC_CHANNEL_ADD);
2198 	}
2199 }
2200 
2201 /**
2202  * Called from device statemachine
2203  * when a channel has been shut down.
2204  *
2205  * fi		An instance of an interface statemachine.
2206  * event	The event that just happened.
2207  * arg		Generic pointer, cast from struct net_device * upon call.
2208  */
2209 static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
2210 {
2211 
2212 	struct net_device *dev = arg;
2213 	struct ctcm_priv *priv = dev->ml_priv;
2214 
2215 	CTCMY_DBF_DEV_NAME(SETUP, dev, "");
2216 
2217 	switch (fsm_getstate(fi)) {
2218 	case DEV_STATE_RUNNING:
2219 		if (event == DEV_EVENT_TXDOWN)
2220 			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
2221 		else
2222 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
2223 		break;
2224 	case DEV_STATE_STARTWAIT_RX:
2225 		if (event == DEV_EVENT_TXDOWN)
2226 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2227 		break;
2228 	case DEV_STATE_STARTWAIT_TX:
2229 		if (event == DEV_EVENT_RXDOWN)
2230 			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
2231 		break;
2232 	case DEV_STATE_STOPWAIT_RXTX:
2233 		if (event == DEV_EVENT_TXDOWN)
2234 			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
2235 		else
2236 			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
2237 		break;
2238 	case DEV_STATE_STOPWAIT_RX:
2239 		if (event == DEV_EVENT_RXDOWN)
2240 			fsm_newstate(fi, DEV_STATE_STOPPED);
2241 		break;
2242 	case DEV_STATE_STOPWAIT_TX:
2243 		if (event == DEV_EVENT_TXDOWN)
2244 			fsm_newstate(fi, DEV_STATE_STOPPED);
2245 		break;
2246 	}
2247 	if (IS_MPC(priv)) {
2248 		if (event == DEV_EVENT_RXDOWN)
2249 			mpc_channel_action(priv->channel[CTCM_READ],
2250 				CTCM_READ, MPC_CHANNEL_REMOVE);
2251 		else
2252 			mpc_channel_action(priv->channel[CTCM_WRITE],
2253 				CTCM_WRITE, MPC_CHANNEL_REMOVE);
2254 	}
2255 }
2256 
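/*
 * The statemachine for the network device (interface).
 */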
2257 const fsm_node dev_fsm[] = {
2258 	{ DEV_STATE_STOPPED,        DEV_EVENT_START,   dev_action_start   },
2259 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_START,   dev_action_start   },
2260 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RXDOWN,  dev_action_chdown  },
2261 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_TXDOWN,  dev_action_chdown  },
2262 	{ DEV_STATE_STOPWAIT_RXTX,  DEV_EVENT_RESTART, dev_action_restart },
2263 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_START,   dev_action_start   },
2264 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXUP,    dev_action_chup    },
2265 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_TXUP,    dev_action_chup    },
2266 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RXDOWN,  dev_action_chdown  },
2267 	{ DEV_STATE_STOPWAIT_RX,    DEV_EVENT_RESTART, dev_action_restart },
2268 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_START,   dev_action_start   },
2269 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RXUP,    dev_action_chup    },
2270 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXUP,    dev_action_chup    },
2271 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_TXDOWN,  dev_action_chdown  },
2272 	{ DEV_STATE_STOPWAIT_TX,    DEV_EVENT_RESTART, dev_action_restart },
2273 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP,    dev_action_stop    },
2274 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP,    dev_action_chup    },
2275 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP,    dev_action_chup    },
2276 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN,  dev_action_chdown  },
2277 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN,  dev_action_chdown  },
2278 	{ DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart },
2279 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_STOP,    dev_action_stop    },
2280 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXUP,    dev_action_chup    },
2281 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_TXUP,    dev_action_chup    },
2282 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RXDOWN,  dev_action_chdown  },
2283 	{ DEV_STATE_STARTWAIT_TX,   DEV_EVENT_RESTART, dev_action_restart },
2284 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_STOP,    dev_action_stop    },
2285 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RXUP,    dev_action_chup    },
2286 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXUP,    dev_action_chup    },
2287 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_TXDOWN,  dev_action_chdown  },
2288 	{ DEV_STATE_STARTWAIT_RX,   DEV_EVENT_RESTART, dev_action_restart },
2289 	{ DEV_STATE_RUNNING,        DEV_EVENT_STOP,    dev_action_stop    },
2290 	{ DEV_STATE_RUNNING,        DEV_EVENT_RXDOWN,  dev_action_chdown  },
2291 	{ DEV_STATE_RUNNING,        DEV_EVENT_TXDOWN,  dev_action_chdown  },
2292 	{ DEV_STATE_RUNNING,        DEV_EVENT_TXUP,    ctcm_action_nop    },
2293 	{ DEV_STATE_RUNNING,        DEV_EVENT_RXUP,    ctcm_action_nop    },
2294 	{ DEV_STATE_RUNNING,        DEV_EVENT_RESTART, dev_action_restart },
2295 };
2296 
2297 int dev_fsm_len = ARRAY_SIZE(dev_fsm);
2298 
2299 /* --- This is the END my friend --- */
2300 
2301