// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2001, 2007
 * Authors:	Fritz Elfert (felfert@millenux.com)
 *		Peter Tiedemann (ptiedem@de.ibm.com)
 *	MPC additions :
 *		Belinda Thompson (belindat@us.ibm.com)
 *		Andy Richter (richtera@us.ibm.com)
 */

#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW

#define pr_fmt(fmt) "ctcm: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>

#include <asm/idals.h>

#include "fsm.h"

#include "ctcm_dbug.h"
#include "ctcm_main.h"
#include "ctcm_fsms.h"

const char *dev_state_names[] = {
	[DEV_STATE_STOPPED] = "Stopped",
	[DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
	[DEV_STATE_STARTWAIT_RX] = "StartWait RX",
	[DEV_STATE_STARTWAIT_TX] = "StartWait TX",
	[DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
	[DEV_STATE_STOPWAIT_RX] = "StopWait RX",
	[DEV_STATE_STOPWAIT_TX] = "StopWait TX",
	[DEV_STATE_RUNNING] = "Running",
};

const char *dev_event_names[] = {
	[DEV_EVENT_START] = "Start",
	[DEV_EVENT_STOP] = "Stop",
	[DEV_EVENT_RXUP] = "RX up",
	[DEV_EVENT_TXUP] = "TX up",
	[DEV_EVENT_RXDOWN] = "RX down",
	[DEV_EVENT_TXDOWN] = "TX down",
	[DEV_EVENT_RESTART] = "Restart",
};

const char *ctc_ch_event_names[] = {
	[CTC_EVENT_IO_SUCCESS] = "ccw_device success",
	[CTC_EVENT_IO_EBUSY] = "ccw_device busy",
	[CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
	[CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
	[CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
	[CTC_EVENT_ATTN] = "Status ATTN",
	[CTC_EVENT_BUSY] = "Status BUSY",
	[CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
	[CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
	[CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
	[CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
	[CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
	[CTC_EVENT_UC_ZERO] = "Unit check ZERO",
	[CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
	[CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
	[CTC_EVENT_MC_FAIL] = "Machine check failure",
	[CTC_EVENT_MC_GOOD] = "Machine check operational",
	[CTC_EVENT_IRQ] = "IRQ normal",
	[CTC_EVENT_FINSTAT] = "IRQ final",
	[CTC_EVENT_TIMER] = "Timer",
	[CTC_EVENT_START] = "Start",
	[CTC_EVENT_STOP] = "Stop",
	/*
	 * additional MPC events
	 */
	[CTC_EVENT_SEND_XID] = "XID Exchange",
	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
};

const char *ctc_ch_state_names[] = {
	[CTC_STATE_IDLE] = "Idle",
	[CTC_STATE_STOPPED] = "Stopped",
	[CTC_STATE_STARTWAIT] = "StartWait",
	[CTC_STATE_STARTRETRY] = "StartRetry",
	[CTC_STATE_SETUPWAIT] = "SetupWait",
	[CTC_STATE_RXINIT] = "RX init",
	[CTC_STATE_TXINIT] = "TX init",
	[CTC_STATE_RX] = "RX",
	[CTC_STATE_TX] = "TX",
	[CTC_STATE_RXIDLE] = "RX idle",
	[CTC_STATE_TXIDLE] = "TX idle",
	[CTC_STATE_RXERR] = "RX error",
	[CTC_STATE_TXERR] = "TX error",
	[CTC_STATE_TERM] = "Terminating",
	[CTC_STATE_DTERM] = "Restarting",
	[CTC_STATE_NOTOP] = "Not operational",
	/*
	 * additional MPC states
	 */
	[CH_XID0_PENDING] = "Pending XID0 Start",
	[CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
	[CH_XID7_PENDING] = "Pending XID7 P1 Start",
	[CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
	[CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
	[CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
	[CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
};

static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcm actions for channel statemachine -----
 *
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg);
static void chx_rx(fsm_instance *fi, int event, void *arg);
static void chx_rxidle(fsm_instance *fi, int event, void *arg);
static void chx_firstio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
 *
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
/* shared :
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
*/
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);

/*
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * ch	:	The channel the error belongs to.
 * Returns the error code (!= 0) to inspect.
 */
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s: %04x\n",
			CTCM_FUNTAIL, ch->id, msg, rc);
	switch (rc) {
	case -EBUSY:
		pr_info("%s: The communication peer is busy\n",
			ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		pr_err("%s: The specified target device is not valid\n",
		       ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
		break;
	default:
		pr_err("An I/O operation resulted in error %04x\n",
		       rc);
		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
	}
}

void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);

	while ((skb = skb_dequeue(q))) {
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

/*
 * NOP action for statemachines
 */
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions for channel - statemachines.
 */

/*
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	unsigned long done_stamp = jiffies;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	duration = done_stamp - ch->prof.send_stamp;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		refcount_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctcm_checkalloc_buffer(ch)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
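			/* drop the extra hold taken when the skb was queued
			 * on the collect queue */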
			refcount_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		ch->prof.send_stamp = jiffies;
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			priv->stats.tx_dropped += i;
			priv->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
	}
	ctcm_clear_busy_do(dev);
}

/*
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CTC_STATE_TXIDLE);
	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}

/*
 * Got normal data, check for sanity, queue it up, allocate new buffer,
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *)skb->data);
	int check_len;
	int rc;

	fsm_deltimer(&ch->timer);
	if (len < 8) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d < 8\n",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d > %d\n",
			CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}

	/*
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
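	 * The protocol switch below therefore widens the upper bound of the
	 * length check by those two bytes for the S/390 and OS/390 protocols.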
	 */
	switch (ch->protocol) {
	case CTCM_PROTO_S390:
	case CTCM_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got block length %d != rx length %d\n",
			CTCM_FUNTAIL, dev->name, block_len, len);
		if (do_debug)
			ctcmpc_dump_skb(skb, 0);

		*((__u16 *)skb->data) = len;
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (block_len > 2) {
		*((__u16 *)skb->data) = block_len - 2;
		ctcm_unpack_skb(ch, skb);
	}
again:
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctcm_checkalloc_buffer(ch))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	if (rc != 0)
		ctcm_ccw_check_rc(ch, rc, "normal RX");
}

/*
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	int rc;
	struct channel *ch = arg;
	int fsmstate = fsm_getstate(fi);

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s(%s) : %02x",
		CTCM_FUNTAIL, ch->id, fsmstate);

	ch->sense_rc = 0;	/* reset unit check report control */
	if (fsmstate == CTC_STATE_TXIDLE)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): remote side issued READ?, init.\n",
			CTCM_FUNTAIL, ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				     CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->ml_priv;
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}
	/*
	 * Don't set up a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * Since we don't set up a timer in compatibility mode, we also
	 * signal the RX channel up immediately. This enables us to send
	 * packets early which in turn usually triggers some reply from
	 * VM TCP which brings up the RX channel to its final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}

/*
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 buflen;
	int rc;

	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *)ch->trans_skb->data);
	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
			__func__, dev->name, buflen);

	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
		if (ctcm_checkalloc_buffer(ch))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CTC_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
		} else
			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	} else {
		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
				__func__, dev->name,
				buflen, CTCM_INITIAL_BLOCKLEN);
		chx_firstio(fi, event, arg);
	}
}

/*
 * Set channel into extended mode.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	unsigned long saveflags = 0;
	int timeout = CTCM_TIME_5_SEC;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch)) {
		timeout = 1500;
		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
				__func__, smp_processor_id(), ch, ch->id);
	}
	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);

	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic in
	 * static view. => ignore sparse warnings here. */

	rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0);
	if (event == CTC_EVENT_TIMER)	/* see above comments */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_STARTWAIT);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}

/*
 * Setup channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags;
	int rc;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
		CTCM_FUNTAIL, ch->id,
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX");

	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		ch->ccw[1].cmd_code = CCW_CMD_READ;
		ch->ccw[1].flags = CCW_FLAG_SLI;
		ch->ccw[1].count = 0;
	} else {
		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[1].count = 0;
	}
	if (ctcm_checkalloc_buffer(ch)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): %s trans_skb alloc delayed "
			"until first transfer",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
				"RX" : "TX");
	}
	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[0].count = 0;
	ch->ccw[0].cda = 0;
	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
	ch->ccw[2].flags = CCW_FLAG_SLI;
	ch->ccw[2].count = 0;
	ch->ccw[2].cda = 0;
	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
	ch->ccw[4].cda = 0;
	ch->ccw[4].flags &= ~CCW_FLAG_IDA;

	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY)
			fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
	}
}

/*
 * Shutdown a channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags = 0;
	int rc;
	int oldstate;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic in
	 * static view. => ignore sparse warnings here. */
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_TERM);
	rc = ccw_device_halt(ch->cdev, 0);

	if (event == CTC_EVENT_STOP)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	/* see remark above about conditional locking */

	if (rc != 0 && rc != -EBUSY) {
		fsm_deltimer(&ch->timer);
		if (event != CTC_EVENT_STOP) {
			fsm_newstate(fi, oldstate);
			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
		}
	}
}

/*
 * Cleanup helper for chx_fail and chx_stopped:
 * clean up the channel's queues and notify the interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * state	The next state (depending on caller).
 * ch		The channel to operate on.
 */
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
		struct channel *ch)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
			"%s(%s): %s[%d]\n",
			CTCM_FUNTAIL, dev->name, ch->id, state);

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_newstate(fi, state);
	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}

	ch->th_seg = 0x00;
	ch->th_seq_num = 0x00;
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_purge_skb_queue(&ch->io_queue);
		if (IS_MPC(ch))
			ctcm_purge_skb_queue(&ch->sweep_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * A channel has successfully been halted.
 * Clean up its queue and notify the interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}

/*
 * A stop command from the device statemachine arrived and we are in
 * the not-operational mode. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STOPPED);
}

/*
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Clean up the queue and notify the interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}

/*
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	/*
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that the remote side isn't set up. In this case
	 * simply retry after a short delay...
	 */
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    ((event == CTC_EVENT_UC_RCRESET) ||
	     (event == CTC_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CTC_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		if (!IS_MPC(ch) &&
		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
			int rc = ccw_device_halt(ch->cdev, 0);
			if (rc != 0)
				ctcm_ccw_check_rc(ch, rc,
					"HaltIO in chx_setuperr");
		}
		return;
	}

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
		"%s(%s) : %s error during %s channel setup state=%s\n",
		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
		fsm_getstate_str(fi));

	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s: %s[%d] of %s\n",
		CTCM_FUNTAIL, ch->id, event, dev->name);

	fsm_deltimer(&ch->timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is a known problem for
	 * sparse because it's nondeterministic in static view.
	 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, 0);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}

/*
 * Handle error during RX initial handshake (exchange of
 * 0-length block header)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		if (!IS_MPCDEV(dev))
			/* TODO : check if MPC deletes timer somewhere */
			fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_RXERR);
			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/*
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s(%s): RX %s busy, init. fail",
		CTCM_FUNTAIL, dev->name, ch->id);
	fsm_newstate(fi, CTC_STATE_RXERR);
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}

/*
 * Handle RX Unit check remote reset (remote disconnected)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s: %s: remote disconnect - re-init ...",
			CTCM_FUNTAIL, dev->name);
	fsm_deltimer(&ch->timer);
	/*
	 * Notify device statemachine
	 */
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CTC_STATE_DTERM);
	ch2 = priv->channel[CTCM_WRITE];
	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);

	ccw_device_halt(ch->cdev, 0);
	ccw_device_halt(ch2->cdev, 0);
}

/*
 * Handle error during TX channel initialization.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_TXERR);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		}
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/*
 * Handle TX timeout by retrying operation.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;

	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ch, ch->id);

	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		struct mpc_group *gptr = priv->mpcg;
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: retries exceeded",
				CTCM_FUNTAIL, ch->id);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		/* call restart if not MPC or if MPC and mpcg fsm is ready,
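		   i.e. restart unless an MPC group exists whose fsm is not
		   yet in the READY state;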
			use gptr as mpc indicator */
		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
			ctcm_chx_restart(fi, event, arg);
		goto done;
	}

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s : %s: retry %d",
			CTCM_FUNTAIL, ch->id, ch->retry);
	skb = skb_peek(&ch->io_queue);
	if (skb) {
		int rc = 0;
		unsigned long saveflags = 0;
		clear_normalized_cda(&ch->ccw[4]);
		ch->ccw[4].count = skb->len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: IDAL alloc failed",
				CTCM_FUNTAIL, ch->id);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
			ctcm_chx_restart(fi, event, arg);
			goto done;
		}
		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
		if (event == CTC_EVENT_TIMER)	/* for TIMER not yet locked */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for
		 * sparse because it's nondeterministic in static view.
		 * Warnings should be ignored here. */
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[3],
					sizeof(struct ccw1) * 3);

		rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0);
		if (event == CTC_EVENT_TIMER)
			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
					saveflags);
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
			ctcm_purge_skb_queue(&ch->io_queue);
		}
	}
done:
	return;
}

/*
 * Handle fatal errors during an I/O command.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int rd = CHANNEL_DIRECTION(ch->flags);

	fsm_deltimer(&ch->timer);
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s: %s: %s unrecoverable channel error",
		CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");

	if (IS_MPC(ch)) {
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
	}
	if (rd == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * The ctcm statemachine for a channel.
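 * Each entry maps a (state, event) pair to the action routine that
 * handles that event while the channel is in that state.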
 */
const fsm_node ch_fsm[] = {
	{ CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
	{ CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },

	{ CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
	{ CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },

	{ CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
	{ CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },

	{ CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
	{ CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
};

int ch_fsm_len = ARRAY_SIZE(ch_fsm);

/*
 * MPC actions for the mpc channel statemachine:
 * handling of the MPC protocol requires an extra
 * statemachine and actions which are prefixed ctcmpc_ .
 * The ctc_ch_states and ctc_ch_state_names,
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions,
 * which are expanded by some elements.
 */

/*
 * Actions for mpc channel statemachine.
 */

/*
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb;
	int first = 1;
	int i;
	__u32 data_space;
	unsigned long duration;
	struct sk_buff *peekskb;
	int rc;
	struct th_header *header;
	struct pdu *p_header;
	unsigned long done_stamp = jiffies;

	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
			__func__, dev->name, smp_processor_id());

	duration = done_stamp - ch->prof.send_stamp;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		refcount_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
		goto done;
	}

	if (ctcm_checkalloc_buffer(ch)) {
		spin_unlock(&ch->collect_lock);
		goto done;
	}
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
	i = 0;
	p_header = NULL;
	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;

	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
			" data_space:%04x\n",
			__func__, data_space);

	while ((skb = skb_dequeue(&ch->collect_queue))) {
		skb_put_data(ch->trans_skb, skb->data, skb->len);
		p_header = (struct pdu *)
			(skb_tail_pointer(ch->trans_skb) - skb->len);
		p_header->pdu_flag = 0x00;
		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
			p_header->pdu_flag |= 0x60;
		else
			p_header->pdu_flag |= 0x20;

		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
				__func__, ch->trans_skb->len);
		CTCM_PR_DBGDATA("%s: pdu header and data for up"
				" to 32 bytes sent to vtam\n", __func__);
		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));

		ch->collect_len -= skb->len;
		data_space -= skb->len;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
		peekskb = skb_peek(&ch->collect_queue);
		if (peekskb->len > data_space)
			break;
		i++;
	}
	/* p_header points to the last one we handled */
	if (p_header)
		p_header->pdu_flag |= PDU_LAST;	/* Say it's the last one */

	header = skb_push(ch->trans_skb, TH_HEADER_LENGTH);
	memset(header, 0, TH_HEADER_LENGTH);

	header->th_ch_flag = TH_HAS_PDU;	/* Normal data */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
			__func__, ch->th_seq_num);

	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
			__func__, ch->trans_skb->len);
	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
			"data to vtam from collect_q\n", __func__);
	CTCM_D3_DUMP((char *)ch->trans_skb->data,
			min_t(int, ch->trans_skb->len, 50));

	spin_unlock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[1]);

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(u64)dma32_to_u32(ch->ccw[1].cda),
			ch->trans_skb->data);
	ch->ccw[1].count = ch->max_bufsize;

	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
			"%s: %s: IDAL alloc failed",
			CTCM_FUNTAIL, ch->id);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		return;
	}

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(u64)dma32_to_u32(ch->ccw[1].cda),
			ch->trans_skb->data);

	ch->ccw[1].count = ch->trans_skb->len;
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	ch->prof.send_stamp = jiffies;
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	ch->prof.doios_multi++;
	if (rc != 0) {
		priv->stats.tx_dropped += i;
		priv->stats.tx_errors += i;
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "chained TX");
	}
done:
	ctcm_clear_busy(dev);
	return;
}

/*
 * Got normal data, check for sanity, queue it up, allocate new buffer,
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb = ch->trans_skb;
	struct sk_buff *new_skb;
	unsigned long saveflags = 0;	/* avoids compiler warning */
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;

	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
			CTCM_FUNTAIL, dev->name, smp_processor_id(),
			ch->id, ch->max_bufsize, len);
	fsm_deltimer(&ch->timer);

	if (skb == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): TRANS_SKB = NULL",
			CTCM_FUNTAIL, dev->name);
		goto again;
	}

	if (len < TH_HEADER_LENGTH) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): packet length %d too short",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
	} else {
		/* must have valid th header or game over */
		__u32 block_len = len;
		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);

		if (new_skb == NULL) {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): skb allocation failed",
				CTCM_FUNTAIL, dev->name);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto again;
		}
		switch (fsm_getstate(grp->fsm)) {
		case MPCG_STATE_RESET:
		case MPCG_STATE_INOP:
			dev_kfree_skb_any(new_skb);
			break;
		case MPCG_STATE_FLOWC:
		case MPCG_STATE_READY:
			skb_put_data(new_skb, skb->data, block_len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_schedule(&ch->ch_tasklet);
			break;
		default:
			skb_put_data(new_skb, skb->data, len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_hi_schedule(&ch->ch_tasklet);
			break;
		}
	}

again:
	switch (fsm_getstate(grp->fsm)) {
	int rc, dolock;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			break;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[0],
					sizeof(struct ccw1) * 3);
		dolock = !in_hardirq();
		if (dolock)
			spin_lock_irqsave(
				get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (dolock) /* see remark about conditional locking */
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0)
			ctcm_ccw_check_rc(ch, rc, "normal RX");
		break;
	default:
		break;
	}

	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
			__func__, dev->name, ch, ch->id);

}

/*
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *gptr = priv->mpcg;

	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
			fsm_getstate(gptr->fsm), ch->protocol);

	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");

	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		goto done;

	switch (fsm_getstate(fi)) {
	case CTC_STATE_STARTRETRY:
	case CTC_STATE_SETUPWAIT:
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			ctcmpc_chx_rxidle(fi, event, arg);
		} else {
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		goto done;
	default:
		break;
	}

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);

done:
	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);
	return;
}

/*
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	int rc;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	fsm_deltimer(&ch->timer);
	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
			__func__, ch->id, dev->name, smp_processor_id(),
			fsm_getstate(fi), fsm_getstate(grp->fsm));

	fsm_newstate(fi, CTC_STATE_RXIDLE);
	/* XID processing complete */

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			goto done;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
		if (event == CTC_EVENT_START)
			/* see remark about conditional locking */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (event == CTC_EVENT_START)
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
			goto done;
		}
		break;
	default:
		break;
	}

	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
		__func__, dev->name, ch->id, ch, smp_processor_id(),
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		/* ok..start yside xid exchanges */
		if (!ch->in_mpcgroup)
			break;
		if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
			fsm_deltimer(&grp->timer);
			fsm_addtimer(&grp->timer,
				MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);

		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			/* attn rcvd before xid0 processed via bh */
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID0IOWAIX:
		/* attn rcvd before xid0 processed on ch
		   but mid-xid0 processing for group */
		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
		switch (fsm_getstate(ch->fsm)) {
		case CH_XID7_PENDING:
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
			break;
		case CH_XID7_PENDING2:
			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
			break;
		}
		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
		break;
	}

	return;
}

/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
			__func__, dev->name, ch->id,
			fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	fsm_deltimer(&ch->timer);

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary. Start yside xid exchanges */
		/* only receive one attn-busy at a time so must not */
		/* change state each time */
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* process began via call to establish_conn */
		/* so must report failure instead of reverting */
		/* back to ready-for-xid passive state */
		if (grp->estconnfunc)
			goto done;
		/* this attnbusy is NOT the result of xside xid */
		/* collisions so yside must have been triggered */
		/* by an ATTN that was not intended to start XID */
		/* processing. Revert back to ready-for-xid and */
		/* wait for ATTN interrupt to signal xid start */
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for second
		   channel. Send yside xid for second channel.
		*/
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		fallthrough;
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	default:
		/* multiple attn-busy indicates too out-of-sync */
		/* and they are certainly not being received as part */
		/* of valid mpc group negotiations.. */
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	if (grp->changed_side == 1) {
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
			     MPCG_EVENT_TIMER, dev);
	}
	if (ch->in_mpcgroup)
		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	else
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): channel %s not added to group",
			CTCM_FUNTAIL, dev->name, ch->id);

done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ach = arg;
	struct net_device *dev = ach->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct channel *wch = priv->channel[CTCM_WRITE];
	struct channel *rch = priv->channel[CTCM_READ];
	struct sk_buff *skb;
	struct th_sweep *header;
	int rc = 0;
	unsigned long saveflags = 0;

	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ach, ach->id);

	if (grp->in_sweep == 0)
		goto done;

	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n" ,
			__func__, wch->th_seq_num);
	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n" ,
			__func__, rch->th_seq_num);

	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
		/* give the previous IO time to complete */
		fsm_addtimer(&wch->sweep_timer,
			200, CTC_EVENT_RSWEEP_TIMER, wch);
		goto done;
	}

	skb = skb_dequeue(&wch->sweep_queue);
	if (!skb)
		goto done;

	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
		grp->in_sweep = 0;
		ctcm_clear_busy_do(dev);
		dev_kfree_skb_any(skb);
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	} else {
		refcount_inc(&skb->users);
		skb_queue_tail(&wch->io_queue, skb);
	}

	/* send out the sweep */
	wch->ccw[4].count = skb->len;

	header = (struct th_sweep *)skb->data;
	switch (header->th.th_ch_flag) {
	case TH_SWEEP_REQ:
		grp->sweep_req_pend_num--;
		break;
	case TH_SWEEP_RESP:
		grp->sweep_rsp_pend_num--;
		break;
	}

	header->sw.th_last_seq = wch->th_seq_num;

	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);

	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
	fsm_newstate(wch->fsm, CTC_STATE_TX);

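	/* start the sweep CCW chain on the write channel under the
	 * ccw device lock */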
spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags); 1811 wch->prof.send_stamp = jiffies; 1812 rc = ccw_device_start(wch->cdev, &wch->ccw[3], 0, 0xff, 0); 1813 spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags); 1814 1815 if ((grp->sweep_req_pend_num == 0) && 1816 (grp->sweep_rsp_pend_num == 0)) { 1817 grp->in_sweep = 0; 1818 rch->th_seq_num = 0x00; 1819 wch->th_seq_num = 0x00; 1820 ctcm_clear_busy_do(dev); 1821 } 1822 1823 CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n" , 1824 __func__, wch->th_seq_num, rch->th_seq_num); 1825 1826 if (rc != 0) 1827 ctcm_ccw_check_rc(wch, rc, "send sweep"); 1828 1829 done: 1830 return; 1831 } 1832 1833 1834 /* 1835 * The ctcmpc statemachine for a channel. 1836 */ 1837 1838 const fsm_node ctcmpc_ch_fsm[] = { 1839 { CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop }, 1840 { CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start }, 1841 { CTC_STATE_STOPPED, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1842 { CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop }, 1843 { CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop }, 1844 1845 { CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop }, 1846 { CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop }, 1847 { CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop }, 1848 { CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop }, 1849 { CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start }, 1850 { CTC_STATE_NOTOP, CTC_EVENT_UC_RCRESET, ctcm_chx_stop }, 1851 { CTC_STATE_NOTOP, CTC_EVENT_UC_RSRESET, ctcm_chx_stop }, 1852 { CTC_STATE_NOTOP, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1853 1854 { CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, 1855 { CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop }, 1856 { CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, 1857 { CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr }, 1858 { CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1859 { CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1860 1861 { CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio }, 1862 { CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode }, 1863 { CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, 1864 { CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1865 { CTC_STATE_STARTRETRY, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1866 1867 { CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio }, 1868 { CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop }, 1869 { CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio }, 1870 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, 1871 { CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, 1872 { CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode }, 1873 { CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1874 { CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1875 1876 { CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, 1877 { CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop }, 1878 { CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, ctcmpc_chx_rxidle }, 1879 { CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr }, 1880 { CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr }, 1881 { CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr }, 1882 { CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail }, 1883 { CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1884 { CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, ctcmpc_chx_firstio }, 1885 { CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1886 1887 { CH_XID0_PENDING, 
CTC_EVENT_FINSTAT, ctcm_action_nop }, 1888 { CH_XID0_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn }, 1889 { CH_XID0_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio }, 1890 { CH_XID0_PENDING, CTC_EVENT_START, ctcm_action_nop }, 1891 { CH_XID0_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1892 { CH_XID0_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1893 { CH_XID0_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, 1894 { CH_XID0_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, 1896 { CH_XID0_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, 1897 1898 { CH_XID0_INPROGRESS, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, 1899 { CH_XID0_INPROGRESS, CTC_EVENT_ATTN, ctcmpc_chx_attn }, 1900 { CH_XID0_INPROGRESS, CTC_EVENT_STOP, ctcm_chx_haltio }, 1901 { CH_XID0_INPROGRESS, CTC_EVENT_START, ctcm_action_nop }, 1902 { CH_XID0_INPROGRESS, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1903 { CH_XID0_INPROGRESS, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1904 { CH_XID0_INPROGRESS, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, 1905 { CH_XID0_INPROGRESS, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, 1906 { CH_XID0_INPROGRESS, CTC_EVENT_ATTNBUSY, ctcmpc_chx_attnbusy }, 1907 { CH_XID0_INPROGRESS, CTC_EVENT_TIMER, ctcmpc_chx_resend }, 1908 { CH_XID0_INPROGRESS, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, 1909 1910 { CH_XID7_PENDING, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, 1911 { CH_XID7_PENDING, CTC_EVENT_ATTN, ctcmpc_chx_attn }, 1912 { CH_XID7_PENDING, CTC_EVENT_STOP, ctcm_chx_haltio }, 1913 { CH_XID7_PENDING, CTC_EVENT_START, ctcm_action_nop }, 1914 { CH_XID7_PENDING, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1915 { CH_XID7_PENDING, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1916 { CH_XID7_PENDING, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, 1917 { CH_XID7_PENDING, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, 1918 { CH_XID7_PENDING, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, 1920 { CH_XID7_PENDING, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, 1921 { CH_XID7_PENDING, CTC_EVENT_TIMER, ctcmpc_chx_resend }, 1922 { CH_XID7_PENDING, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, 1923 1924 { CH_XID7_PENDING1, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, 1925 { CH_XID7_PENDING1, CTC_EVENT_ATTN, ctcmpc_chx_attn }, 1926 { CH_XID7_PENDING1, CTC_EVENT_STOP, ctcm_chx_haltio }, 1927 { CH_XID7_PENDING1, CTC_EVENT_START, ctcm_action_nop }, 1928 { CH_XID7_PENDING1, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1929 { CH_XID7_PENDING1, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1930 { CH_XID7_PENDING1, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, 1931 { CH_XID7_PENDING1, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, 1932 { CH_XID7_PENDING1, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, 1933 { CH_XID7_PENDING1, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, 1934 { CH_XID7_PENDING1, CTC_EVENT_TIMER, ctcmpc_chx_resend }, 1935 { CH_XID7_PENDING1, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, 1936 1937 { CH_XID7_PENDING2, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, 1938 { CH_XID7_PENDING2, CTC_EVENT_ATTN, ctcmpc_chx_attn }, 1939 { CH_XID7_PENDING2, CTC_EVENT_STOP, ctcm_chx_haltio }, 1940 { CH_XID7_PENDING2, CTC_EVENT_START, ctcm_action_nop }, 1941 { CH_XID7_PENDING2, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1942 { CH_XID7_PENDING2, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1943 { CH_XID7_PENDING2, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, 1944 { CH_XID7_PENDING2, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, 1945 { CH_XID7_PENDING2, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, 1946 { CH_XID7_PENDING2, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, 1947 {
CH_XID7_PENDING2, CTC_EVENT_TIMER, ctcmpc_chx_resend }, 1948 { CH_XID7_PENDING2, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, 1949 1950 { CH_XID7_PENDING3, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, 1951 { CH_XID7_PENDING3, CTC_EVENT_ATTN, ctcmpc_chx_attn }, 1952 { CH_XID7_PENDING3, CTC_EVENT_STOP, ctcm_chx_haltio }, 1953 { CH_XID7_PENDING3, CTC_EVENT_START, ctcm_action_nop }, 1954 { CH_XID7_PENDING3, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1955 { CH_XID7_PENDING3, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1956 { CH_XID7_PENDING3, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, 1957 { CH_XID7_PENDING3, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, 1958 { CH_XID7_PENDING3, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, 1959 { CH_XID7_PENDING3, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, 1960 { CH_XID7_PENDING3, CTC_EVENT_TIMER, ctcmpc_chx_resend }, 1961 { CH_XID7_PENDING3, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, 1962 1963 { CH_XID7_PENDING4, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, 1964 { CH_XID7_PENDING4, CTC_EVENT_ATTN, ctcmpc_chx_attn }, 1965 { CH_XID7_PENDING4, CTC_EVENT_STOP, ctcm_chx_haltio }, 1966 { CH_XID7_PENDING4, CTC_EVENT_START, ctcm_action_nop }, 1967 { CH_XID7_PENDING4, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1968 { CH_XID7_PENDING4, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1969 { CH_XID7_PENDING4, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, 1970 { CH_XID7_PENDING4, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr }, 1971 { CH_XID7_PENDING4, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr }, 1972 { CH_XID7_PENDING4, CTC_EVENT_ATTNBUSY, ctcm_chx_iofatal }, 1973 { CH_XID7_PENDING4, CTC_EVENT_TIMER, ctcmpc_chx_resend }, 1974 { CH_XID7_PENDING4, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, 1975 1976 { CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, 1977 { CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop }, 1978 { CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_rx }, 1979 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc }, 1980 { CTC_STATE_RXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, 1981 { CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1982 { CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1983 { CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, ctcmpc_chx_rx }, 1984 1985 { CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio }, 1986 { CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop }, 1987 { CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle }, 1988 { CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr }, 1989 { CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr }, 1990 { CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr }, 1991 { CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 1992 { CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 1993 { CTC_STATE_TXINIT, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, 1994 1995 { CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio }, 1996 { CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop }, 1997 { CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, ctcmpc_chx_firstio }, 1998 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_fail }, 1999 { CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, 2000 { CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 2001 { CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 2002 { CTC_STATE_TXIDLE, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, 2003 2004 { CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop }, 2005 { CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart }, 2006 { CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped }, 2007 { CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, 2008 { CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, 
ctcm_action_nop }, 2009 { CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 2010 { CTC_STATE_TERM, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, 2011 { CTC_STATE_TERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 2012 2013 { CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio }, 2014 { CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart }, 2015 { CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode }, 2016 { CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop }, 2017 { CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop }, 2018 { CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 2019 { CTC_STATE_DTERM, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 2020 2021 { CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio }, 2022 { CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop }, 2023 { CTC_STATE_TX, CTC_EVENT_FINSTAT, ctcmpc_chx_txdone }, 2024 { CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_fail }, 2025 { CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_fail }, 2026 { CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry }, 2027 { CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 2028 { CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 2029 { CTC_STATE_TX, CTC_EVENT_RSWEEP_TIMER, ctcmpc_chx_send_sweep }, 2030 { CTC_STATE_TX, CTC_EVENT_IO_EBUSY, ctcm_chx_fail }, 2031 2032 { CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, 2033 { CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio }, 2034 { CTC_STATE_TXERR, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal }, 2035 { CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 2036 { CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail }, 2037 }; 2038 2039 int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm); 2040 2041 /* 2042 * Actions for interface - statemachine. 2043 */ 2044 2045 /* 2046 * Startup channels by sending CTC_EVENT_START to each channel. 2047 * 2048 * fi An instance of an interface statemachine. 2049 * event The event, just happened. 2050 * arg Generic pointer, casted from struct net_device * upon call. 2051 */ 2052 static void dev_action_start(fsm_instance *fi, int event, void *arg) 2053 { 2054 struct net_device *dev = arg; 2055 struct ctcm_priv *priv = dev->ml_priv; 2056 int direction; 2057 2058 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 2059 2060 fsm_deltimer(&priv->restart_timer); 2061 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); 2062 if (IS_MPC(priv)) 2063 priv->mpcg->channels_terminating = 0; 2064 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { 2065 struct channel *ch = priv->channel[direction]; 2066 fsm_event(ch->fsm, CTC_EVENT_START, ch); 2067 } 2068 } 2069 2070 /* 2071 * Shutdown channels by sending CTC_EVENT_STOP to each channel. 2072 * 2073 * fi An instance of an interface statemachine. 2074 * event The event, just happened. 2075 * arg Generic pointer, casted from struct net_device * upon call. 
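 *
 * Note: besides posting CTC_EVENT_STOP to both channels, this action
 * clears each channel's TH sequence number and, in MPC mode, resets
 * the group statemachine to MPCG_STATE_RESET.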
2076 */ 2077 static void dev_action_stop(fsm_instance *fi, int event, void *arg) 2078 { 2079 int direction; 2080 struct net_device *dev = arg; 2081 struct ctcm_priv *priv = dev->ml_priv; 2082 2083 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 2084 2085 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); 2086 for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) { 2087 struct channel *ch = priv->channel[direction]; 2088 fsm_event(ch->fsm, CTC_EVENT_STOP, ch); 2089 ch->th_seq_num = 0x00; 2090 CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n", 2091 __func__, ch->th_seq_num); 2092 } 2093 if (IS_MPC(priv)) 2094 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET); 2095 } 2096 2097 static void dev_action_restart(fsm_instance *fi, int event, void *arg) 2098 { 2099 int restart_timer; 2100 struct net_device *dev = arg; 2101 struct ctcm_priv *priv = dev->ml_priv; 2102 2103 CTCMY_DBF_DEV_NAME(TRACE, dev, ""); 2104 2105 if (IS_MPC(priv)) { 2106 restart_timer = CTCM_TIME_1_SEC; 2107 } else { 2108 restart_timer = CTCM_TIME_5_SEC; 2109 } 2110 dev_info(&dev->dev, "Restarting device\n"); 2111 2112 dev_action_stop(fi, event, arg); 2113 fsm_event(priv->fsm, DEV_EVENT_STOP, dev); 2114 if (IS_MPC(priv)) 2115 fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET); 2116 2117 /* going back into start sequence too quickly can */ 2118 /* result in the other side becoming unreachable due */ 2119 /* to sense reported when IO is aborted */ 2120 fsm_addtimer(&priv->restart_timer, restart_timer, 2121 DEV_EVENT_START, dev); 2122 } 2123 2124 /* 2125 * Called from channel statemachine 2126 * when a channel is up and running. 2127 * 2128 * fi An instance of an interface statemachine. 2129 * event The event, just happened. 2130 * arg Generic pointer, casted from struct net_device * upon call. 2131 */ 2132 static void dev_action_chup(fsm_instance *fi, int event, void *arg) 2133 { 2134 struct net_device *dev = arg; 2135 struct ctcm_priv *priv = dev->ml_priv; 2136 int dev_stat = fsm_getstate(fi); 2137 2138 CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE, 2139 "%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL, 2140 dev->name, dev->ml_priv, dev_stat, event); 2141 2142 switch (fsm_getstate(fi)) { 2143 case DEV_STATE_STARTWAIT_RXTX: 2144 if (event == DEV_EVENT_RXUP) 2145 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); 2146 else 2147 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX); 2148 break; 2149 case DEV_STATE_STARTWAIT_RX: 2150 if (event == DEV_EVENT_RXUP) { 2151 fsm_newstate(fi, DEV_STATE_RUNNING); 2152 dev_info(&dev->dev, 2153 "Connected with remote side\n"); 2154 ctcm_clear_busy(dev); 2155 } 2156 break; 2157 case DEV_STATE_STARTWAIT_TX: 2158 if (event == DEV_EVENT_TXUP) { 2159 fsm_newstate(fi, DEV_STATE_RUNNING); 2160 dev_info(&dev->dev, 2161 "Connected with remote side\n"); 2162 ctcm_clear_busy(dev); 2163 } 2164 break; 2165 case DEV_STATE_STOPWAIT_TX: 2166 if (event == DEV_EVENT_RXUP) 2167 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); 2168 break; 2169 case DEV_STATE_STOPWAIT_RX: 2170 if (event == DEV_EVENT_TXUP) 2171 fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX); 2172 break; 2173 } 2174 2175 if (IS_MPC(priv)) { 2176 if (event == DEV_EVENT_RXUP) 2177 mpc_channel_action(priv->channel[CTCM_READ], 2178 CTCM_READ, MPC_CHANNEL_ADD); 2179 else 2180 mpc_channel_action(priv->channel[CTCM_WRITE], 2181 CTCM_WRITE, MPC_CHANNEL_ADD); 2182 } 2183 } 2184 2185 /* 2186 * Called from device statemachine 2187 * when a channel has been shutdown. 2188 * 2189 * fi An instance of an interface statemachine. 2190 * event The event, just happened. 
2191 * arg Generic pointer, casted from struct net_device * upon call. 2192 */ 2193 static void dev_action_chdown(fsm_instance *fi, int event, void *arg) 2194 { 2195 2196 struct net_device *dev = arg; 2197 struct ctcm_priv *priv = dev->ml_priv; 2198 2199 CTCMY_DBF_DEV_NAME(SETUP, dev, ""); 2200 2201 switch (fsm_getstate(fi)) { 2202 case DEV_STATE_RUNNING: 2203 if (event == DEV_EVENT_TXDOWN) 2204 fsm_newstate(fi, DEV_STATE_STARTWAIT_TX); 2205 else 2206 fsm_newstate(fi, DEV_STATE_STARTWAIT_RX); 2207 break; 2208 case DEV_STATE_STARTWAIT_RX: 2209 if (event == DEV_EVENT_TXDOWN) 2210 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); 2211 break; 2212 case DEV_STATE_STARTWAIT_TX: 2213 if (event == DEV_EVENT_RXDOWN) 2214 fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX); 2215 break; 2216 case DEV_STATE_STOPWAIT_RXTX: 2217 if (event == DEV_EVENT_TXDOWN) 2218 fsm_newstate(fi, DEV_STATE_STOPWAIT_RX); 2219 else 2220 fsm_newstate(fi, DEV_STATE_STOPWAIT_TX); 2221 break; 2222 case DEV_STATE_STOPWAIT_RX: 2223 if (event == DEV_EVENT_RXDOWN) 2224 fsm_newstate(fi, DEV_STATE_STOPPED); 2225 break; 2226 case DEV_STATE_STOPWAIT_TX: 2227 if (event == DEV_EVENT_TXDOWN) 2228 fsm_newstate(fi, DEV_STATE_STOPPED); 2229 break; 2230 } 2231 if (IS_MPC(priv)) { 2232 if (event == DEV_EVENT_RXDOWN) 2233 mpc_channel_action(priv->channel[CTCM_READ], 2234 CTCM_READ, MPC_CHANNEL_REMOVE); 2235 else 2236 mpc_channel_action(priv->channel[CTCM_WRITE], 2237 CTCM_WRITE, MPC_CHANNEL_REMOVE); 2238 } 2239 } 2240 2241 const fsm_node dev_fsm[] = { 2242 { DEV_STATE_STOPPED, DEV_EVENT_START, dev_action_start }, 2243 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_START, dev_action_start }, 2244 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, 2245 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, 2246 { DEV_STATE_STOPWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, 2247 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_START, dev_action_start }, 2248 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, 2249 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, 2250 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RXDOWN, dev_action_chdown }, 2251 { DEV_STATE_STOPWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, 2252 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_START, dev_action_start }, 2253 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, 2254 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, 2255 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_TXDOWN, dev_action_chdown }, 2256 { DEV_STATE_STOPWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, 2257 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_STOP, dev_action_stop }, 2258 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXUP, dev_action_chup }, 2259 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXUP, dev_action_chup }, 2260 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RXDOWN, dev_action_chdown }, 2261 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_TXDOWN, dev_action_chdown }, 2262 { DEV_STATE_STARTWAIT_RXTX, DEV_EVENT_RESTART, dev_action_restart }, 2263 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_STOP, dev_action_stop }, 2264 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXUP, dev_action_chup }, 2265 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_TXUP, dev_action_chup }, 2266 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RXDOWN, dev_action_chdown }, 2267 { DEV_STATE_STARTWAIT_TX, DEV_EVENT_RESTART, dev_action_restart }, 2268 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_STOP, dev_action_stop }, 2269 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RXUP, dev_action_chup }, 2270 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXUP, dev_action_chup }, 2271 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_TXDOWN, 
dev_action_chdown }, 2272 { DEV_STATE_STARTWAIT_RX, DEV_EVENT_RESTART, dev_action_restart }, 2273 { DEV_STATE_RUNNING, DEV_EVENT_STOP, dev_action_stop }, 2274 { DEV_STATE_RUNNING, DEV_EVENT_RXDOWN, dev_action_chdown }, 2275 { DEV_STATE_RUNNING, DEV_EVENT_TXDOWN, dev_action_chdown }, 2276 { DEV_STATE_RUNNING, DEV_EVENT_TXUP, ctcm_action_nop }, 2277 { DEV_STATE_RUNNING, DEV_EVENT_RXUP, ctcm_action_nop }, 2278 { DEV_STATE_RUNNING, DEV_EVENT_RESTART, dev_action_restart }, 2279 }; 2280 2281 int dev_fsm_len = ARRAY_SIZE(dev_fsm); 2282 2283 /* --- This is the END my friend --- */ 2284 2285
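/*
 * Illustrative sketch only (kept out of the build with #if 0): this is
 * roughly how the dev_fsm template and dev_fsm_len exported above are
 * expected to be handed to the generic fsm helpers from fsm.h when an
 * interface is set up. The "ctcmdev" instance name, the helper function
 * name and the immediate DEV_EVENT_START are example choices, not taken
 * from this file.
 */
#if 0
static fsm_instance *ctcm_example_bind_dev_fsm(struct net_device *dev)
{
	struct ctcm_priv *priv = dev->ml_priv;

	/* Bind the state/event name tables and the transition template. */
	priv->fsm = init_fsm("ctcmdev", dev_state_names, dev_event_names,
			     ARRAY_SIZE(dev_state_names),
			     ARRAY_SIZE(dev_event_names),
			     dev_fsm, dev_fsm_len, GFP_KERNEL);
	if (priv->fsm == NULL)
		return NULL;

	/* A freshly bound interface starts out stopped ... */
	fsm_newstate(priv->fsm, DEV_STATE_STOPPED);
	/* ... and is brought up by posting DEV_EVENT_START, which runs
	 * dev_action_start() from the dev_fsm table above. */
	fsm_event(priv->fsm, DEV_EVENT_START, dev);
	return priv->fsm;
}
#endif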