// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2001, 2007
 * Authors:	Fritz Elfert (felfert@millenux.com)
 *		Peter Tiedemann (ptiedem@de.ibm.com)
 *	MPC additions :
 *		Belinda Thompson (belindat@us.ibm.com)
 *		Andy Richter (richtera@us.ibm.com)
 */

#undef DEBUG
#undef DEBUGDATA
#undef DEBUGCCW

#define pr_fmt(fmt) "ctcm: " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/bitops.h>

#include <linux/signal.h>
#include <linux/string.h>

#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/ctype.h>
#include <net/dst.h>

#include <linux/io.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>
#include <linux/uaccess.h>

#include <asm/idals.h>

#include "fsm.h"

#include "ctcm_dbug.h"
#include "ctcm_main.h"
#include "ctcm_fsms.h"

const char *dev_state_names[] = {
	[DEV_STATE_STOPPED] = "Stopped",
	[DEV_STATE_STARTWAIT_RXTX] = "StartWait RXTX",
	[DEV_STATE_STARTWAIT_RX] = "StartWait RX",
	[DEV_STATE_STARTWAIT_TX] = "StartWait TX",
	[DEV_STATE_STOPWAIT_RXTX] = "StopWait RXTX",
	[DEV_STATE_STOPWAIT_RX] = "StopWait RX",
	[DEV_STATE_STOPWAIT_TX] = "StopWait TX",
	[DEV_STATE_RUNNING] = "Running",
};

const char *dev_event_names[] = {
	[DEV_EVENT_START] = "Start",
	[DEV_EVENT_STOP] = "Stop",
	[DEV_EVENT_RXUP] = "RX up",
	[DEV_EVENT_TXUP] = "TX up",
	[DEV_EVENT_RXDOWN] = "RX down",
	[DEV_EVENT_TXDOWN] = "TX down",
	[DEV_EVENT_RESTART] = "Restart",
};

const char *ctc_ch_event_names[] = {
	[CTC_EVENT_IO_SUCCESS] = "ccw_device success",
	[CTC_EVENT_IO_EBUSY] = "ccw_device busy",
	[CTC_EVENT_IO_ENODEV] = "ccw_device enodev",
	[CTC_EVENT_IO_UNKNOWN] = "ccw_device unknown",
	[CTC_EVENT_ATTNBUSY] = "Status ATTN & BUSY",
	[CTC_EVENT_ATTN] = "Status ATTN",
	[CTC_EVENT_BUSY] = "Status BUSY",
	[CTC_EVENT_UC_RCRESET] = "Unit check remote reset",
	[CTC_EVENT_UC_RSRESET] = "Unit check remote system reset",
	[CTC_EVENT_UC_TXTIMEOUT] = "Unit check TX timeout",
	[CTC_EVENT_UC_TXPARITY] = "Unit check TX parity",
	[CTC_EVENT_UC_HWFAIL] = "Unit check Hardware failure",
	[CTC_EVENT_UC_RXPARITY] = "Unit check RX parity",
	[CTC_EVENT_UC_ZERO] = "Unit check ZERO",
	[CTC_EVENT_UC_UNKNOWN] = "Unit check Unknown",
	[CTC_EVENT_SC_UNKNOWN] = "SubChannel check Unknown",
	[CTC_EVENT_MC_FAIL] = "Machine check failure",
	[CTC_EVENT_MC_GOOD] = "Machine check operational",
	[CTC_EVENT_IRQ] = "IRQ normal",
	[CTC_EVENT_FINSTAT] = "IRQ final",
	[CTC_EVENT_TIMER] = "Timer",
	[CTC_EVENT_START] = "Start",
	[CTC_EVENT_STOP] = "Stop",
	/*
	 * additional MPC events
	 */
	[CTC_EVENT_SEND_XID] = "XID Exchange",
	[CTC_EVENT_RSWEEP_TIMER] = "MPC Group Sweep Timer",
};

const char *ctc_ch_state_names[] = {
	[CTC_STATE_IDLE] = "Idle",
	[CTC_STATE_STOPPED] = "Stopped",
	[CTC_STATE_STARTWAIT] = "StartWait",
	[CTC_STATE_STARTRETRY] = "StartRetry",
	[CTC_STATE_SETUPWAIT] = "SetupWait",
	[CTC_STATE_RXINIT] = "RX init",
	[CTC_STATE_TXINIT] = "TX init",
	[CTC_STATE_RX] = "RX",
	[CTC_STATE_TX] = "TX",
	[CTC_STATE_RXIDLE] = "RX idle",
	[CTC_STATE_TXIDLE] = "TX idle",
	[CTC_STATE_RXERR] = "RX error",
	[CTC_STATE_TXERR] = "TX error",
	[CTC_STATE_TERM] = "Terminating",
	[CTC_STATE_DTERM] = "Restarting",
	[CTC_STATE_NOTOP] = "Not operational",
	/*
	 * additional MPC states
	 */
	[CH_XID0_PENDING] = "Pending XID0 Start",
	[CH_XID0_INPROGRESS] = "In XID0 Negotiations ",
	[CH_XID7_PENDING] = "Pending XID7 P1 Start",
	[CH_XID7_PENDING1] = "Active XID7 P1 Exchange ",
	[CH_XID7_PENDING2] = "Pending XID7 P2 Start ",
	[CH_XID7_PENDING3] = "Active XID7 P2 Exchange ",
	[CH_XID7_PENDING4] = "XID7 Complete - Pending READY ",
};

static void ctcm_action_nop(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcm actions for channel statemachine -----
 *
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg);
static void chx_rx(fsm_instance *fi, int event, void *arg);
static void chx_rxidle(fsm_instance *fi, int event, void *arg);
static void chx_firstio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);

/*
 * ----- static ctcmpc actions for ctcmpc channel statemachine -----
 *
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg);
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg);
/* shared :
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg);
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg);
*/
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg);
static void ctcmpc_chx_attnbusy(fsm_instance *, int, void *);
static void ctcmpc_chx_resend(fsm_instance *, int, void *);
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg);

/*
 * Check return code of a preceding ccw_device call, halt_IO etc...
 *
 * ch	:	The channel the error belongs to.
 * Returns the error code (!= 0) to inspect.
 */
void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg)
{
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s: %04x\n",
			CTCM_FUNTAIL, ch->id, msg, rc);
	switch (rc) {
	case -EBUSY:
		pr_info("%s: The communication peer is busy\n",
			ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch);
		break;
	case -ENODEV:
		pr_err("%s: The specified target device is not valid\n",
			ch->id);
		fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch);
		break;
	default:
		pr_err("An I/O operation resulted in error %04x\n",
			rc);
		fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch);
	}
}

void ctcm_purge_skb_queue(struct sk_buff_head *q)
{
	struct sk_buff *skb;

	CTCM_DBF_TEXT(TRACE, CTC_DBF_DEBUG, __func__);

	while ((skb = skb_dequeue(q))) {
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
	}
}

/*
 * NOP action for statemachines
 */
static void ctcm_action_nop(fsm_instance *fi, int event, void *arg)
{
}

/*
 * Actions for channel - statemachines.
 */

/*
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;
	int first = 1;
	int i;
	unsigned long duration;
	unsigned long done_stamp = jiffies;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	duration = done_stamp - ch->prof.send_stamp;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		refcount_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if (ch->collect_len > 0) {
		int rc;

		if (ctcm_checkalloc_buffer(ch)) {
			spin_unlock(&ch->collect_lock);
			return;
		}
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		if (ch->prof.maxmulti < (ch->collect_len + 2))
			ch->prof.maxmulti = ch->collect_len + 2;
		if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
			ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
		*((__u16 *)skb_put(ch->trans_skb, 2)) = ch->collect_len + 2;
		i = 0;
		while ((skb = skb_dequeue(&ch->collect_queue))) {
			skb_copy_from_linear_data(skb,
				skb_put(ch->trans_skb, skb->len), skb->len);
			priv->stats.tx_packets++;
			priv->stats.tx_bytes += skb->len - LL_HEADER_LENGTH;
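			/* drop the reference taken when the skb was queued
			 * for collection, then free it */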
			refcount_dec(&skb->users);
			dev_kfree_skb_irq(skb);
			i++;
		}
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		ch->ccw[1].count = ch->trans_skb->len;
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		ch->prof.send_stamp = jiffies;
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		ch->prof.doios_multi++;
		if (rc != 0) {
			priv->stats.tx_dropped += i;
			priv->stats.tx_errors += i;
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "chained TX");
		}
	} else {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
	}
	ctcm_clear_busy_do(dev);
}

/*
 * Initial data is sent.
 * Notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcm_chx_txidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_PR_DEBUG("%s(%s): %s\n", __func__, ch->id, dev->name);

	fsm_deltimer(&ch->timer);
	fsm_newstate(fi, CTC_STATE_TXIDLE);
	fsm_event(priv->fsm, DEV_EVENT_TXUP, ch->netdev);
}

/*
 * Got normal data, check for sanity, queue it up, allocate new buffer
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;
	struct sk_buff *skb = ch->trans_skb;
	__u16 block_len = *((__u16 *)skb->data);
	int check_len;
	int rc;

	fsm_deltimer(&ch->timer);
	if (len < 8) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d < 8\n",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (len > ch->max_bufsize) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got packet with length %d > %d\n",
			CTCM_FUNTAIL, dev->name, len, ch->max_bufsize);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}

	/*
	 * VM TCP seems to have a bug sending 2 trailing bytes of garbage.
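	 * The S390/OS390 protocol variants below therefore tolerate up to
	 * two bytes more than the announced block length (check_len).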
	 */
	switch (ch->protocol) {
	case CTCM_PROTO_S390:
	case CTCM_PROTO_OS390:
		check_len = block_len + 2;
		break;
	default:
		check_len = block_len;
		break;
	}
	if ((len < block_len) || (len > check_len)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): got block length %d != rx length %d\n",
			CTCM_FUNTAIL, dev->name, block_len, len);
		if (do_debug)
			ctcmpc_dump_skb(skb, 0);

		*((__u16 *)skb->data) = len;
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
		goto again;
	}
	if (block_len > 2) {
		*((__u16 *)skb->data) = block_len - 2;
		ctcm_unpack_skb(ch, skb);
	}
again:
	skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(skb);
	skb->len = 0;
	if (ctcm_checkalloc_buffer(ch))
		return;
	ch->ccw[1].count = ch->max_bufsize;
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	if (rc != 0)
		ctcm_ccw_check_rc(ch, rc, "normal RX");
}

/*
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_firstio(fsm_instance *fi, int event, void *arg)
{
	int rc;
	struct channel *ch = arg;
	int fsmstate = fsm_getstate(fi);

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s(%s) : %02x",
		CTCM_FUNTAIL, ch->id, fsmstate);

	ch->sense_rc = 0;	/* reset unit check report control */
	if (fsmstate == CTC_STATE_TXIDLE)
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): remote side issued READ?, init.\n",
			CTCM_FUNTAIL, ch->id);
	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		return;
	if ((fsmstate == CTC_STATE_SETUPWAIT) &&
	    (ch->protocol == CTCM_PROTO_OS390)) {
		/* OS/390 resp. z/OS */
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
			fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC,
				     CTC_EVENT_TIMER, ch);
			chx_rxidle(fi, event, arg);
		} else {
			struct net_device *dev = ch->netdev;
			struct ctcm_priv *priv = dev->ml_priv;
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		return;
	}
	/*
	 * Don't setup a timer for receiving the initial RX frame
	 * if in compatibility mode, since VM TCP delays the initial
	 * frame until it has some data to send.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_WRITE) ||
	    (ch->protocol != CTCM_PROTO_S390))
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	*((__u16 *)ch->trans_skb->data) = CTCM_INITIAL_BLOCKLEN;
	ch->ccw[1].count = 2;	/* Transfer only length */

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_SETUPWAIT);
		ctcm_ccw_check_rc(ch, rc, "init IO");
	}
	/*
	 * If in compatibility mode since we don't setup a timer, we
	 * also signal RX channel up immediately. This enables us
	 * to send packets early which in turn usually triggers some
	 * reply from VM TCP which brings up the RX channel to its
	 * final state.
	 */
	if ((CHANNEL_DIRECTION(ch->flags) == CTCM_READ) &&
	    (ch->protocol == CTCM_PROTO_S390)) {
		struct net_device *dev = ch->netdev;
		struct ctcm_priv *priv = dev->ml_priv;
		fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	}
}

/*
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 buflen;
	int rc;

	fsm_deltimer(&ch->timer);
	buflen = *((__u16 *)ch->trans_skb->data);
	CTCM_PR_DEBUG("%s: %s: Initial RX count = %d\n",
			__func__, dev->name, buflen);

	if (buflen >= CTCM_INITIAL_BLOCKLEN) {
		if (ctcm_checkalloc_buffer(ch))
			return;
		ch->ccw[1].count = ch->max_bufsize;
		fsm_newstate(fi, CTC_STATE_RXIDLE);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
		} else
			fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
	} else {
		CTCM_PR_DEBUG("%s: %s: Initial RX count %d not %d\n",
				__func__, dev->name,
				buflen, CTCM_INITIAL_BLOCKLEN);
		chx_firstio(fi, event, arg);
	}
}

/*
 * Set channel into extended mode.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setmode(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	int rc;
	unsigned long saveflags = 0;
	int timeout = CTCM_TIME_5_SEC;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch)) {
		timeout = 1500;
		CTCM_PR_DEBUG("enter %s: cp=%i ch=0x%p id=%s\n",
				__func__, smp_processor_id(), ch, ch->id);
	}
	fsm_addtimer(&ch->timer, timeout, CTC_EVENT_TIMER, ch);
	fsm_newstate(fi, CTC_STATE_SETUPWAIT);
	CTCM_CCW_DUMP((char *)&ch->ccw[6], sizeof(struct ccw1) * 2);

	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic in
	 * static view. => ignore sparse warnings here. */

	rc = ccw_device_start(ch->cdev, &ch->ccw[6], 0, 0xff, 0);
	if (event == CTC_EVENT_TIMER)	/* see above comments */
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		fsm_deltimer(&ch->timer);
		fsm_newstate(fi, CTC_STATE_STARTWAIT);
		ctcm_ccw_check_rc(ch, rc, "set Mode");
	} else
		ch->retry = 0;
}

/*
 * Setup channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_start(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags;
	int rc;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, "%s(%s): %s",
		CTCM_FUNTAIL, ch->id,
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
			"RX" : "TX");

	if (ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb(ch->trans_skb);
		ch->trans_skb = NULL;
	}
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		ch->ccw[1].cmd_code = CCW_CMD_READ;
		ch->ccw[1].flags = CCW_FLAG_SLI;
		ch->ccw[1].count = 0;
	} else {
		ch->ccw[1].cmd_code = CCW_CMD_WRITE;
		ch->ccw[1].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
		ch->ccw[1].count = 0;
	}
	if (ctcm_checkalloc_buffer(ch)) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
			"%s(%s): %s trans_skb alloc delayed "
			"until first transfer",
			CTCM_FUNTAIL, ch->id,
			(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ?
				"RX" : "TX");
	}
	ch->ccw[0].cmd_code = CCW_CMD_PREPARE;
	ch->ccw[0].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
	ch->ccw[0].count = 0;
	ch->ccw[0].cda = 0;
	ch->ccw[2].cmd_code = CCW_CMD_NOOP;	/* jointed CE + DE */
	ch->ccw[2].flags = CCW_FLAG_SLI;
	ch->ccw[2].count = 0;
	ch->ccw[2].cda = 0;
	memcpy(&ch->ccw[3], &ch->ccw[0], sizeof(struct ccw1) * 3);
	ch->ccw[4].cda = 0;
	ch->ccw[4].flags &= ~CCW_FLAG_IDA;

	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
	spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	rc = ccw_device_halt(ch->cdev, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY)
			fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "initial HaltIO");
	}
}

/*
 * Shutdown a channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_haltio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	unsigned long saveflags = 0;
	int rc;
	int oldstate;

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);

	if (event == CTC_EVENT_STOP)	/* only for STOP not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is nondeterministic in
	 * static view. => ignore sparse warnings here. */
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_TERM);
	rc = ccw_device_halt(ch->cdev, 0);

	if (event == CTC_EVENT_STOP)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	/* see remark above about conditional locking */

	if (rc != 0 && rc != -EBUSY) {
		fsm_deltimer(&ch->timer);
		if (event != CTC_EVENT_STOP) {
			fsm_newstate(fi, oldstate);
			ctcm_ccw_check_rc(ch, rc, (char *)__func__);
		}
	}
}

/*
 * Cleanup helper for chx_fail and chx_stopped
 * cleanup channels queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * state	The next state (depending on caller).
 * ch		The channel to operate on.
 */
static void ctcm_chx_cleanup(fsm_instance *fi, int state,
		struct channel *ch)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
			"%s(%s): %s[%d]\n",
			CTCM_FUNTAIL, dev->name, ch->id, state);

	fsm_deltimer(&ch->timer);
	if (IS_MPC(ch))
		fsm_deltimer(&ch->sweep_timer);

	fsm_newstate(fi, state);
	if (state == CTC_STATE_STOPPED && ch->trans_skb != NULL) {
		clear_normalized_cda(&ch->ccw[1]);
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
	}

	ch->th_seg = 0x00;
	ch->th_seq_num = 0x00;
	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		skb_queue_purge(&ch->io_queue);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		ctcm_purge_skb_queue(&ch->io_queue);
		if (IS_MPC(ch))
			ctcm_purge_skb_queue(&ch->sweep_queue);
		spin_lock(&ch->collect_lock);
		ctcm_purge_skb_queue(&ch->collect_queue);
		ch->collect_len = 0;
		spin_unlock(&ch->collect_lock);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * A channel has successfully been halted.
 * Cleanup its queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stopped(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_STOPPED, arg);
}

/*
 * A stop command from device statemachine arrived and we are in
 * not operational mode. Set state to stopped.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_stop(fsm_instance *fi, int event, void *arg)
{
	fsm_newstate(fi, CTC_STATE_STOPPED);
}

/*
 * A machine check for no path, not operational status or gone device has
 * happened.
 * Cleanup queue and notify interface statemachine.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_fail(fsm_instance *fi, int event, void *arg)
{
	ctcm_chx_cleanup(fi, CTC_STATE_NOTOP, arg);
}

/*
 * Handle error during setup of channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_setuperr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	/*
	 * Special case: Got UC_RCRESET on setmode.
	 * This means that remote side isn't setup. In this case
	 * simply retry after some 10 secs...
	 */
	if ((fsm_getstate(fi) == CTC_STATE_SETUPWAIT) &&
	    ((event == CTC_EVENT_UC_RCRESET) ||
	     (event == CTC_EVENT_UC_RSRESET))) {
		fsm_newstate(fi, CTC_STATE_STARTRETRY);
		fsm_deltimer(&ch->timer);
		fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
		if (!IS_MPC(ch) &&
		    (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)) {
			int rc = ccw_device_halt(ch->cdev, 0);
			if (rc != 0)
				ctcm_ccw_check_rc(ch, rc,
					"HaltIO in chx_setuperr");
		}
		return;
	}

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
		"%s(%s) : %s error during %s channel setup state=%s\n",
		CTCM_FUNTAIL, dev->name, ctc_ch_event_names[event],
		(CHANNEL_DIRECTION(ch->flags) == CTCM_READ) ? "RX" : "TX",
		fsm_getstate_str(fi));

	if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * Restart a channel after an error.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_restart(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	unsigned long saveflags = 0;
	int oldstate;
	int rc;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s: %s[%d] of %s\n",
		CTCM_FUNTAIL, ch->id, event, dev->name);

	fsm_deltimer(&ch->timer);

	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	oldstate = fsm_getstate(fi);
	fsm_newstate(fi, CTC_STATE_STARTWAIT);
	if (event == CTC_EVENT_TIMER)	/* only for timer not yet locked */
		spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
	/* Such conditional locking is a known problem for
	 * sparse because it is nondeterministic in static view.
	 * Warnings should be ignored here. */
	rc = ccw_device_halt(ch->cdev, 0);
	if (event == CTC_EVENT_TIMER)
		spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev), saveflags);
	if (rc != 0) {
		if (rc != -EBUSY) {
			fsm_deltimer(&ch->timer);
			fsm_newstate(fi, oldstate);
		}
		ctcm_ccw_check_rc(ch, rc, "HaltIO in ctcm_chx_restart");
	}
}

/*
 * Handle error during RX initial handshake (exchange of
 * 0-length block header)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		if (!IS_MPCDEV(dev))
			/* TODO : check if MPC deletes timer somewhere */
			fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_RXERR);
			fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
		}
	} else if (event == CTC_EVENT_UC_RCRESET) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_info(&dev->dev,
			"Init handshake not received, peer not ready yet\n");
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/*
 * Notify device statemachine if we gave up initialization
 * of RX channel.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxinitfail(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s(%s): RX %s busy, init. fail",
		CTCM_FUNTAIL, dev->name, ch->id);
	fsm_newstate(fi, CTC_STATE_RXERR);
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
}

/*
 * Handle RX Unit check remote reset (remote disconnected)
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_rxdisc(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct channel *ch2;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
		"%s: %s: remote disconnect - re-init ...",
		CTCM_FUNTAIL, dev->name);
	fsm_deltimer(&ch->timer);
	/*
	 * Notify device statemachine
	 */
	fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);

	fsm_newstate(fi, CTC_STATE_DTERM);
	ch2 = priv->channel[CTCM_WRITE];
	fsm_newstate(ch2->fsm, CTC_STATE_DTERM);

	ccw_device_halt(ch->cdev, 0);
	ccw_device_halt(ch2->cdev, 0);
}

/*
 * Handle error during TX channel initialization.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;

	if (event == CTC_EVENT_TIMER) {
		fsm_deltimer(&ch->timer);
		if (ch->retry++ < 3)
			ctcm_chx_restart(fi, event, arg);
		else {
			fsm_newstate(fi, CTC_STATE_TXERR);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		}
	} else if (event == CTC_EVENT_UC_RCRESET) {
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_info(&dev->dev,
			"Init handshake not sent, peer not ready yet\n");
	} else {
		CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
			"%s(%s): %s in %s", CTCM_FUNTAIL, ch->id,
			ctc_ch_event_names[event], fsm_getstate_str(fi));

		dev_warn(&dev->dev,
			"Initialization failed with RX/TX init handshake "
			"error %s\n", ctc_ch_event_names[event]);
	}
}

/*
 * Handle TX timeout by retrying operation.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_txretry(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct sk_buff *skb;

	CTCM_PR_DEBUG("Enter: %s: cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ch, ch->id);

	fsm_deltimer(&ch->timer);
	if (ch->retry++ > 3) {
		struct mpc_group *gptr = priv->mpcg;
		CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
			"%s: %s: retries exceeded",
			CTCM_FUNTAIL, ch->id);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
		/* call restart if not MPC or if MPC and mpcg fsm is ready.
		   use gptr as mpc indicator */
		if (!(gptr && (fsm_getstate(gptr->fsm) != MPCG_STATE_READY)))
			ctcm_chx_restart(fi, event, arg);
		goto done;
	}

	CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG,
		"%s : %s: retry %d",
		CTCM_FUNTAIL, ch->id, ch->retry);
	skb = skb_peek(&ch->io_queue);
	if (skb) {
		int rc = 0;
		unsigned long saveflags = 0;
		clear_normalized_cda(&ch->ccw[4]);
		ch->ccw[4].count = skb->len;
		if (set_normalized_cda(&ch->ccw[4], skb->data)) {
			CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO,
				"%s: %s: IDAL alloc failed",
				CTCM_FUNTAIL, ch->id);
			fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
			ctcm_chx_restart(fi, event, arg);
			goto done;
		}
		fsm_addtimer(&ch->timer, 1000, CTC_EVENT_TIMER, ch);
		if (event == CTC_EVENT_TIMER)	/* for TIMER not yet locked */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		/* Such conditional locking is a known problem for
		 * sparse because it is nondeterministic in static view.
		 * Warnings should be ignored here. */
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[3],
					sizeof(struct ccw1) * 3);

		rc = ccw_device_start(ch->cdev, &ch->ccw[3], 0, 0xff, 0);
		if (event == CTC_EVENT_TIMER)
			spin_unlock_irqrestore(get_ccwdev_lock(ch->cdev),
					saveflags);
		if (rc != 0) {
			fsm_deltimer(&ch->timer);
			ctcm_ccw_check_rc(ch, rc, "TX in chx_txretry");
			ctcm_purge_skb_queue(&ch->io_queue);
		}
	}
done:
	return;
}

/*
 * Handle fatal errors during an I/O command.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcm_chx_iofatal(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	int rd = CHANNEL_DIRECTION(ch->flags);

	fsm_deltimer(&ch->timer);
	CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
		"%s: %s: %s unrecoverable channel error",
		CTCM_FUNTAIL, ch->id, rd == CTCM_READ ? "RX" : "TX");

	if (IS_MPC(ch)) {
		priv->stats.tx_dropped++;
		priv->stats.tx_errors++;
	}
	if (rd == CTCM_READ) {
		fsm_newstate(fi, CTC_STATE_RXERR);
		fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev);
	} else {
		fsm_newstate(fi, CTC_STATE_TXERR);
		fsm_event(priv->fsm, DEV_EVENT_TXDOWN, dev);
	}
}

/*
 * The ctcm statemachine for a channel.
 */
const fsm_node ch_fsm[] = {
	{ CTC_STATE_STOPPED, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_START, ctcm_chx_start },
	{ CTC_STATE_STOPPED, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STOPPED, CTC_EVENT_MC_FAIL, ctcm_action_nop },

	{ CTC_STATE_NOTOP, CTC_EVENT_STOP, ctcm_chx_stop },
	{ CTC_STATE_NOTOP, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_FAIL, ctcm_action_nop },
	{ CTC_STATE_NOTOP, CTC_EVENT_MC_GOOD, ctcm_chx_start },

	{ CTC_STATE_STARTWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_TIMER, ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_FINSTAT, ctcm_action_nop },
	{ CTC_STATE_STARTRETRY, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_SETUPWAIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RCRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_UC_RSRESET, ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_TIMER, ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXINIT, CTC_EVENT_FINSTAT, chx_rxidle },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_TIMER, ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT, CTC_EVENT_ATTNBUSY, ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT, CTC_EVENT_UC_ZERO, chx_firstio },
	{ CTC_STATE_RXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_RXIDLE, CTC_EVENT_FINSTAT, chx_rx },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_RCRESET, ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXIDLE, CTC_EVENT_UC_ZERO, chx_rx },

	{ CTC_STATE_TXINIT, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXINIT, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXINIT, CTC_EVENT_FINSTAT, ctcm_chx_txidle },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RCRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_UC_RSRESET, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_TIMER, ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TXIDLE, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_FINSTAT, chx_firstio },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TXIDLE, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TERM, CTC_EVENT_STOP, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_TERM, CTC_EVENT_FINSTAT, ctcm_chx_stopped },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_TERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_DTERM, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_DTERM, CTC_EVENT_START, ctcm_chx_restart },
	{ CTC_STATE_DTERM, CTC_EVENT_FINSTAT, ctcm_chx_setmode },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RCRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_UC_RSRESET, ctcm_action_nop },
	{ CTC_STATE_DTERM, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_TX, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TX, CTC_EVENT_START, ctcm_action_nop },
	{ CTC_STATE_TX, CTC_EVENT_FINSTAT, chx_txdone },
	{ CTC_STATE_TX, CTC_EVENT_UC_RCRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_UC_RSRESET, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_TIMER, ctcm_chx_txretry },
	{ CTC_STATE_TX, CTC_EVENT_IO_ENODEV, ctcm_chx_iofatal },
	{ CTC_STATE_TX, CTC_EVENT_MC_FAIL, ctcm_chx_fail },

	{ CTC_STATE_RXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_STOP, ctcm_chx_haltio },
	{ CTC_STATE_TXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
	{ CTC_STATE_RXERR, CTC_EVENT_MC_FAIL, ctcm_chx_fail },
};

int ch_fsm_len = ARRAY_SIZE(ch_fsm);

/*
 * MPC actions for mpc channel statemachine
 * handling of MPC protocol requires extra
 * statemachine and actions which are prefixed ctcmpc_ .
 * The ctc_ch_states and ctc_ch_state_names,
 * ctc_ch_events and ctc_ch_event_names share the ctcm definitions
 * which are expanded by some elements.
 */

/*
 * Actions for mpc channel statemachine.
 */

/*
 * Normal data has been sent. Free the corresponding
 * skb (it's in io_queue), reset dev->tbusy and
 * revert to idle state.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb;
	int first = 1;
	int i;
	__u32 data_space;
	unsigned long duration;
	struct sk_buff *peekskb;
	int rc;
	struct th_header *header;
	struct pdu *p_header;
	unsigned long done_stamp = jiffies;

	CTCM_PR_DEBUG("Enter %s: %s cp:%i\n",
			__func__, dev->name, smp_processor_id());

	duration = done_stamp - ch->prof.send_stamp;
	if (duration > ch->prof.tx_time)
		ch->prof.tx_time = duration;

	if (ch->irb->scsw.cmd.count != 0)
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_DEBUG,
			"%s(%s): TX not complete, remaining %d bytes",
			CTCM_FUNTAIL, dev->name, ch->irb->scsw.cmd.count);
	fsm_deltimer(&ch->timer);
	while ((skb = skb_dequeue(&ch->io_queue))) {
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len - TH_HEADER_LENGTH;
		if (first) {
			priv->stats.tx_bytes += 2;
			first = 0;
		}
		refcount_dec(&skb->users);
		dev_kfree_skb_irq(skb);
	}
	spin_lock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[4]);
	if ((ch->collect_len <= 0) || (grp->in_sweep != 0)) {
		spin_unlock(&ch->collect_lock);
		fsm_newstate(fi, CTC_STATE_TXIDLE);
		goto done;
	}

	if (ctcm_checkalloc_buffer(ch)) {
		spin_unlock(&ch->collect_lock);
		goto done;
	}
	ch->trans_skb->data = ch->trans_skb_data;
	skb_reset_tail_pointer(ch->trans_skb);
	ch->trans_skb->len = 0;
	if (ch->prof.maxmulti < (ch->collect_len + TH_HEADER_LENGTH))
		ch->prof.maxmulti = ch->collect_len + TH_HEADER_LENGTH;
	if (ch->prof.maxcqueue < skb_queue_len(&ch->collect_queue))
		ch->prof.maxcqueue = skb_queue_len(&ch->collect_queue);
	i = 0;
	p_header = NULL;
	data_space = grp->group_max_buflen - TH_HEADER_LENGTH;

	CTCM_PR_DBGDATA("%s: building trans_skb from collect_q"
		" data_space:%04x\n",
		__func__, data_space);

	while ((skb = skb_dequeue(&ch->collect_queue))) {
		skb_put_data(ch->trans_skb, skb->data, skb->len);
		p_header = (struct pdu *)
			(skb_tail_pointer(ch->trans_skb) - skb->len);
		p_header->pdu_flag = 0x00;
		if (be16_to_cpu(skb->protocol) == ETH_P_SNAP)
			p_header->pdu_flag |= 0x60;
		else
			p_header->pdu_flag |= 0x20;

		CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
			__func__, ch->trans_skb->len);
		CTCM_PR_DBGDATA("%s: pdu header and data for up"
			" to 32 bytes sent to vtam\n", __func__);
		CTCM_D3_DUMP((char *)p_header, min_t(int, skb->len, 32));

		ch->collect_len -= skb->len;
		data_space -= skb->len;
		priv->stats.tx_packets++;
		priv->stats.tx_bytes += skb->len;
		refcount_dec(&skb->users);
		dev_kfree_skb_any(skb);
		peekskb = skb_peek(&ch->collect_queue);
		if (peekskb->len > data_space)
			break;
		i++;
	}
	/* p_header points to the last one we handled */
	if (p_header)
		p_header->pdu_flag |= PDU_LAST;	/* Say it's the last one */

	header = skb_push(ch->trans_skb, TH_HEADER_LENGTH);
	memset(header, 0, TH_HEADER_LENGTH);

	header->th_ch_flag = TH_HAS_PDU;	/* Normal data */
	ch->th_seq_num++;
	header->th_seq_num = ch->th_seq_num;

	CTCM_PR_DBGDATA("%s: ToVTAM_th_seq= %08x\n" ,
			__func__, ch->th_seq_num);

	CTCM_PR_DBGDATA("%s: trans_skb len:%04x \n",
			__func__, ch->trans_skb->len);
	CTCM_PR_DBGDATA("%s: up-to-50 bytes of trans_skb "
			"data to vtam from collect_q\n", __func__);
	CTCM_D3_DUMP((char *)ch->trans_skb->data,
			min_t(int, ch->trans_skb->len, 50));

	spin_unlock(&ch->collect_lock);
	clear_normalized_cda(&ch->ccw[1]);

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(u64)dma32_to_u32(ch->ccw[1].cda),
			ch->trans_skb->data);
	ch->ccw[1].count = ch->max_bufsize;

	if (set_normalized_cda(&ch->ccw[1], ch->trans_skb->data)) {
		dev_kfree_skb_any(ch->trans_skb);
		ch->trans_skb = NULL;
		CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_ERROR,
			"%s: %s: IDAL alloc failed",
			CTCM_FUNTAIL, ch->id);
		fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
		return;
	}

	CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
			(void *)(u64)dma32_to_u32(ch->ccw[1].cda),
			ch->trans_skb->data);

	ch->ccw[1].count = ch->trans_skb->len;
	fsm_addtimer(&ch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, ch);
	ch->prof.send_stamp = jiffies;
	if (do_debug_ccw)
		ctcmpc_dumpit((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
	rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
	ch->prof.doios_multi++;
	if (rc != 0) {
		priv->stats.tx_dropped += i;
		priv->stats.tx_errors += i;
		fsm_deltimer(&ch->timer);
		ctcm_ccw_check_rc(ch, rc, "chained TX");
	}
done:
	ctcm_clear_busy(dev);
	return;
}

/*
 * Got normal data, check for sanity, queue it up, allocate new buffer
 * trigger bottom half, and initiate next read.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
static void ctcmpc_chx_rx(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct sk_buff *skb = ch->trans_skb;
	struct sk_buff *new_skb;
	unsigned long saveflags = 0;	/* avoids compiler warning */
	int len = ch->max_bufsize - ch->irb->scsw.cmd.count;

	CTCM_PR_DEBUG("%s: %s: cp:%i %s maxbuf : %04x, len: %04x\n",
			CTCM_FUNTAIL, dev->name, smp_processor_id(),
			ch->id, ch->max_bufsize, len);
	fsm_deltimer(&ch->timer);

	if (skb == NULL) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): TRANS_SKB = NULL",
			CTCM_FUNTAIL, dev->name);
		goto again;
	}

	if (len < TH_HEADER_LENGTH) {
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): packet length %d too short",
			CTCM_FUNTAIL, dev->name, len);
		priv->stats.rx_dropped++;
		priv->stats.rx_length_errors++;
	} else {
		/* must have valid th header or game over */
		__u32 block_len = len;
		len = TH_HEADER_LENGTH + XID2_LENGTH + 4;
		new_skb = __dev_alloc_skb(ch->max_bufsize, GFP_ATOMIC);

		if (new_skb == NULL) {
			CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
				"%s(%s): skb allocation failed",
				CTCM_FUNTAIL, dev->name);
			fsm_event(priv->mpcg->fsm, MPCG_EVENT_INOP, dev);
			goto again;
		}
		switch (fsm_getstate(grp->fsm)) {
		case MPCG_STATE_RESET:
		case MPCG_STATE_INOP:
			dev_kfree_skb_any(new_skb);
			break;
		case MPCG_STATE_FLOWC:
		case MPCG_STATE_READY:
			skb_put_data(new_skb, skb->data, block_len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_schedule(&ch->ch_tasklet);
			break;
		default:
			skb_put_data(new_skb, skb->data, len);
			skb_queue_tail(&ch->io_queue, new_skb);
			tasklet_hi_schedule(&ch->ch_tasklet);
			break;
		}
	}

again:
	switch (fsm_getstate(grp->fsm)) {
	int rc, dolock;
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			break;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		if (do_debug_ccw)
			ctcmpc_dumpit((char *)&ch->ccw[0],
					sizeof(struct ccw1) * 3);
		dolock = !in_hardirq();
		if (dolock)
			spin_lock_irqsave(
				get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (dolock) /* see remark about conditional locking */
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0)
			ctcm_ccw_check_rc(ch, rc, "normal RX");
		break;
	default:
		break;
	}

	CTCM_PR_DEBUG("Exit %s: %s, ch=0x%p, id=%s\n",
			__func__, dev->name, ch, ch->id);

}

/*
 * Initialize connection by sending a __u16 of value 0.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
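 *
 * Note: unlike chx_firstio() above, this MPC variant does not write an
 * initial block length; channel startup is driven by the XID exchange
 * (see ctcmpc_chx_rxidle()).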
 */
static void ctcmpc_chx_firstio(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *gptr = priv->mpcg;

	CTCM_PR_DEBUG("Enter %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);

	CTCM_DBF_TEXT_(MPC_TRACE, CTC_DBF_INFO,
			"%s: %s: chstate:%i, grpstate:%i, prot:%i\n",
			CTCM_FUNTAIL, ch->id, fsm_getstate(fi),
			fsm_getstate(gptr->fsm), ch->protocol);

	if (fsm_getstate(fi) == CTC_STATE_TXIDLE)
		MPC_DBF_DEV_NAME(TRACE, dev, "remote side issued READ? ");

	fsm_deltimer(&ch->timer);
	if (ctcm_checkalloc_buffer(ch))
		goto done;

	switch (fsm_getstate(fi)) {
	case CTC_STATE_STARTRETRY:
	case CTC_STATE_SETUPWAIT:
		if (CHANNEL_DIRECTION(ch->flags) == CTCM_READ) {
			ctcmpc_chx_rxidle(fi, event, arg);
		} else {
			fsm_newstate(fi, CTC_STATE_TXIDLE);
			fsm_event(priv->fsm, DEV_EVENT_TXUP, dev);
		}
		goto done;
	default:
		break;
	}

	fsm_newstate(fi, (CHANNEL_DIRECTION(ch->flags) == CTCM_READ)
		     ? CTC_STATE_RXINIT : CTC_STATE_TXINIT);

done:
	CTCM_PR_DEBUG("Exit %s: id=%s, ch=0x%p\n",
			__func__, ch->id, ch);
	return;
}

/*
 * Got initial data, check it. If OK,
 * notify device statemachine that we are up and
 * running.
 *
 * fi		An instance of a channel statemachine.
 * event	The event, just happened.
 * arg		Generic pointer, casted from channel * upon call.
 */
void ctcmpc_chx_rxidle(fsm_instance *fi, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	int rc;
	unsigned long saveflags = 0;	/* avoids compiler warning */

	fsm_deltimer(&ch->timer);
	CTCM_PR_DEBUG("%s: %s: %s: cp:%i, chstate:%i grpstate:%i\n",
			__func__, ch->id, dev->name, smp_processor_id(),
			fsm_getstate(fi), fsm_getstate(grp->fsm));

	fsm_newstate(fi, CTC_STATE_RXIDLE);
	/* XID processing complete */

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_FLOWC:
	case MPCG_STATE_READY:
		if (ctcm_checkalloc_buffer(ch))
			goto done;
		ch->trans_skb->data = ch->trans_skb_data;
		skb_reset_tail_pointer(ch->trans_skb);
		ch->trans_skb->len = 0;
		ch->ccw[1].count = ch->max_bufsize;
		CTCM_CCW_DUMP((char *)&ch->ccw[0], sizeof(struct ccw1) * 3);
		if (event == CTC_EVENT_START)
			/* see remark about conditional locking */
			spin_lock_irqsave(get_ccwdev_lock(ch->cdev), saveflags);
		rc = ccw_device_start(ch->cdev, &ch->ccw[0], 0, 0xff, 0);
		if (event == CTC_EVENT_START)
			spin_unlock_irqrestore(
				get_ccwdev_lock(ch->cdev), saveflags);
		if (rc != 0) {
			fsm_newstate(fi, CTC_STATE_RXINIT);
			ctcm_ccw_check_rc(ch, rc, "initial RX");
			goto done;
		}
		break;
	default:
		break;
	}

	fsm_event(priv->fsm, DEV_EVENT_RXUP, dev);
done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attn(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

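	/* ATTN from the peer: depending on the MPC group state, either kick
	 * off or continue the XID0/XID7 exchanges on this channel. */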
	CTCM_PR_DEBUG("%s(%s): %s(ch=0x%p), cp=%i, ChStat:%s, GrpStat:%s\n",
		__func__, dev->name, ch->id, ch, smp_processor_id(),
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID2INITW:
		/* ok..start yside xid exchanges */
		if (!ch->in_mpcgroup)
			break;
		if (fsm_getstate(ch->fsm) == CH_XID0_PENDING) {
			fsm_deltimer(&grp->timer);
			fsm_addtimer(&grp->timer,
				MPC_XID_TIMEOUT_VALUE,
				MPCG_EVENT_TIMER, dev);
			fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);

		} else if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			/* attn rcvd before xid0 processed via bh */
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID2INITX:
	case MPCG_STATE_XID0IOWAIT:
	case MPCG_STATE_XID0IOWAIX:
		/* attn rcvd before xid0 processed on ch
		   but mid-xid0 processing for group */
		if (fsm_getstate(ch->fsm) < CH_XID7_PENDING1)
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
		break;
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
		switch (fsm_getstate(ch->fsm)) {
		case CH_XID7_PENDING:
			fsm_newstate(ch->fsm, CH_XID7_PENDING1);
			break;
		case CH_XID7_PENDING2:
			fsm_newstate(ch->fsm, CH_XID7_PENDING3);
			break;
		}
		fsm_event(grp->fsm, MPCG_EVENT_XID7DONE, dev);
		break;
	}

	return;
}

/*
 * ctcmpc channel FSM action
 * called from one point in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_attnbusy(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	CTCM_PR_DEBUG("%s(%s): %s\n ChState:%s GrpState:%s\n",
		__func__, dev->name, ch->id,
		fsm_getstate_str(ch->fsm), fsm_getstate_str(grp->fsm));

	fsm_deltimer(&ch->timer);

	switch (fsm_getstate(grp->fsm)) {
	case MPCG_STATE_XID0IOWAIT:
		/* vtam wants to be primary. Start yside xid exchanges */
		/* only receive one attn-busy at a time so must not */
		/* change state each time */
		grp->changed_side = 1;
		fsm_newstate(grp->fsm, MPCG_STATE_XID2INITW);
		break;
	case MPCG_STATE_XID2INITW:
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		/* process began via call to establish_conn */
		/* so must report failure instead of reverting */
		/* back to ready-for-xid passive state */
		if (grp->estconnfunc)
			goto done;
		/* this attnbusy is NOT the result of xside xid */
		/* collisions so yside must have been triggered */
		/* by an ATTN that was not intended to start XID */
		/* processing. Revert back to ready-for-xid and */
		/* wait for ATTN interrupt to signal xid start */
		if (fsm_getstate(ch->fsm) == CH_XID0_INPROGRESS) {
			fsm_newstate(ch->fsm, CH_XID0_PENDING);
			fsm_deltimer(&grp->timer);
			goto done;
		}
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	case MPCG_STATE_XID2INITX:
		/* XID2 was received before ATTN Busy for second
		   channel. Send yside xid for second channel.
		if (grp->changed_side == 1) {
			grp->changed_side = 2;
			break;
		}
		fallthrough;
	case MPCG_STATE_XID0IOWAIX:
	case MPCG_STATE_XID7INITW:
	case MPCG_STATE_XID7INITX:
	case MPCG_STATE_XID7INITI:
	case MPCG_STATE_XID7INITZ:
	default:
		/*
		 * Multiple attn-busy indicates that the sides are too far
		 * out of sync, and these are certainly not being received
		 * as part of valid mpc group negotiations.
		 */
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	}

	if (grp->changed_side == 1) {
		fsm_deltimer(&grp->timer);
		fsm_addtimer(&grp->timer, MPC_XID_TIMEOUT_VALUE,
			     MPCG_EVENT_TIMER, dev);
	}
	if (ch->in_mpcgroup)
		fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	else
		CTCM_DBF_TEXT_(MPC_ERROR, CTC_DBF_ERROR,
			"%s(%s): channel %s not added to group",
			CTCM_FUNTAIL, dev->name, ch->id);

done:
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_resend(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ch = arg;
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;

	fsm_event(grp->fsm, MPCG_EVENT_XID0DO, ch);
	return;
}

/*
 * ctcmpc channel FSM action
 * called from several points in ctcmpc_ch_fsm
 * ctcmpc only
 */
static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg)
{
	struct channel *ach = arg;
	struct net_device *dev = ach->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	struct mpc_group *grp = priv->mpcg;
	struct channel *wch = priv->channel[CTCM_WRITE];
	struct channel *rch = priv->channel[CTCM_READ];
	struct sk_buff *skb;
	struct th_sweep *header;
	int rc = 0;
	unsigned long saveflags = 0;

	CTCM_PR_DEBUG("ctcmpc enter: %s(): cp=%i ch=0x%p id=%s\n",
			__func__, smp_processor_id(), ach, ach->id);

	if (grp->in_sweep == 0)
		goto done;

	CTCM_PR_DBGDATA("%s: 1: ToVTAM_th_seq= %08x\n",
			__func__, wch->th_seq_num);
	CTCM_PR_DBGDATA("%s: 1: FromVTAM_th_seq= %08x\n",
			__func__, rch->th_seq_num);

	if (fsm_getstate(wch->fsm) != CTC_STATE_TXIDLE) {
		/* give the previous IO time to complete */
		fsm_addtimer(&wch->sweep_timer,
			200, CTC_EVENT_RSWEEP_TIMER, wch);
		goto done;
	}

	skb = skb_dequeue(&wch->sweep_queue);
	if (!skb)
		goto done;

	if (set_normalized_cda(&wch->ccw[4], skb->data)) {
		grp->in_sweep = 0;
		ctcm_clear_busy_do(dev);
		dev_kfree_skb_any(skb);
		fsm_event(grp->fsm, MPCG_EVENT_INOP, dev);
		goto done;
	} else {
		refcount_inc(&skb->users);
		skb_queue_tail(&wch->io_queue, skb);
	}

	/* send out the sweep */
	wch->ccw[4].count = skb->len;

	header = (struct th_sweep *)skb->data;
	switch (header->th.th_ch_flag) {
	case TH_SWEEP_REQ:
		grp->sweep_req_pend_num--;
		break;
	case TH_SWEEP_RESP:
		grp->sweep_rsp_pend_num--;
		break;
	}

	header->sw.th_last_seq = wch->th_seq_num;

	CTCM_CCW_DUMP((char *)&wch->ccw[3], sizeof(struct ccw1) * 3);
	CTCM_PR_DBGDATA("%s: sweep packet\n", __func__);
	CTCM_D3_DUMP((char *)header, TH_SWEEP_LENGTH);

	fsm_addtimer(&wch->timer, CTCM_TIME_5_SEC, CTC_EVENT_TIMER, wch);
	fsm_newstate(wch->fsm, CTC_STATE_TX);

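	/*
	 * Issue the sweep frame on the write channel: the CCW chain for
	 * sweep data starts at ccw[3], and the ccwdev lock serializes the
	 * ccw_device_start() call against the channel's interrupt handler.
	 */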
	spin_lock_irqsave(get_ccwdev_lock(wch->cdev), saveflags);
	wch->prof.send_stamp = jiffies;
	rc = ccw_device_start(wch->cdev, &wch->ccw[3], 0, 0xff, 0);
	spin_unlock_irqrestore(get_ccwdev_lock(wch->cdev), saveflags);

	if ((grp->sweep_req_pend_num == 0) &&
	    (grp->sweep_rsp_pend_num == 0)) {
		grp->in_sweep = 0;
		rch->th_seq_num = 0x00;
		wch->th_seq_num = 0x00;
		ctcm_clear_busy_do(dev);
	}

	CTCM_PR_DBGDATA("%s: To-/From-VTAM_th_seq = %08x/%08x\n",
			__func__, wch->th_seq_num, rch->th_seq_num);

	if (rc != 0)
		ctcm_ccw_check_rc(wch, rc, "send sweep");

done:
	return;
}


/*
 * The ctcmpc statemachine for a channel.
 */
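/*
 * Each fsm_node maps one (channel state, event) pair to the action routine
 * invoked by the generic FSM code; the CH_XID* states are the MPC-specific
 * additions used during XID negotiation.
 */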
const fsm_node ctcmpc_ch_fsm[] = {
	{ CTC_STATE_STOPPED,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_START,	ctcm_chx_start },
	{ CTC_STATE_STOPPED,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STOPPED,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_STOPPED,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },

	{ CTC_STATE_NOTOP,	CTC_EVENT_STOP,		ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_FAIL,	ctcm_action_nop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_MC_GOOD,	ctcm_chx_start },
	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RCRESET,	ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_UC_RSRESET,	ctcm_chx_stop },
	{ CTC_STATE_NOTOP,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_STARTWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setuperr },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_STARTWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_STARTRETRY,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_STARTRETRY,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_TIMER,	ctcm_chx_setmode },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_SETUPWAIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CTC_STATE_RXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXINIT,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rxidle },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_TIMER,	ctcm_chx_rxiniterr },
	{ CTC_STATE_RXINIT,	CTC_EVENT_ATTNBUSY,	ctcm_chx_rxinitfail },
	{ CTC_STATE_RXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXINIT,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_firstio },
	{ CTC_STATE_RXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },

	{ CH_XID0_PENDING,	CTC_EVENT_FINSTAT,	ctcm_action_nop },
	{ CH_XID0_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID0_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID0_PENDING,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID0_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID0_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID0_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },

	{ CH_XID0_INPROGRESS,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_ATTNBUSY,	ctcmpc_chx_attnbusy },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID0_INPROGRESS,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING1,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING1,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING1,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING1,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING1,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING1,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING1,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING1,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING1,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING1,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING2,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING2,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING2,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING2,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING2,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING2,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING2,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING2,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING2,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING2,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING3,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING3,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING3,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING3,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING3,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING3,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING3,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING3,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING3,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING3,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CH_XID7_PENDING4,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING4,	CTC_EVENT_ATTN,		ctcmpc_chx_attn },
	{ CH_XID7_PENDING4,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CH_XID7_PENDING4,	CTC_EVENT_START,	ctcm_action_nop },
	{ CH_XID7_PENDING4,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING4,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RCRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING4,	CTC_EVENT_UC_RSRESET,	ctcm_chx_setuperr },
	{ CH_XID7_PENDING4,	CTC_EVENT_ATTNBUSY,	ctcm_chx_iofatal },
	{ CH_XID7_PENDING4,	CTC_EVENT_TIMER,	ctcmpc_chx_resend },
	{ CH_XID7_PENDING4,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CTC_STATE_RXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_rx },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_rxdisc },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXIDLE,	CTC_EVENT_UC_ZERO,	ctcmpc_chx_rx },

	{ CTC_STATE_TXINIT,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXINIT,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXINIT,	CTC_EVENT_FINSTAT,	ctcm_chx_txidle },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RCRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_UC_RSRESET,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_TIMER,	ctcm_chx_txiniterr },
	{ CTC_STATE_TXINIT,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXINIT,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TXINIT,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },

	{ CTC_STATE_TXIDLE,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_FINSTAT,	ctcmpc_chx_firstio },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RCRESET,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TXIDLE,	CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },

	{ CTC_STATE_TERM,	CTC_EVENT_STOP,		ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_TERM,	CTC_EVENT_FINSTAT,	ctcm_chx_stopped },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_TERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TERM,	CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },
	{ CTC_STATE_TERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_DTERM,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_DTERM,	CTC_EVENT_START,	ctcm_chx_restart },
	{ CTC_STATE_DTERM,	CTC_EVENT_FINSTAT,	ctcm_chx_setmode },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RCRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_UC_RSRESET,	ctcm_action_nop },
	{ CTC_STATE_DTERM,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_DTERM,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },

	{ CTC_STATE_TX,		CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TX,		CTC_EVENT_START,	ctcm_action_nop },
	{ CTC_STATE_TX,		CTC_EVENT_FINSTAT,	ctcmpc_chx_txdone },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RCRESET,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_UC_RSRESET,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_TIMER,	ctcm_chx_txretry },
	{ CTC_STATE_TX,		CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TX,		CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_TX,		CTC_EVENT_RSWEEP_TIMER,	ctcmpc_chx_send_sweep },
	{ CTC_STATE_TX,		CTC_EVENT_IO_EBUSY,	ctcm_chx_fail },

	{ CTC_STATE_RXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_STOP,		ctcm_chx_haltio },
	{ CTC_STATE_TXERR,	CTC_EVENT_IO_ENODEV,	ctcm_chx_iofatal },
	{ CTC_STATE_TXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
	{ CTC_STATE_RXERR,	CTC_EVENT_MC_FAIL,	ctcm_chx_fail },
};

int mpc_ch_fsm_len = ARRAY_SIZE(ctcmpc_ch_fsm);

/*
 * Actions for the interface statemachine.
 */

/*
 * Start up channels by sending CTC_EVENT_START to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_start(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;
	int direction;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_deltimer(&priv->restart_timer);
	fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
	if (IS_MPC(priv))
		priv->mpcg->channels_terminating = 0;
	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		fsm_event(ch->fsm, CTC_EVENT_START, ch);
	}
}
/*
 * Shut down channels by sending CTC_EVENT_STOP to each channel.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_stop(fsm_instance *fi, int event, void *arg)
{
	int direction;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
	for (direction = CTCM_READ; direction <= CTCM_WRITE; direction++) {
		struct channel *ch = priv->channel[direction];
		fsm_event(ch->fsm, CTC_EVENT_STOP, ch);
		ch->th_seq_num = 0x00;
		CTCM_PR_DEBUG("%s: CH_th_seq= %08x\n",
				__func__, ch->th_seq_num);
	}
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);
}

static void dev_action_restart(fsm_instance *fi, int event, void *arg)
{
	int restart_timer;
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(TRACE, dev, "");

	if (IS_MPC(priv))
		restart_timer = CTCM_TIME_1_SEC;
	else
		restart_timer = CTCM_TIME_5_SEC;
	dev_info(&dev->dev, "Restarting device\n");

	dev_action_stop(fi, event, arg);
	fsm_event(priv->fsm, DEV_EVENT_STOP, dev);
	if (IS_MPC(priv))
		fsm_newstate(priv->mpcg->fsm, MPCG_STATE_RESET);

	/*
	 * Going back into the start sequence too quickly can result in
	 * the other side becoming unreachable, due to sense reported
	 * when IO is aborted.
	 */
	fsm_addtimer(&priv->restart_timer, restart_timer,
			DEV_EVENT_START, dev);
}

/*
 * Called from channel statemachine
 * when a channel is up and running.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_chup(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;
	int dev_stat = fsm_getstate(fi);

	CTCM_DBF_TEXT_(SETUP, CTC_DBF_NOTICE,
		"%s(%s): priv = %p [%d,%d]\n ", CTCM_FUNTAIL,
			dev->name, dev->ml_priv, dev_stat, event);

	switch (fsm_getstate(fi)) {
	case DEV_STATE_STARTWAIT_RXTX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_RXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			dev_info(&dev->dev,
				"Connected with remote side\n");
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_TXUP) {
			fsm_newstate(fi, DEV_STATE_RUNNING);
			dev_info(&dev->dev,
				"Connected with remote side\n");
			ctcm_clear_busy(dev);
		}
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_RXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_TXUP)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
		break;
	}

	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXUP)
			mpc_channel_action(priv->channel[CTCM_READ],
				CTCM_READ, MPC_CHANNEL_ADD);
		else
			mpc_channel_action(priv->channel[CTCM_WRITE],
				CTCM_WRITE, MPC_CHANNEL_ADD);
	}
}

/*
 * Called from channel statemachine
 * when a channel has been shutdown.
 *
 * fi		An instance of an interface statemachine.
 * event	The event that just happened.
 * arg		Generic pointer, cast from struct net_device * upon call.
 */
static void dev_action_chdown(fsm_instance *fi, int event, void *arg)
{
	struct net_device *dev = arg;
	struct ctcm_priv *priv = dev->ml_priv;

	CTCMY_DBF_DEV_NAME(SETUP, dev, "");

	switch (fsm_getstate(fi)) {
	case DEV_STATE_RUNNING:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_TX);
		else
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RX);
		break;
	case DEV_STATE_STARTWAIT_RX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STARTWAIT_TX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
		break;
	case DEV_STATE_STOPWAIT_RXTX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPWAIT_RX);
		else
			fsm_newstate(fi, DEV_STATE_STOPWAIT_TX);
		break;
	case DEV_STATE_STOPWAIT_RX:
		if (event == DEV_EVENT_RXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	case DEV_STATE_STOPWAIT_TX:
		if (event == DEV_EVENT_TXDOWN)
			fsm_newstate(fi, DEV_STATE_STOPPED);
		break;
	}
	if (IS_MPC(priv)) {
		if (event == DEV_EVENT_RXDOWN)
			mpc_channel_action(priv->channel[CTCM_READ],
				CTCM_READ, MPC_CHANNEL_REMOVE);
		else
			mpc_channel_action(priv->channel[CTCM_WRITE],
				CTCM_WRITE, MPC_CHANNEL_REMOVE);
	}
}

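/*
 * The statemachine for the network interface itself: each fsm_node maps
 * one (device state, event) pair to the action the generic FSM code
 * invokes when that event arrives in that state.
 */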
const fsm_node dev_fsm[] = {
	{ DEV_STATE_STOPPED,		DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RXTX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_RX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_START,	dev_action_start },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STOPWAIT_TX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RXTX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_TX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_RXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_TXUP,		dev_action_chup },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_STARTWAIT_RX,	DEV_EVENT_RESTART,	dev_action_restart },
	{ DEV_STATE_RUNNING,		DEV_EVENT_STOP,		dev_action_stop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RXDOWN,	dev_action_chdown },
	{ DEV_STATE_RUNNING,		DEV_EVENT_TXDOWN,	dev_action_chdown },
	{ DEV_STATE_RUNNING,		DEV_EVENT_TXUP,		ctcm_action_nop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RXUP,		ctcm_action_nop },
	{ DEV_STATE_RUNNING,		DEV_EVENT_RESTART,	dev_action_restart },
};

int dev_fsm_len = ARRAY_SIZE(dev_fsm);

/* --- This is the END my friend --- */