1 /*-
2 * Copyright (c) 2017 Broadcom. All rights reserved.
3 * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright notice,
9 * this list of conditions and the following disclaimer.
10 *
11 * 2. Redistributions in binary form must reproduce the above copyright notice,
12 * this list of conditions and the following disclaimer in the documentation
13 * and/or other materials provided with the distribution.
14 *
15 * 3. Neither the name of the copyright holder nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /**
33 * @file
34 * Implement remote device state machine for target and initiator.
35 */
36
37 /*!
38 @defgroup device_sm Node State Machine: Remote Device States
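
A typical walk through these states, as implemented in this file:
- initiator mode: __ocs_d_init (send PLOGI) -> __ocs_d_wait_plogi_rsp ->
  __ocs_d_wait_node_attach -> __ocs_d_port_logged_in (send PRLI) ->
  __ocs_d_device_ready
- target mode: __ocs_d_init (PLOGI received) -> __ocs_d_wait_node_attach ->
  __ocs_d_wait_plogi_acc_cmpl -> __ocs_d_port_logged_in (PRLI received) ->
  __ocs_d_device_ready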
39 */
40
41 #include "ocs.h"
42 #include "ocs_device.h"
43 #include "ocs_fabric.h"
44 #include "ocs_els.h"
45
46 static void *__ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
47 static void *__ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
48 static void *__ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
49 static int32_t ocs_process_abts(ocs_io_t *io, fc_header_t *hdr);
50
51 /**
52 * @ingroup device_sm
53 * @brief Send response to PRLI.
54 *
55 * <h3 class="desc">Description</h3>
56 * For device nodes, this function sends a PRLI response.
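* The response is an LS_ACC (PRLI accept) if the back-end supports the
* requested FC type and accepts the remote initiator; otherwise an LS_RJT is
* sent and the node is shut down.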
57 *
58 * @param io Pointer to a SCSI IO object.
59 * @param ox_id OX_ID of PRLI
60 *
61 * @return Returns None.
62 */
63
64 void
65 ocs_d_send_prli_rsp(ocs_io_t *io, uint16_t ox_id)
66 {
67 ocs_t *ocs = io->ocs;
68 ocs_node_t *node = io->node;
69
70 /* If the back-end doesn't support the fc-type, we send an LS_RJT */
71 if (ocs->fc_type != node->fc_type) {
72 node_printf(node, "PRLI rejected by target-server, fc-type not supported\n");
73 ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
74 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
75 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
76 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
77 }
78
79 /* If the back-end doesn't want to talk to this initiator, we send an LS_RJT */
80 if (node->sport->enable_tgt && (ocs_scsi_validate_initiator(node) == 0)) {
81 node_printf(node, "PRLI rejected by target-server\n");
82 ocs_send_ls_rjt(io, ox_id, FC_REASON_UNABLE_TO_PERFORM,
83 FC_EXPL_NO_ADDITIONAL, 0, NULL, NULL);
84 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
85 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
86 } else {
87 /*sm: process PRLI payload, send PRLI acc */
88 ocs_send_prli_acc(io, ox_id, ocs->fc_type, NULL, NULL);
89
90 /* Immediately go to ready state to avoid window where we're
91 * waiting for the PRLI LS_ACC to complete while holding FCP_CMNDs
92 */
93 ocs_node_transition(node, __ocs_d_device_ready, NULL);
94 }
95 }
96
97 /**
98 * @ingroup device_sm
99 * @brief Device node state machine: Initiate node shutdown
100 *
101 * @param ctx Remote node state machine context.
102 * @param evt Event to process.
103 * @param arg Per event optional argument.
104 *
105 * @return Returns NULL.
106 */
107
108 void *
109 __ocs_d_initiate_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
110 {
111 std_node_state_decl();
112
113 node_sm_trace();
114
115 switch(evt) {
116 case OCS_EVT_ENTER: {
117 int32_t rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
118
119 ocs_scsi_io_alloc_disable(node);
120
121 /* make necessary delete upcall(s) */
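/* The back-end returns OCS_SCSI_CALL_COMPLETE when its delete work finishes
 * synchronously; in that case the DEL_INI/DEL_TGT_COMPLETE event is posted
 * here, otherwise the back-end posts it later and the node waits for it in
 * __ocs_d_wait_del_node or __ocs_d_wait_del_ini_tgt.
 */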
122 if (node->init && !node->targ) {
123 ocs_log_debug(node->ocs,
124 "[%s] delete (initiator) WWPN %s WWNN %s\n",
125 node->display_name, node->wwpn, node->wwnn);
126 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
127 if (node->sport->enable_tgt) {
128 rc = ocs_scsi_del_initiator(node,
129 OCS_SCSI_INITIATOR_DELETED);
130 }
131 if (rc == OCS_SCSI_CALL_COMPLETE) {
132 ocs_node_post_event(node,
133 OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
134 }
135 } else if (node->targ && !node->init) {
136 ocs_log_debug(node->ocs,
137 "[%s] delete (target) WWPN %s WWNN %s\n",
138 node->display_name, node->wwpn, node->wwnn);
139 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
140 if (node->sport->enable_ini) {
141 rc = ocs_scsi_del_target(node,
142 OCS_SCSI_TARGET_DELETED);
143 }
144 if (rc == OCS_SCSI_CALL_COMPLETE) {
145 ocs_node_post_event(node,
146 OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
147 }
148 } else if (node->init && node->targ) {
149 ocs_log_debug(node->ocs,
150 "[%s] delete (initiator+target) WWPN %s WWNN %s\n",
151 node->display_name, node->wwpn, node->wwnn);
152 ocs_node_transition(node, __ocs_d_wait_del_ini_tgt, NULL);
153 if (node->sport->enable_tgt) {
154 rc = ocs_scsi_del_initiator(node,
155 OCS_SCSI_INITIATOR_DELETED);
156 }
157 if (rc == OCS_SCSI_CALL_COMPLETE) {
158 ocs_node_post_event(node,
159 OCS_EVT_NODE_DEL_INI_COMPLETE, NULL);
160 }
161 rc = OCS_SCSI_CALL_COMPLETE; /* assume no wait needed */
162 if (node->sport->enable_ini) {
163 rc = ocs_scsi_del_target(node,
164 OCS_SCSI_TARGET_DELETED);
165 }
166 if (rc == OCS_SCSI_CALL_COMPLETE) {
167 ocs_node_post_event(node,
168 OCS_EVT_NODE_DEL_TGT_COMPLETE, NULL);
169 }
170 }
171
172 /* we've initiated the upcalls as needed, now kick off the node
173 * detach to precipitate the aborting of outstanding exchanges
174 * associated with said node
175 *
176 * Beware: if we've made upcall(s), we've already transitioned
177 * to a new state by the time we execute this.
178 * TODO: consider doing this before the upcalls...
179 */
180 if (node->attached) {
181 /* issue hw node free; don't care if succeeds right away
182 * or sometime later, will check node->attached later in
183 * shutdown process
184 */
185 rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
186 if (node->rnode.free_group) {
187 ocs_remote_node_group_free(node->node_group);
188 node->node_group = NULL;
189 node->rnode.free_group = FALSE;
190 }
191 if (rc != OCS_HW_RTN_SUCCESS &&
192 rc != OCS_HW_RTN_SUCCESS_SYNC) {
193 node_printf(node,
194 "Failed freeing HW node, rc=%d\n", rc);
195 }
196 }
197
198 /* if neither initiator nor target, proceed to cleanup */
199 if (!node->init && !node->targ){
200 /*
201 * node has either been detached or is in the process
202 * of being detached, call common node's initiate
203 * cleanup function.
204 */
205 ocs_node_initiate_cleanup(node);
206 }
207 break;
208 }
209 case OCS_EVT_ALL_CHILD_NODES_FREE:
210 /* Ignore; this can happen if an ELS is aborted
211 * while in a delay/retry state */
212 break;
213 default:
214 __ocs_d_common(__func__, ctx, evt, arg);
215 return NULL;
216 }
217 return NULL;
218 }
219
220 /**
221 * @ingroup device_sm
222 * @brief Device node state machine: Common device event handler.
223 *
224 * <h3 class="desc">Description</h3>
225 * For device nodes, this event handler manages default and common events.
226 *
227 * @param funcname Function name text.
228 * @param ctx Remote node state machine context.
229 * @param evt Event to process.
230 * @param arg Per event optional argument.
231 *
232 * @return Returns NULL.
233 */
234
235 static void *
236 __ocs_d_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
237 {
238 ocs_node_t *node = NULL;
239 ocs_t *ocs = NULL;
240 ocs_assert(ctx, NULL);
241 node = ctx->app;
242 ocs_assert(node, NULL);
243 ocs = node->ocs;
244 ocs_assert(ocs, NULL);
245
246 switch(evt) {
247 /* Handle shutdown events */
248 case OCS_EVT_SHUTDOWN:
249 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
250 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
251 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
252 break;
253 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
254 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
255 node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
256 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
257 break;
258 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
259 ocs_log_debug(ocs, "[%s] %-20s %-20s\n", node->display_name, funcname, ocs_sm_event_name(evt));
260 node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
261 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
262 break;
263
264 default:
265 /* call default event handler common to all nodes */
266 __ocs_node_common(funcname, ctx, evt, arg);
267 break;
268 }
269 return NULL;
270 }
271
272 /**
273 * @ingroup device_sm
274 * @brief Device node state machine: Wait for a domain-attach completion in loop topology.
275 *
276 * <h3 class="desc">Description</h3>
277 * State waits for a domain-attached completion while in loop topology.
278 *
279 * @param ctx Remote node state machine context.
280 * @param evt Event to process.
281 * @param arg Per event optional argument.
282 *
283 * @return Returns NULL.
284 */
285
286 void *
287 __ocs_d_wait_loop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
288 {
289 std_node_state_decl();
290
291 node_sm_trace();
292
293 switch(evt) {
294 case OCS_EVT_ENTER:
295 ocs_node_hold_frames(node);
296 break;
297
298 case OCS_EVT_EXIT:
299 ocs_node_accept_frames(node);
300 break;
301
302 case OCS_EVT_DOMAIN_ATTACH_OK: {
303 /* send PLOGI automatically if initiator */
304 ocs_node_init_device(node, TRUE);
305 break;
306 }
307 default:
308 __ocs_d_common(__func__, ctx, evt, arg);
309 return NULL;
310 }
311
312 return NULL;
313 }
314
315 /**
316 * @ingroup device_sm
317 * @brief state: Wait for the initiator/target delete completions (I+T node).
318 *
319 * State is entered when a node is in I+T mode and sends a delete initiator/target
320 * call to the target-server/initiator-client and needs to wait for that work to complete.
321 *
322 * @param ctx Remote node state machine context.
323 * @param evt Event to process.
324 * @param arg Per event optional argument.
325 *
326 * @return Returns NULL.
327 */
328
329 void *
330 __ocs_d_wait_del_ini_tgt(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
331 {
332 std_node_state_decl();
333
334 node_sm_trace();
335
336 switch(evt) {
337 case OCS_EVT_ENTER:
338 ocs_node_hold_frames(node);
339 /* Fall through */
340
341 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
342 case OCS_EVT_ALL_CHILD_NODES_FREE:
343 /* These are expected events. */
344 break;
345
346 case OCS_EVT_NODE_DEL_INI_COMPLETE:
347 case OCS_EVT_NODE_DEL_TGT_COMPLETE:
348 ocs_node_transition(node, __ocs_d_wait_del_node, NULL);
349 break;
350
351 case OCS_EVT_EXIT:
352 ocs_node_accept_frames(node);
353 break;
354
355 case OCS_EVT_SRRS_ELS_REQ_FAIL:
356 /* Can happen as ELS IOs complete */
357 ocs_assert(node->els_req_cnt, NULL);
358 node->els_req_cnt--;
359 break;
360
361 /* ignore shutdown events as we're already in shutdown path */
362 case OCS_EVT_SHUTDOWN:
363 /* have default shutdown event take precedence */
364 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
365 /* fall through */
366 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
367 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
368 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
369 break;
370 case OCS_EVT_DOMAIN_ATTACH_OK:
371 /* don't care about domain_attach_ok */
372 break;
373 default:
374 __ocs_d_common(__func__, ctx, evt, arg);
375 return NULL;
376 }
377
378 return NULL;
379 }
380
381 /**
382 * @ingroup device_sm
383 * @brief state: Wait for the remaining initiator or target delete completion.
384 *
385 * State is entered when a node sends a delete initiator/target call to the
386 * target-server/initiator-client and needs to wait for that work to complete.
387 *
388 * @param ctx Remote node state machine context.
389 * @param evt Event to process.
390 * @param arg Per event optional argument.
391 *
392 * @return Returns NULL.
393 */
394
395 void *
396 __ocs_d_wait_del_node(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
397 {
398 std_node_state_decl();
399
400 node_sm_trace();
401
402 switch(evt) {
403 case OCS_EVT_ENTER:
404 ocs_node_hold_frames(node);
405 /* Fall through */
406
407 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
408 case OCS_EVT_ALL_CHILD_NODES_FREE:
409 /* These are expected events. */
410 break;
411
412 case OCS_EVT_NODE_DEL_INI_COMPLETE:
413 case OCS_EVT_NODE_DEL_TGT_COMPLETE:
414 /*
415 * node has either been detached or is in the process of being detached,
416 * call common node's initiate cleanup function
417 */
418 ocs_node_initiate_cleanup(node);
419 break;
420
421 case OCS_EVT_EXIT:
422 ocs_node_accept_frames(node);
423 break;
424
425 case OCS_EVT_SRRS_ELS_REQ_FAIL:
426 /* Can happen as ELS IOs complete */
427 ocs_assert(node->els_req_cnt, NULL);
428 node->els_req_cnt--;
429 break;
430
431 /* ignore shutdown events as we're already in shutdown path */
432 case OCS_EVT_SHUTDOWN:
433 /* have default shutdown event take precedence */
434 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
435 /* fall through */
436 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
437 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
438 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
439 break;
440 case OCS_EVT_DOMAIN_ATTACH_OK:
441 /* don't care about domain_attach_ok */
442 break;
443 default:
444 __ocs_d_common(__func__, ctx, evt, arg);
445 return NULL;
446 }
447
448 return NULL;
449 }
450
451 /**
452 * @brief Save the OX_ID for sending LS_ACC sometime later.
453 *
454 * <h3 class="desc">Description</h3>
455 * When deferring the response to an ELS request, the OX_ID of the request
456 * is saved using this function.
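* For example, __ocs_d_init saves the OX_ID of a PLOGI received before the
* node is attached, and __ocs_d_wait_node_attach sends the deferred LS_ACC
* once OCS_EVT_NODE_ATTACH_OK arrives.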
457 *
458 * @param io Pointer to a SCSI IO object.
459 * @param hdr Pointer to the FC header.
460 * @param ls Type of deferred response to send: LS_ACC for PLOGI
461 * (OCS_NODE_SEND_LS_ACC_PLOGI) or LS_ACC for PRLI (OCS_NODE_SEND_LS_ACC_PRLI).
462 *
463 * @return None.
464 */
465
466 void
467 ocs_send_ls_acc_after_attach(ocs_io_t *io, fc_header_t *hdr, ocs_node_send_ls_acc_e ls)
468 {
469 ocs_node_t *node = io->node;
470 uint16_t ox_id = ocs_be16toh(hdr->ox_id);
471
472 ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_NONE);
473
474 node->ls_acc_oxid = ox_id;
475 node->send_ls_acc = ls;
476 node->ls_acc_io = io;
477 node->ls_acc_did = fc_be24toh(hdr->d_id);
478 }
479
480 /**
481 * @brief Process the PRLI payload.
482 *
483 * <h3 class="desc">Description</h3>
484 * The PRLI payload is processed; the initiator/target capabilities of the
485 * remote node are extracted and saved in the node object.
486 *
487 * @param node Pointer to the node object.
488 * @param prli Pointer to the PRLI payload.
489 *
490 * @return None.
491 */
492
493 void
494 ocs_process_prli_payload(ocs_node_t *node, fc_prli_payload_t *prli)
495 {
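/* Decode the PRLI service parameter flags advertised by the remote port:
 * initiator function, target function, and the FCP-2 (retry) capability.
 */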
496 node->init = (ocs_be16toh(prli->service_params) & FC_PRLI_INITIATOR_FUNCTION) != 0;
497 node->targ = (ocs_be16toh(prli->service_params) & FC_PRLI_TARGET_FUNCTION) != 0;
498 node->fcp2device = (ocs_be16toh(prli->service_params) & FC_PRLI_RETRY) != 0;
499 node->fc_type = prli->type;
500 }
501
502 /**
503 * @brief Process the ABTS.
504 *
505 * <h3 class="desc">Description</h3>
506 * Common code to process a received ABTS. If an active IO can be found
507 * that matches the OX_ID of the ABTS request, a call is made to the
508 * backend. Otherwise, a BA_ACC is returned to the initiator.
509 *
510 * @param io Pointer to a SCSI IO object.
511 * @param hdr Pointer to the FC header.
512 *
513 * @return Returns 0 on success, or a negative error value on failure.
514 */
515
516 static int32_t
517 ocs_process_abts(ocs_io_t *io, fc_header_t *hdr)
518 {
519 ocs_node_t *node = io->node;
520 ocs_t *ocs = node->ocs;
521 uint16_t ox_id = ocs_be16toh(hdr->ox_id);
522 uint16_t rx_id = ocs_be16toh(hdr->rx_id);
523 ocs_io_t *abortio;
524
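/* Look for an active target IO on this node that matches the OX_ID/RX_ID
 * carried in the ABTS.
 */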
525 abortio = ocs_io_find_tgt_io(ocs, node, ox_id, rx_id);
526
527 /* If an IO was found, attempt to take a reference on it */
528 if (abortio != NULL && (ocs_ref_get_unless_zero(&abortio->ref) != 0)) {
529 /* Got a reference on the IO. Hold it until backend is notified below */
530 node_printf(node, "Abort request: ox_id [%04x] rx_id [%04x]\n",
531 ox_id, rx_id);
532
533 /*
534 * Save the ox_id for the ABTS as the init_task_tag in our manufactured
535 * TMF IO object
536 */
537 io->display_name = "abts";
538 io->init_task_tag = ox_id;
539 /* don't set tgt_task_tag, don't want to confuse with XRI */
540
541 /*
542 * Save the rx_id from the ABTS as it is needed for the BLS response,
543 * regardless of the IO context's rx_id
544 */
545 io->abort_rx_id = rx_id;
546
547 /* Call target server command abort */
548 io->tmf_cmd = OCS_SCSI_TMF_ABORT_TASK;
549 ocs_scsi_recv_tmf(io, abortio->tgt_io.lun, OCS_SCSI_TMF_ABORT_TASK, abortio, 0);
550
551 /*
552 * Backend will have taken an additional reference on the IO if needed;
553 * done with current reference.
554 */
555 ocs_ref_put(&abortio->ref); /* release the reference taken by ocs_ref_get_unless_zero() above */
556 } else {
557 /*
558 * Either IO was not found or it has been freed between finding it
559 * and attempting to get the reference.
560 */
561 node_printf(node, "Abort request: ox_id [%04x], IO not found (exists=%d)\n",
562 ox_id, (abortio != NULL));
563
564 /* Send a BA_ACC */
565 ocs_bls_send_acc_hdr(io, hdr);
566 }
567 return 0;
568 }
569
570 /**
571 * @ingroup device_sm
572 * @brief Device node state machine: Wait for the PLOGI accept to complete.
573 *
574 * @param ctx Remote node state machine context.
575 * @param evt Event to process.
576 * @param arg Per event optional argument.
577 *
578 * @return Returns NULL.
579 */
580
581 void *
582 __ocs_d_wait_plogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
583 {
584 std_node_state_decl();
585
586 node_sm_trace();
587
588 switch(evt) {
589 case OCS_EVT_ENTER:
590 ocs_node_hold_frames(node);
591 break;
592
593 case OCS_EVT_EXIT:
594 ocs_node_accept_frames(node);
595 break;
596
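/* els_cmpl_cnt counts ELS responses (here, the PLOGI LS_ACC) that have been
 * sent and whose completions are still outstanding; each SRRS_ELS_CMPL_*
 * event decrements it.
 */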
597 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
598 ocs_assert(node->els_cmpl_cnt, NULL);
599 node->els_cmpl_cnt--;
600 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
601 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
602 break;
603
604 case OCS_EVT_SRRS_ELS_CMPL_OK: /* PLOGI ACC completions */
605 ocs_assert(node->els_cmpl_cnt, NULL);
606 node->els_cmpl_cnt--;
607 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
608 break;
609
610 default:
611 __ocs_d_common(__func__, ctx, evt, arg);
612 return NULL;
613 }
614
615 return NULL;
616 }
617
618 /**
619 * @ingroup device_sm
620 * @brief Device node state machine: Wait for the LOGO response.
621 *
622 * @param ctx Remote node state machine context.
623 * @param evt Event to process.
624 * @param arg Per event optional argument.
625 *
626 * @return Returns NULL.
627 */
628
629 void *
630 __ocs_d_wait_logo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
631 {
632 std_node_state_decl();
633
634 node_sm_trace();
635
636 switch(evt) {
637 case OCS_EVT_ENTER:
638 /* TODO: may want to remove this;
639 * if we want to know about PLOGI */
640 ocs_node_hold_frames(node);
641 break;
642
643 case OCS_EVT_EXIT:
644 ocs_node_accept_frames(node);
645 break;
646
647 case OCS_EVT_SRRS_ELS_REQ_OK:
648 case OCS_EVT_SRRS_ELS_REQ_RJT:
649 case OCS_EVT_SRRS_ELS_REQ_FAIL:
650 /* LOGO response received, send shutdown */
651 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_LOGO, __ocs_d_common, __func__)) {
652 return NULL;
653 }
654 ocs_assert(node->els_req_cnt, NULL);
655 node->els_req_cnt--;
656 node_printf(node, "LOGO sent (evt=%s), shutdown node\n", ocs_sm_event_name(evt));
657 /* sm: post explicit logout */
658 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
659 break;
660
661 /* TODO: PLOGI: abort LOGO and process PLOGI? (SHUTDOWN_EXPLICIT/IMPLICIT_LOGO?) */
662
663 default:
664 __ocs_d_common(__func__, ctx, evt, arg);
665 return NULL;
666 }
667 return NULL;
668 }
669
670 /**
671 * @ingroup device_sm
672 * @brief Device node state machine: Wait for the PRLO response.
673 *
674 * @param ctx Remote node state machine context.
675 * @param evt Event to process.
676 * @param arg Per event optional argument.
677 *
678 * @return Returns NULL.
679 */
680
681 void *
682 __ocs_d_wait_prlo_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
683 {
684 std_node_state_decl();
685
686 node_sm_trace();
687
688 switch(evt) {
689 case OCS_EVT_ENTER:
690 ocs_node_hold_frames(node);
691 break;
692
693 case OCS_EVT_EXIT:
694 ocs_node_accept_frames(node);
695 break;
696
697 case OCS_EVT_SRRS_ELS_REQ_OK:
698 case OCS_EVT_SRRS_ELS_REQ_RJT:
699 case OCS_EVT_SRRS_ELS_REQ_FAIL:
700 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLO, __ocs_d_common, __func__)) {
701 return NULL;
702 }
703 ocs_assert(node->els_req_cnt, NULL);
704 node->els_req_cnt--;
705 node_printf(node, "PRLO sent (evt=%s)\n", ocs_sm_event_name(evt));
706 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
707 break;
708
709 default:
710 __ocs_node_common(__func__, ctx, evt, arg);
711 return NULL;
712 }
713 return NULL;
714 }
715
716 /**
717 * @brief Initialize device node.
718 *
719 * Initialize the device node: record whether a PLOGI should be sent from __ocs_d_init,
720 * then transition to __ocs_d_init (or to __ocs_node_paused if new nodes are paused).
721 *
722 * @param node Pointer to the node object.
723 * @param send_plogi Boolean indicating to send PLOGI command or not.
724 *
725 * @return none
726 */
727
728 void
729 ocs_node_init_device(ocs_node_t *node, int send_plogi)
730 {
731 node->send_plogi = send_plogi;
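/* If new nodes are configured to start paused (and this is not the domain
 * controller), park in __ocs_node_paused and remember __ocs_d_init as the
 * state to resume into; otherwise enter __ocs_d_init directly.
 */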
732 if ((node->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NEW_NODES) && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)) {
733 node->nodedb_state = __ocs_d_init;
734 ocs_node_transition(node, __ocs_node_paused, NULL);
735 } else {
736 ocs_node_transition(node, __ocs_d_init, NULL);
737 }
738 }
739
740 /**
741 * @ingroup device_sm
742 * @brief Device node state machine: Initial node state for an initiator or a target.
743 *
744 * <h3 class="desc">Description</h3>
745 * This state is entered when a node is instantiated, either having been
746 * discovered from a name services query, or having received a PLOGI/FLOGI.
747 *
748 * @param ctx Remote node state machine context.
749 * @param evt Event to process.
750 * @param arg Per event optional argument.
751 * - OCS_EVT_ENTER: no argument is used; node->send_plogi (set by
752 * ocs_node_init_device()) determines whether a PLOGI is sent on
753 * entry (initiator-only). Not applicable for a target.
754 *
755 * @return Returns NULL.
756 */
757
758 void *
759 __ocs_d_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
760 {
761 int32_t rc;
762 ocs_node_cb_t *cbdata = arg;
763 std_node_state_decl();
764
765 node_sm_trace();
766
767 switch(evt) {
768 case OCS_EVT_ENTER:
769 /* check if we need to send PLOGI */
770 if (node->send_plogi) {
771 /* only send if we have initiator capability, and domain is attached */
772 if (node->sport->enable_ini && node->sport->domain->attached) {
773 ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
774 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
775 ocs_node_transition(node, __ocs_d_wait_plogi_rsp, NULL);
776 } else {
777 node_printf(node, "not sending plogi sport.ini=%d, domain attached=%d\n",
778 node->sport->enable_ini, node->sport->domain->attached);
779 }
780 }
781 break;
782 case OCS_EVT_PLOGI_RCVD: {
783 /* T, or I+T */
784 fc_header_t *hdr = cbdata->header->dma.virt;
785 uint32_t d_id = fc_be24toh(hdr->d_id);
786
787 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
788 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
789
790 /* domain already attached */
791 if (node->sport->domain->attached) {
792 rc = ocs_node_attach(node);
793 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
794 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
795 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
796 }
797 break;
798 }
799
800 /* domain not attached; several possibilities: */
801 switch (node->sport->topology) {
802 case OCS_SPORT_TOPOLOGY_P2P:
803 /* we're not attached and sport is p2p, need to attach */
804 ocs_domain_attach(node->sport->domain, d_id);
805 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
806 break;
807 case OCS_SPORT_TOPOLOGY_FABRIC:
808 /* we're not attached and sport is fabric, domain attach should have
809 * already been requested as part of the fabric state machine, wait for it
810 */
811 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
812 break;
813 case OCS_SPORT_TOPOLOGY_UNKNOWN:
814 /* Two possibilities:
815 * 1. received a PLOGI before our FLOGI has completed (possible since
816 * completion comes in on another CQ), thus we don't know what we're
817 * connected to yet; transition to a state to wait for the fabric
818 * node to tell us;
819 * 2. PLOGI received before link went down and we haven't performed
820 * domain attach yet.
821 * Note: we cannot distinguish between 1. and 2. so have to assume PLOGI
822 * was received after link back up.
823 */
824 node_printf(node, "received PLOGI, with unknown topology did=0x%x\n", d_id);
825 ocs_node_transition(node, __ocs_d_wait_topology_notify, NULL);
826 break;
827 default:
828 node_printf(node, "received PLOGI, with unexpectd topology %d\n",
829 node->sport->topology);
830 ocs_assert(FALSE, NULL);
831 break;
832 }
833 break;
834 }
835
836 case OCS_EVT_FDISC_RCVD: {
837 __ocs_d_common(__func__, ctx, evt, arg);
838 break;
839 }
840
841 case OCS_EVT_FLOGI_RCVD: {
842 fc_header_t *hdr = cbdata->header->dma.virt;
843
844 /* this better be coming from an NPort */
845 ocs_assert(ocs_rnode_is_nport(cbdata->payload->dma.virt), NULL);
846
847 /* sm: save sparams, send FLOGI acc */
848 ocs_domain_save_sparms(node->sport->domain, cbdata->payload->dma.virt);
849
850 /* send FC LS_ACC response, override s_id */
851 ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
852 ocs_send_flogi_p2p_acc(cbdata->io, ocs_be16toh(hdr->ox_id), fc_be24toh(hdr->d_id), NULL, NULL);
853 if (ocs_p2p_setup(node->sport)) {
854 node_printf(node, "p2p setup failed, shutting down node\n");
855 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
856 } else {
857 ocs_node_transition(node, __ocs_p2p_wait_flogi_acc_cmpl, NULL);
858 }
859
860 break;
861 }
862
863 case OCS_EVT_LOGO_RCVD: {
864 fc_header_t *hdr = cbdata->header->dma.virt;
865
866 if (!node->sport->domain->attached) {
867 /* most likely a frame left over from before a link down; drop and
868 * shut node down w/ "explicit logout" so pending frames are processed */
869 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
870 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
871 break;
872 }
873 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
874 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
875 break;
876 }
877
878 case OCS_EVT_PRLI_RCVD:
879 case OCS_EVT_PRLO_RCVD:
880 case OCS_EVT_PDISC_RCVD:
881 case OCS_EVT_ADISC_RCVD:
882 case OCS_EVT_RSCN_RCVD: {
883 fc_header_t *hdr = cbdata->header->dma.virt;
884 if (!node->sport->domain->attached) {
885 /* most likely a frame left over from before a link down; drop and
886 * shut node down w/ "explicit logout" so pending frames are processed */
887 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
888 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
889 break;
890 }
891 node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
892 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
893 FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
894 NULL, NULL);
895
896 break;
897 }
898
899 case OCS_EVT_FCP_CMD_RCVD: {
900 /* note: problem, we're now expecting an ELS REQ completion
901 * from both the LOGO and PLOGI */
902 if (!node->sport->domain->attached) {
903 /* most likely a frame left over from before a link down; drop and
904 * shut node down w/ "explicit logout" so pending frames are processed */
905 node_printf(node, "%s domain not attached, dropping\n", ocs_sm_event_name(evt));
906 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
907 break;
908 }
909
910 /* Send LOGO */
911 node_printf(node, "FCP_CMND received, send LOGO\n");
912 if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, 0, NULL, NULL) == NULL) {
913 /* failed to send LOGO, go ahead and cleanup node anyways */
914 node_printf(node, "Failed to send LOGO\n");
915 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
916 } else {
917 /* sent LOGO, wait for response */
918 ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
919 }
920 break;
921 }
922 case OCS_EVT_DOMAIN_ATTACH_OK:
923 /* don't care about domain_attach_ok */
924 break;
925
926 default:
927 __ocs_d_common(__func__, ctx, evt, arg);
928 return NULL;
929 }
930
931 return NULL;
932 }
933
934 /**
935 * @ingroup device_sm
936 * @brief Device node state machine: Wait on a response for a sent PLOGI.
937 *
938 * <h3 class="desc">Description</h3>
939 * State is entered when an initiator-capable node has sent
940 * a PLOGI and is waiting for a response.
941 *
942 * @param ctx Remote node state machine context.
943 * @param evt Event to process.
944 * @param arg Per event optional argument.
945 *
946 * @return Returns NULL.
947 */
948
949 void *
950 __ocs_d_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
951 {
952 int32_t rc;
953 ocs_node_cb_t *cbdata = arg;
954 std_node_state_decl();
955
956 node_sm_trace();
957
958 switch(evt) {
959 case OCS_EVT_PLOGI_RCVD: {
960 /* T, or I+T */
961 /* received PLOGI with svc parms, go ahead and attach node
962 * when the PLOGI that was sent ultimately completes, it'll be a no-op
963 */
964
965 /* TODO: there is an outstanding PLOGI sent, can we set a flag
966 * to indicate that we don't want to retry it if it times out? */
967 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
968 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
969 /* sm: domain->attached / ocs_node_attach */
970 rc = ocs_node_attach(node);
971 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
972 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
973 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
974 }
975 break;
976 }
977
978 case OCS_EVT_PRLI_RCVD:
979 /* I, or I+T */
980 /* sent PLOGI and before completion was seen, received the
981 * PRLI from the remote node (WCQEs and RCQEs come in on
982 * different queues and order of processing cannot be assumed)
983 * Save OXID so PRLI can be sent after the attach and continue
984 * to wait for PLOGI response
985 */
986 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
987 if (ocs->fc_type == node->fc_type) {
988 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
989 ocs_node_transition(node, __ocs_d_wait_plogi_rsp_recvd_prli, NULL);
990 } else {
991 /* TODO this needs to be looked at. What do we do here? */
992 }
993 break;
994
995 /* TODO this needs to be looked at. We could very well be logged in */
996 case OCS_EVT_LOGO_RCVD: /* why don't we do a shutdown here?? */
997 case OCS_EVT_PRLO_RCVD:
998 case OCS_EVT_PDISC_RCVD:
999 case OCS_EVT_FDISC_RCVD:
1000 case OCS_EVT_ADISC_RCVD:
1001 case OCS_EVT_RSCN_RCVD:
1002 case OCS_EVT_SCR_RCVD: {
1003 fc_header_t *hdr = cbdata->header->dma.virt;
1004 node_printf(node, "%s received, sending reject\n", ocs_sm_event_name(evt));
1005 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1006 FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NPORT_LOGIN_REQUIRED, 0,
1007 NULL, NULL);
1008
1009 break;
1010 }
1011
1012 case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
1013 /* Completion from PLOGI sent */
1014 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1015 return NULL;
1016 }
1017 ocs_assert(node->els_req_cnt, NULL);
1018 node->els_req_cnt--;
1019 /* sm: save sparams, ocs_node_attach */
1020 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1021 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1022 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1023 rc = ocs_node_attach(node);
1024 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1025 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1026 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1027 }
1028 break;
1029
1030 case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1031 /* PLOGI failed, shutdown the node */
1032 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1033 return NULL;
1034 }
1035 ocs_assert(node->els_req_cnt, NULL);
1036 node->els_req_cnt--;
1037 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1038 break;
1039
1040 case OCS_EVT_SRRS_ELS_REQ_RJT: /* Our PLOGI was rejected, this is ok in some cases */
1041 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1042 return NULL;
1043 }
1044 ocs_assert(node->els_req_cnt, NULL);
1045 node->els_req_cnt--;
1046 break;
1047
1048 case OCS_EVT_FCP_CMD_RCVD: {
1049 /* not logged in yet and outstanding PLOGI so don't send LOGO,
1050 * just drop
1051 */
1052 node_printf(node, "FCP_CMND received, drop\n");
1053 break;
1054 }
1055
1056 default:
1057 __ocs_d_common(__func__, ctx, evt, arg);
1058 return NULL;
1059 }
1060
1061 return NULL;
1062 }
1063
1064 /**
1065 * @ingroup device_sm
1066 * @brief Device node state machine: Waiting on a response for a
1067 * sent PLOGI.
1068 *
1069 * <h3 class="desc">Description</h3>
1070 * State is entered when an initiator-capable node has sent
1071 * a PLOGI and is waiting for a response. Before receiving the
1072 * response, a PRLI was received, implying that the PLOGI was
1073 * successful.
1074 *
1075 * @param ctx Remote node state machine context.
1076 * @param evt Event to process.
1077 * @param arg Per event optional argument.
1078 *
1079 * @return Returns NULL.
1080 */
1081
1082 void *
1083 __ocs_d_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1084 {
1085 int32_t rc;
1086 ocs_node_cb_t *cbdata = arg;
1087 std_node_state_decl();
1088
1089 node_sm_trace();
1090
1091 switch(evt) {
1092 case OCS_EVT_ENTER:
1093 /*
1094 * Since we've received a PRLI, we have a port login and will
1095 * just need to wait for the PLOGI response to do the node
1096 * attach and then we can send the LS_ACC for the PRLI. If, during
1097 * this time, we receive FCP_CMNDs (possible since we've already sent
1098 * a PRLI and our peer may have accepted it), they are simply held.
1099 * At this time, we are not waiting on any other unsolicited
1100 * frames to continue with the login process. Thus, it will not
1101 * hurt to hold frames here.
1102 */
1103 ocs_node_hold_frames(node);
1104 break;
1105
1106 case OCS_EVT_EXIT:
1107 ocs_node_accept_frames(node);
1108 break;
1109
1110 case OCS_EVT_SRRS_ELS_REQ_OK: /* PLOGI response received */
1111 /* Completion from PLOGI sent */
1112 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1113 return NULL;
1114 }
1115 ocs_assert(node->els_req_cnt, NULL);
1116 node->els_req_cnt--;
1117 /* sm: save sparams, ocs_node_attach */
1118 ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1119 ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1120 ((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1121 rc = ocs_node_attach(node);
1122 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1123 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1124 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1125 }
1126 break;
1127
1128 case OCS_EVT_SRRS_ELS_REQ_FAIL: /* PLOGI response received */
1129 case OCS_EVT_SRRS_ELS_REQ_RJT:
1130 /* PLOGI failed, shutdown the node */
1131 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_d_common, __func__)) {
1132 return NULL;
1133 }
1134 ocs_assert(node->els_req_cnt, NULL);
1135 node->els_req_cnt--;
1136 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1137 break;
1138
1139 default:
1140 __ocs_d_common(__func__, ctx, evt, arg);
1141 return NULL;
1142 }
1143
1144 return NULL;
1145 }
1146
1147 /**
1148 * @ingroup device_sm
1149 * @brief Device node state machine: Wait for a domain attach.
1150 *
1151 * <h3 class="desc">Description</h3>
1152 * Waits for a domain-attach complete ok event.
1153 *
1154 * @param ctx Remote node state machine context.
1155 * @param evt Event to process.
1156 * @param arg Per event optional argument.
1157 *
1158 * @return Returns NULL.
1159 */
1160
1161 void *
1162 __ocs_d_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1163 {
1164 int32_t rc;
1165 std_node_state_decl();
1166
1167 node_sm_trace();
1168
1169 switch(evt) {
1170 case OCS_EVT_ENTER:
1171 ocs_node_hold_frames(node);
1172 break;
1173
1174 case OCS_EVT_EXIT:
1175 ocs_node_accept_frames(node);
1176 break;
1177
1178 case OCS_EVT_DOMAIN_ATTACH_OK:
1179 ocs_assert(node->sport->domain->attached, NULL);
1180 /* sm: ocs_node_attach */
1181 rc = ocs_node_attach(node);
1182 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1183 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1184 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1185 }
1186 break;
1187
1188 default:
1189 __ocs_d_common(__func__, ctx, evt, arg);
1190 return NULL;
1191 }
1192 return NULL;
1193 }
1194
1195 /**
1196 * @ingroup device_sm
1197 * @brief Device node state machine: Wait for topology
1198 * notification
1199 *
1200 * <h3 class="desc">Description</h3>
1201 * Waits for topology notification from fabric node, then
1202 * attaches domain and node.
1203 *
1204 * @param ctx Remote node state machine context.
1205 * @param evt Event to process.
1206 * @param arg Per event optional argument.
1207 *
1208 * @return Returns NULL.
1209 */
1210
1211 void *
1212 __ocs_d_wait_topology_notify(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1213 {
1214 int32_t rc;
1215 std_node_state_decl();
1216
1217 node_sm_trace();
1218
1219 switch(evt) {
1220 case OCS_EVT_ENTER:
1221 ocs_node_hold_frames(node);
1222 break;
1223
1224 case OCS_EVT_EXIT:
1225 ocs_node_accept_frames(node);
1226 break;
1227
1228 case OCS_EVT_SPORT_TOPOLOGY_NOTIFY: {
1229 ocs_sport_topology_e topology = (ocs_sport_topology_e)(uintptr_t)arg;
1230 ocs_assert(!node->sport->domain->attached, NULL);
1231 ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
1232 node_printf(node, "topology notification, topology=%d\n", topology);
1233
1234 /* At the time the PLOGI was received, the topology was unknown,
1235 * so we didn't know which node would perform the domain attach:
1236 * 1. The node from which the PLOGI was sent (p2p) or
1237 * 2. The node to which the FLOGI was sent (fabric).
1238 */
1239 if (topology == OCS_SPORT_TOPOLOGY_P2P) {
1240 /* if this is p2p, need to attach to the domain using the
1241 * d_id from the PLOGI received
1242 */
1243 ocs_domain_attach(node->sport->domain, node->ls_acc_did);
1244 }
1245 /* else, if this is fabric, the domain attach should be performed
1246 * by the fabric node (node sending FLOGI); just wait for attach
1247 * to complete
1248 */
1249
1250 ocs_node_transition(node, __ocs_d_wait_domain_attach, NULL);
1251 break;
1252 }
1253 case OCS_EVT_DOMAIN_ATTACH_OK:
1254 ocs_assert(node->sport->domain->attached, NULL);
1255 node_printf(node, "domain attach ok\n");
1256 /*sm: ocs_node_attach */
1257 rc = ocs_node_attach(node);
1258 ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
1259 if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1260 ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1261 }
1262 break;
1263
1264 default:
1265 __ocs_d_common(__func__, ctx, evt, arg);
1266 return NULL;
1267 }
1268 return NULL;
1269 }
1270
1271 /**
1272 * @ingroup device_sm
1273 * @brief Device node state machine: Wait for a node attach when found by a remote node.
1274 *
1275 * @param ctx Remote node state machine context.
1276 * @param evt Event to process.
1277 * @param arg Per event optional argument.
1278 *
1279 * @return Returns NULL.
1280 */
1281
1282 void *
1283 __ocs_d_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1284 {
1285 std_node_state_decl();
1286
1287 node_sm_trace();
1288
1289 switch(evt) {
1290 case OCS_EVT_ENTER:
1291 ocs_node_hold_frames(node);
1292 break;
1293
1294 case OCS_EVT_EXIT:
1295 ocs_node_accept_frames(node);
1296 break;
1297
1298 case OCS_EVT_NODE_ATTACH_OK:
1299 node->attached = TRUE;
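/* Node is now attached; send whichever LS_ACC was deferred via
 * ocs_send_ls_acc_after_attach() while the attach was in progress.
 */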
1300 switch (node->send_ls_acc) {
1301 case OCS_NODE_SEND_LS_ACC_PLOGI: {
1302 /* sm: send_plogi_acc is set / send PLOGI acc */
1303 /* Normal case for T, or I+T */
1304 ocs_send_plogi_acc(node->ls_acc_io, node->ls_acc_oxid, NULL, NULL);
1305 ocs_node_transition(node, __ocs_d_wait_plogi_acc_cmpl, NULL);
1306 node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1307 node->ls_acc_io = NULL;
1308 break;
1309 }
1310 case OCS_NODE_SEND_LS_ACC_PRLI: {
1311 ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
1312 node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1313 node->ls_acc_io = NULL;
1314 break;
1315 }
1316 case OCS_NODE_SEND_LS_ACC_NONE:
1317 default:
1318 /* Normal case for I */
1319 /* sm: send_plogi_acc is not set / send PLOGI acc */
1320 ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
1321 break;
1322 }
1323 break;
1324
1325 case OCS_EVT_NODE_ATTACH_FAIL:
1326 /* node attach failed, shutdown the node */
1327 node->attached = FALSE;
1328 node_printf(node, "node attach failed\n");
1329 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1330 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1331 break;
1332
1333 /* Handle shutdown events */
1334 case OCS_EVT_SHUTDOWN:
1335 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1336 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1337 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
1338 break;
1339 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1340 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1341 node->shutdown_reason = OCS_NODE_SHUTDOWN_EXPLICIT_LOGO;
1342 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
1343 break;
1344 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1345 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1346 node->shutdown_reason = OCS_NODE_SHUTDOWN_IMPLICIT_LOGO;
1347 ocs_node_transition(node, __ocs_d_wait_attach_evt_shutdown, NULL);
1348 break;
1349 default:
1350 __ocs_d_common(__func__, ctx, evt, arg);
1351 return NULL;
1352 }
1353
1354 return NULL;
1355 }
1356
1357 /**
1358 * @ingroup device_sm
1359 * @brief Device node state machine: Wait for a node/domain
1360 * attach then shutdown node.
1361 *
1362 * @param ctx Remote node state machine context.
1363 * @param evt Event to process.
1364 * @param arg Per event optional argument.
1365 *
1366 * @return Returns NULL.
1367 */
1368
1369 void *
1370 __ocs_d_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1371 {
1372 std_node_state_decl();
1373
1374 node_sm_trace();
1375
1376 switch(evt) {
1377 case OCS_EVT_ENTER:
1378 ocs_node_hold_frames(node);
1379 break;
1380
1381 case OCS_EVT_EXIT:
1382 ocs_node_accept_frames(node);
1383 break;
1384
1385 /* wait for any of these attach events and then shutdown */
1386 case OCS_EVT_NODE_ATTACH_OK:
1387 node->attached = TRUE;
1388 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
1389 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1390 break;
1391
1392 case OCS_EVT_NODE_ATTACH_FAIL:
1393 /* node attach failed, shutdown the node */
1394 node->attached = FALSE;
1395 node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
1396 ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
1397 break;
1398
1399 /* ignore shutdown events as we're already in shutdown path */
1400 case OCS_EVT_SHUTDOWN:
1401 /* have default shutdown event take precedence */
1402 node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1403 /* fall through */
1404 case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1405 case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1406 node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1407 break;
1408
1409 default:
1410 __ocs_d_common(__func__, ctx, evt, arg);
1411 return NULL;
1412 }
1413
1414 return NULL;
1415 }
1416
1417 /**
1418 * @ingroup device_sm
1419 * @brief Device node state machine: Port is logged in.
1420 *
1421 * <h3 class="desc">Description</h3>
1422 * This state is entered when a remote port has completed port login (PLOGI).
1423 *
1424 * @param ctx Remote node state machine context.
1425 * @param evt Event to process
1426 * @param arg Per event optional argument
1427 *
1428 * @return Returns NULL.
1429 */
1430 void *
1431 __ocs_d_port_logged_in(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1432 {
1433 ocs_node_cb_t *cbdata = arg;
1434 std_node_state_decl();
1435
1436 node_sm_trace();
1437
1438 /* TODO: I+T: what if PLOGI response not yet received ? */
1439
1440 switch(evt) {
1441 case OCS_EVT_ENTER:
1442 /* Normal case for I or I+T */
1443 if (node->sport->enable_ini && !FC_ADDR_IS_DOMAIN_CTRL(node->rnode.fc_id)
1444 && !node->sent_prli) {
1445 /* sm: if enable_ini / send PRLI */
1446 ocs_send_prli(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1447 node->sent_prli = TRUE;
1448 /* can now expect ELS_REQ_OK/FAIL/RJT */
1449 }
1450 break;
1451
1452 case OCS_EVT_FCP_CMD_RCVD: {
1453 /* For target functionality send PRLO and drop the CMD frame. */
1454 if (node->sport->enable_tgt) {
1455 if (ocs_send_prlo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
1456 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL)) {
1457 ocs_node_transition(node, __ocs_d_wait_prlo_rsp, NULL);
1458 }
1459 }
1460 break;
1461 }
1462
1463 case OCS_EVT_PRLI_RCVD: {
1464 fc_header_t *hdr = cbdata->header->dma.virt;
1465
1466 /* Normal for T or I+T */
1467
1468 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1469 ocs_d_send_prli_rsp(cbdata->io, ocs_be16toh(hdr->ox_id));
1470 break;
1471 }
1472
1473 case OCS_EVT_SRRS_ELS_REQ_OK: { /* PRLI response */
1474 /* Normal case for I or I+T */
1475 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1476 return NULL;
1477 }
1478 ocs_assert(node->els_req_cnt, NULL);
1479 node->els_req_cnt--;
1480 /* sm: process PRLI payload */
1481 ocs_process_prli_payload(node, cbdata->els->els_rsp.virt);
1482 ocs_node_transition(node, __ocs_d_device_ready, NULL);
1483 break;
1484 }
1485
1486 case OCS_EVT_SRRS_ELS_REQ_FAIL: { /* PRLI response failed */
1487 /* I, I+T, assume some link failure, shutdown node */
1488 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1489 return NULL;
1490 }
1491 ocs_assert(node->els_req_cnt, NULL);
1492 node->els_req_cnt--;
1493 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1494 break;
1495 }
1496
1497 case OCS_EVT_SRRS_ELS_REQ_RJT: {/* PRLI rejected by remote */
1498 /* Normal for I, I+T (connected to an I) */
1499 /* Node doesn't want to be a target; stay here and wait for a PRLI from the remote node
1500 * if it really wants to connect to us as a target */
1501 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PRLI, __ocs_d_common, __func__)) {
1502 return NULL;
1503 }
1504 ocs_assert(node->els_req_cnt, NULL);
1505 node->els_req_cnt--;
1506 break;
1507 }
1508
1509 case OCS_EVT_SRRS_ELS_CMPL_OK: {
1510 /* Normal T, I+T, target-server rejected the process login */
1511 /* This would be received only in the case where we sent LS_RJT for the PRLI, so
1512 * do nothing. (note: as T only we could shutdown the node)
1513 */
1514 ocs_assert(node->els_cmpl_cnt, NULL);
1515 node->els_cmpl_cnt--;
1516 break;
1517 }
1518
1519 case OCS_EVT_PLOGI_RCVD: {
1520 /* sm: save sparams, set send_plogi_acc, post implicit logout
1521 * Save plogi parameters */
1522 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1523 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1524
1525 /* Restart node attach with new service parameters, and send ACC */
1526 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1527 break;
1528 }
1529
1530 case OCS_EVT_LOGO_RCVD: {
1531 /* I, T, I+T */
1532 fc_header_t *hdr = cbdata->header->dma.virt;
1533 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1534 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1535 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1536 break;
1537 }
1538
1539 default:
1540 __ocs_d_common(__func__, ctx, evt, arg);
1541 return NULL;
1542 }
1543
1544 return NULL;
1545 }
1546
1547 /**
1548 * @ingroup device_sm
1549 * @brief Device node state machine: Wait for a LOGO accept.
1550 *
1551 * <h3 class="desc">Description</h3>
1552 * Waits for a LOGO accept completion.
1553 *
1554 * @param ctx Remote node state machine context.
1555 * @param evt Event to process
1556 * @param arg Per event optional argument
1557 *
1558 * @return Returns NULL.
1559 */
1560
1561 void *
1562 __ocs_d_wait_logo_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1563 {
1564 std_node_state_decl();
1565
1566 node_sm_trace();
1567
1568 switch(evt) {
1569 case OCS_EVT_ENTER:
1570 ocs_node_hold_frames(node);
1571 break;
1572
1573 case OCS_EVT_EXIT:
1574 ocs_node_accept_frames(node);
1575 break;
1576
1577 case OCS_EVT_SRRS_ELS_CMPL_OK:
1578 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1579 /* sm: / post explicit logout */
1580 ocs_assert(node->els_cmpl_cnt, NULL);
1581 node->els_cmpl_cnt--;
1582 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
1583 break;
1584 default:
1585 __ocs_d_common(__func__, ctx, evt, arg);
1586 return NULL;
1587 }
1588
1589 return NULL;
1590 }
1591
1592 /**
1593 * @ingroup device_sm
1594 * @brief Device node state machine: Device is ready.
1595 *
1596 * @param ctx Remote node state machine context.
1597 * @param evt Event to process.
1598 * @param arg Per event optional argument.
1599 *
1600 * @return Returns NULL.
1601 */
1602
1603 void *
1604 __ocs_d_device_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1605 {
1606 ocs_node_cb_t *cbdata = arg;
1607 std_node_state_decl();
1608
1609 if (evt != OCS_EVT_FCP_CMD_RCVD) {
1610 node_sm_trace();
1611 }
1612
1613 switch(evt) {
1614 case OCS_EVT_ENTER:
1615 node->fcp_enabled = TRUE;
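/* FCP is now enabled on this node; announce it to the back-end as a new
 * initiator and/or target according to the roles learned from the PRLI.
 */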
1616 if (node->init) {
1617 device_printf(ocs->dev, "[%s] found (initiator) WWPN %s WWNN %s\n", node->display_name,
1618 node->wwpn, node->wwnn);
1619 if (node->sport->enable_tgt)
1620 ocs_scsi_new_initiator(node);
1621 }
1622 if (node->targ) {
1623 device_printf(ocs->dev, "[%s] found (target) WWPN %s WWNN %s\n", node->display_name,
1624 node->wwpn, node->wwnn);
1625 if (node->sport->enable_ini)
1626 ocs_scsi_new_target(node);
1627 }
1628 break;
1629
1630 case OCS_EVT_EXIT:
1631 node->fcp_enabled = FALSE;
1632 break;
1633
1634 case OCS_EVT_PLOGI_RCVD: {
1635 /* sm: save sparams, set send_plogi_acc, post implicit logout
1636 * Save plogi parameters */
1637 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1638 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1639
1640 /* Restart node attach with new service parameters, and send ACC */
1641 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1642 break;
1643 }
1644
1645 case OCS_EVT_PDISC_RCVD: {
1646 fc_header_t *hdr = cbdata->header->dma.virt;
1647 ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1648 break;
1649 }
1650
1651 case OCS_EVT_PRLI_RCVD: {
1652 /* T, I+T: remote initiator is slow to get started */
1653 fc_header_t *hdr = cbdata->header->dma.virt;
1654
1655 ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1656
1657 /* sm: send PRLI acc/reject */
1658 if (ocs->fc_type == node->fc_type)
1659 ocs_send_prli_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
1660 else
1661 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
1662 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
1663 break;
1664 }
1665
1666 case OCS_EVT_PRLO_RCVD: {
1667 fc_header_t *hdr = cbdata->header->dma.virt;
1668 fc_prlo_payload_t *prlo = cbdata->payload->dma.virt;
1669
1670 /* sm: send PRLO acc/reject */
1671 if (ocs->fc_type == prlo->type)
1672 ocs_send_prlo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), ocs->fc_type, NULL, NULL);
1673 else
1674 ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id), FC_REASON_UNABLE_TO_PERFORM,
1675 FC_EXPL_REQUEST_NOT_SUPPORTED, 0, NULL, NULL);
1676 /*TODO: need implicit logout */
1677 break;
1678 }
1679
1680 case OCS_EVT_LOGO_RCVD: {
1681 fc_header_t *hdr = cbdata->header->dma.virt;
1682 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1683 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1684 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1685 break;
1686 }
1687
1688 case OCS_EVT_ADISC_RCVD: {
1689 fc_header_t *hdr = cbdata->header->dma.virt;
1690 ocs_send_adisc_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1691 break;
1692 }
1693
1694 case OCS_EVT_RRQ_RCVD: {
1695 fc_header_t *hdr = cbdata->header->dma.virt;
1696 /* Send LS_ACC */
1697 ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1698 break;
1699 }
1700
1701 case OCS_EVT_ABTS_RCVD:
1702 ocs_process_abts(cbdata->io, cbdata->header->dma.virt);
1703 break;
1704
1705 case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1706 break;
1707
1708 case OCS_EVT_NODE_REFOUND:
1709 break;
1710
1711 case OCS_EVT_NODE_MISSING:
1712 if (node->sport->enable_rscn) {
1713 ocs_node_transition(node, __ocs_d_device_gone, NULL);
1714 }
1715 break;
1716
1717 case OCS_EVT_SRRS_ELS_CMPL_OK:
1718 /* T, or I+T, PRLI accept completed ok */
1719 ocs_assert(node->els_cmpl_cnt, NULL);
1720 node->els_cmpl_cnt--;
1721 break;
1722
1723 case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1724 /* T, or I+T, PRLI accept failed to complete */
1725 ocs_assert(node->els_cmpl_cnt, NULL);
1726 node->els_cmpl_cnt--;
1727 node_printf(node, "Failed to send PRLI LS_ACC\n");
1728 break;
1729
1730 default:
1731 __ocs_d_common(__func__, ctx, evt, arg);
1732 return NULL;
1733 }
1734
1735 return NULL;
1736 }
1737
1738 /**
1739 * @ingroup device_sm
1740 * @brief Device node state machine: Node is gone (absent from GID_PT).
1741 *
1742 * <h3 class="desc">Description</h3>
1743 * State entered when a node is detected as being gone (absent from GID_PT).
1744 *
1745 * @param ctx Remote node state machine context.
1746 * @param evt Event to process
1747 * @param arg Per event optional argument
1748 *
1749 * @return Returns NULL.
1750 */
1751
1752 void *
1753 __ocs_d_device_gone(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1754 {
1755 int32_t rc = OCS_SCSI_CALL_COMPLETE;
1756 int32_t rc_2 = OCS_SCSI_CALL_COMPLETE;
1757 ocs_node_cb_t *cbdata = arg;
1758 std_node_state_decl();
1759
1760 node_sm_trace();
1761
1762 switch(evt) {
1763 case OCS_EVT_ENTER: {
1764 const char *labels[] = {"none", "initiator", "target", "initiator+target"};
1765
1766 device_printf(ocs->dev, "[%s] missing (%s) WWPN %s WWNN %s\n", node->display_name,
1767 labels[(node->targ << 1) | (node->init)], node->wwpn, node->wwnn);
1768
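/* Notify the back-end(s) that the remote node is missing: delete the remote
 * target if we act as an initiator toward it, delete the remote initiator if
 * we act as a target toward it, or both when both roles are enabled on each
 * side.
 */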
1769 switch(ocs_node_get_enable(node)) {
1770 case OCS_NODE_ENABLE_T_TO_T:
1771 case OCS_NODE_ENABLE_I_TO_T:
1772 case OCS_NODE_ENABLE_IT_TO_T:
1773 rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1774 break;
1775
1776 case OCS_NODE_ENABLE_T_TO_I:
1777 case OCS_NODE_ENABLE_I_TO_I:
1778 case OCS_NODE_ENABLE_IT_TO_I:
1779 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1780 break;
1781
1782 case OCS_NODE_ENABLE_T_TO_IT:
1783 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1784 break;
1785
1786 case OCS_NODE_ENABLE_I_TO_IT:
1787 rc = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1788 break;
1789
1790 case OCS_NODE_ENABLE_IT_TO_IT:
1791 rc = ocs_scsi_del_initiator(node, OCS_SCSI_INITIATOR_MISSING);
1792 rc_2 = ocs_scsi_del_target(node, OCS_SCSI_TARGET_MISSING);
1793 break;
1794
1795 default:
1796 rc = OCS_SCSI_CALL_COMPLETE;
1797 break;
1798 }
1799
1800 if ((rc == OCS_SCSI_CALL_COMPLETE) && (rc_2 == OCS_SCSI_CALL_COMPLETE)) {
1801 ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
1802 }
1803
1804 break;
1805 }
1806 case OCS_EVT_NODE_REFOUND:
1807 /* two approaches, reauthenticate with PLOGI/PRLI, or ADISC */
1808
1809 /* reauthenticate with PLOGI/PRLI */
1810 /* ocs_node_transition(node, __ocs_d_discovered, NULL); */
1811
1812 /* reauthenticate with ADISC
1813 * sm: send ADISC */
1814 ocs_send_adisc(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1815 ocs_node_transition(node, __ocs_d_wait_adisc_rsp, NULL);
1816 break;
1817
1818 case OCS_EVT_PLOGI_RCVD: {
1819 /* sm: save sparams, set send_plogi_acc, post implicit logout
1820 * Save plogi parameters */
1821 ocs_node_save_sparms(node, cbdata->payload->dma.virt);
1822 ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PLOGI);
1823
1824 /* Restart node attach with new service parameters, and send ACC */
1825 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_IMPLICIT_LOGO, NULL);
1826 break;
1827 }
1828
1829 case OCS_EVT_FCP_CMD_RCVD: {
1830 /* most likely a stale frame (received prior to link down); if we attempt
1831 * to send a LOGO, it will probably time out and eat up 20s; thus, drop the FCP_CMND
1832 */
1833 node_printf(node, "FCP_CMND received, drop\n");
1834 break;
1835 }
1836 case OCS_EVT_LOGO_RCVD: {
1837 /* I, T, I+T */
1838 fc_header_t *hdr = cbdata->header->dma.virt;
1839 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1840 /* sm: send LOGO acc */
1841 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1842 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1843 break;
1844 }
1845 default:
1846 __ocs_d_common(__func__, ctx, evt, arg);
1847 return NULL;
1848 }
1849
1850 return NULL;
1851 }
1852
1853 /**
1854 * @ingroup device_sm
1855 * @brief Device node state machine: Wait for the ADISC response.
1856 *
1857 * <h3 class="desc">Description</h3>
1858 * Waits for the ADISC response from the remote node.
1859 *
1860 * @param ctx Remote node state machine context.
1861 * @param evt Event to process.
1862 * @param arg Per event optional argument.
1863 *
1864 * @return Returns NULL.
1865 */
1866
1867 void *
1868 __ocs_d_wait_adisc_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1869 {
1870 ocs_node_cb_t *cbdata = arg;
1871 std_node_state_decl();
1872
1873 node_sm_trace();
1874
1875 switch(evt) {
1876 case OCS_EVT_SRRS_ELS_REQ_OK:
1877 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
1878 return NULL;
1879 }
1880 ocs_assert(node->els_req_cnt, NULL);
1881 node->els_req_cnt--;
1882 ocs_node_transition(node, __ocs_d_device_ready, NULL);
1883 break;
1884
1885 case OCS_EVT_SRRS_ELS_REQ_RJT:
1886 /* received an LS_RJT, in this case, send shutdown (explicit logo)
1887 * event which will unregister the node, and start over with PLOGI
1888 */
1889 if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_ADISC, __ocs_d_common, __func__)) {
1890 return NULL;
1891 }
1892 ocs_assert(node->els_req_cnt, NULL);
1893 node->els_req_cnt--;
1894 /*sm: post explicit logout */
1895 ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
1896 break;
1897
1898 case OCS_EVT_LOGO_RCVD: {
1899 /* In this case, we have the equivalent of an LS_RJT for the ADISC,
1900 * so we need to abort the ADISC, and re-login with PLOGI
1901 */
1902 /*sm: request abort, send LOGO acc */
1903 fc_header_t *hdr = cbdata->header->dma.virt;
1904 node_printf(node, "%s received attached=%d\n", ocs_sm_event_name(evt), node->attached);
1905 ocs_send_logo_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1906 ocs_node_transition(node, __ocs_d_wait_logo_acc_cmpl, NULL);
1907 break;
1908 }
1909 default:
1910 __ocs_d_common(__func__, ctx, evt, arg);
1911 return NULL;
1912 }
1913
1914 return NULL;
1915 }
1916