xref: /freebsd/sys/dev/ocs_fc/ocs_fabric.c (revision 1323ec571215a77ddd21294f0871979d5ad6b992)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 /**
35  * @file
36  *
37  * This file implements remote node state machines for:
38  * - Fabric logins.
39  * - Fabric controller events.
40  * - Name/directory services interaction.
41  * - Point-to-point logins.
42  */
43 
44 /*!
45 @defgroup fabric_sm Node State Machine: Fabric States
46 @defgroup ns_sm Node State Machine: Name/Directory Services States
47 @defgroup p2p_sm Node State Machine: Point-to-Point Node States
48 */
49 
50 #include "ocs.h"
51 #include "ocs_fabric.h"
52 #include "ocs_els.h"
53 #include "ocs_device.h"
54 
55 static void ocs_fabric_initiate_shutdown(ocs_node_t *node);
56 static void * __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg);
57 static int32_t ocs_start_ns_node(ocs_sport_t *sport);
58 static int32_t ocs_start_fabctl_node(ocs_sport_t *sport);
59 static int32_t ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len);
60 static void ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata);
61 static uint64_t ocs_get_wwpn(fc_plogi_payload_t *sp);
62 static void gidpt_delay_timer_cb(void *arg);
63 
64 /**
65  * @ingroup fabric_sm
66  * @brief Fabric node state machine: Initial state.
67  *
68  * @par Description
69  * Send an FLOGI to a well-known fabric.
70  *
71  * @param ctx Remote node sm context.
72  * @param evt Event to process.
73  * @param arg Per event optional argument.
74  *
75  * @return Returns NULL.
76  */
77 void *
78 __ocs_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
79 {
80 	std_node_state_decl();
81 
82 	node_sm_trace();
83 
84 	switch(evt) {
85 	case OCS_EVT_REENTER:	/* not sure why we're getting these ... */
86 		ocs_log_debug(node->ocs, ">>> reenter !!\n");
87 		/* fall through */
88 	case OCS_EVT_ENTER:
89 		/* sm: / send FLOGI */
90 		ocs_send_flogi(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
91 		ocs_node_transition(node, __ocs_fabric_flogi_wait_rsp, NULL);
92 		break;
93 
94 	default:
95 		__ocs_fabric_common(__func__, ctx, evt, arg);
96 		break;
97 	}
98 
99 	return NULL;
100 }
101 
102 /**
103  * @ingroup fabric_sm
104  * @brief Set sport topology.
105  *
106  * @par Description
107  * Set sport topology.
108  *
109  * @param node Pointer to the node for which the topology is set.
110  * @param topology Topology to set.
111  *
112  * @return Returns NULL.
113  */
114 void
115 ocs_fabric_set_topology(ocs_node_t *node, ocs_sport_topology_e topology)
116 {
117 	node->sport->topology = topology;
118 }
119 
120 /**
121  * @ingroup fabric_sm
122  * @brief Notify sport topology.
123  * @par Description
124  * notify sport topology.
125  * @param node Pointer to the node for which the topology is set.
126  * @return Returns NULL.
127  */
128 void
129 ocs_fabric_notify_topology(ocs_node_t *node)
130 {
131 	ocs_node_t *tmp_node;
132 	ocs_node_t *next;
133 	ocs_sport_topology_e topology = node->sport->topology;
134 
135 	/* now loop through the nodes in the sport and send topology notification */
136 	ocs_sport_lock(node->sport);
137 	ocs_list_foreach_safe(&node->sport->node_list, tmp_node, next) {
138 		if (tmp_node != node) {
139 			ocs_node_post_event(tmp_node, OCS_EVT_SPORT_TOPOLOGY_NOTIFY, (void *)topology);
140 		}
141 	}
142 	ocs_sport_unlock(node->sport);
143 }
144 
145 /**
146  * @ingroup fabric_sm
147  * @brief Fabric node state machine: Wait for an FLOGI response.
148  *
149  * @par Description
150  * Wait for an FLOGI response event.
151  *
152  * @param ctx Remote node state machine context.
153  * @param evt Event to process.
154  * @param arg Per event optional argument.
155  *
156  * @return Returns NULL.
157  */
158 
159 void *
160 __ocs_fabric_flogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
161 {
162 	ocs_node_cb_t *cbdata = arg;
163 	std_node_state_decl();
164 
165 	node_sm_trace();
166 
167 	switch(evt) {
168 	case OCS_EVT_SRRS_ELS_REQ_OK: {
169 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
170 			return NULL;
171 		}
172 		ocs_assert(node->els_req_cnt, NULL);
173 		node->els_req_cnt--;
174 
175 		ocs_domain_save_sparms(node->sport->domain, cbdata->els->els_rsp.virt);
176 
177 		ocs_display_sparams(node->display_name, "flogi rcvd resp", 0, NULL,
178 			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
179 
180 		/* Check to see if the fabric is an F_PORT or and N_PORT */
181 		if (ocs_rnode_is_nport(cbdata->els->els_rsp.virt)) {
182 			/* sm: if nport and p2p_winner / ocs_domain_attach */
183 			ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_P2P);
184 			if (ocs_p2p_setup(node->sport)) {
185 				node_printf(node, "p2p setup failed, shutting down node\n");
186 				node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
187 				ocs_fabric_initiate_shutdown(node);
188 			} else {
189 				if (node->sport->p2p_winner) {
190 					ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
191 					if (!node->sport->domain->attached) {
192 						node_printf(node, "p2p winner, domain not attached\n");
193 						ocs_domain_attach(node->sport->domain, node->sport->p2p_port_id);
194 					} else {
195 						/* already attached, just send ATTACH_OK */
196 						node_printf(node, "p2p winner, domain already attached\n");
197 						ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
198 					}
199 				} else {
200 					/* peer is p2p winner; PLOGI will be received on the
201 					 * remote SID=1 node; this node has served its purpose
202 					 */
203 					node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
204 					ocs_fabric_initiate_shutdown(node);
205 				}
206 			}
207 		} else {
208 			/* sm: if not nport / ocs_domain_attach */
209 			/* ext_status has the fc_id, attach domain */
210 			ocs_fabric_set_topology(node, OCS_SPORT_TOPOLOGY_FABRIC);
211 			ocs_fabric_notify_topology(node);
212 			ocs_assert(!node->sport->domain->attached, NULL);
213 			ocs_domain_attach(node->sport->domain, cbdata->ext_status);
214 			ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
215 		}
216 
217 		break;
218 	}
219 
220 	case OCS_EVT_ELS_REQ_ABORTED:
221 	case OCS_EVT_SRRS_ELS_REQ_RJT:
222 	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
223 		ocs_sport_t *sport = node->sport;
224 		/*
225 		 * with these errors, we have no recovery, so shutdown the sport, leave the link
226 		 * up and the domain ready
227 		 */
228 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FLOGI, __ocs_fabric_common, __func__)) {
229 			return NULL;
230 		}
231 		ocs_assert(node->els_req_cnt, NULL);
232 		node->els_req_cnt--;
233 
234 		if (node->sport->topology == OCS_SPORT_TOPOLOGY_P2P && !node->sport->p2p_winner) {
235 			node_printf(node, "FLOGI failed, peer p2p winner, shutdown node\n");
236 			node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
237 			ocs_fabric_initiate_shutdown(node);
238 			break;
239 		}
240 
241 		node_printf(node, "FLOGI failed evt=%s, shutting down sport [%s]\n", ocs_sm_event_name(evt),
242 			sport->display_name);
243 		ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
244 		break;
245 	}
246 
247 	default:
248 		__ocs_fabric_common(__func__, ctx, evt, arg);
249 		break;
250 	}
251 
252 	return NULL;
253 }
254 
255 /**
256  * @ingroup fabric_sm
257  * @brief Fabric node state machine: Initial state for a virtual port.
258  *
259  * @par Description
260  * State entered when a virtual port is created. Send FDISC.
261  *
262  * @param ctx Remote node state machine context.
263  * @param evt Event to process.
264  * @param arg Per event optional argument.
265  *
266  * @return Returns NULL.
267  */
268 void *
269 __ocs_vport_fabric_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
270 {
271 	std_node_state_decl();
272 
273 	node_sm_trace();
274 
275 	switch(evt) {
276 	case OCS_EVT_ENTER:
277 		/* sm: send FDISC */
278 		ocs_send_fdisc(node, OCS_FC_FLOGI_TIMEOUT_SEC, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
279 		ocs_node_transition(node, __ocs_fabric_fdisc_wait_rsp, NULL);
280 		break;
281 
282 	default:
283 		__ocs_fabric_common(__func__, ctx, evt, arg);
284 		break;
285 	}
286 
287 	return NULL;
288 }
289 
290 /**
291  * @ingroup fabric_sm
292  * @brief Fabric node state machine: Wait for an FDISC response
293  *
294  * @par Description
295  * Used for a virtual port. Waits for an FDISC response. If OK, issue a HW port attach.
296  *
297  * @param ctx Remote node state machine context.
298  * @param evt Event to process.
299  * @param arg Per event optional argument.
300  *
301  * @return Returns NULL.
302  */
303 void *
304 __ocs_fabric_fdisc_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
305 {
306 	ocs_node_cb_t *cbdata = arg;
307 	std_node_state_decl();
308 
309 	node_sm_trace();
310 
311 	switch(evt) {
312 	case OCS_EVT_SRRS_ELS_REQ_OK: {
313 		/* fc_id is in ext_status */
314 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
315 			return NULL;
316 		}
317 
318 		ocs_display_sparams(node->display_name, "fdisc rcvd resp", 0, NULL,
319 			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
320 
321 		ocs_assert(node->els_req_cnt, NULL);
322 		node->els_req_cnt--;
323 		/* sm: ocs_sport_attach */
324 		ocs_sport_attach(node->sport, cbdata->ext_status);
325 		ocs_node_transition(node, __ocs_fabric_wait_domain_attach, NULL);
326 		break;
327 	}
328 
329 	case OCS_EVT_SRRS_ELS_REQ_RJT:
330 	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
331 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_FDISC, __ocs_fabric_common, __func__)) {
332 			return NULL;
333 		}
334 		ocs_assert(node->els_req_cnt, NULL);
335 		node->els_req_cnt--;
336 		ocs_log_err(ocs, "FDISC failed, shutting down sport\n");
337 		/* sm: shutdown sport */
338 		ocs_sm_post_event(&node->sport->sm, OCS_EVT_SHUTDOWN, NULL);
339 		break;
340 	}
341 
342 	default:
343 		__ocs_fabric_common(__func__, ctx, evt, arg);
344 		break;
345 	}
346 
347 	return NULL;
348 }
349 
350 /**
351  * @ingroup fabric_sm
352  * @brief Fabric node state machine: Wait for a domain/sport attach event.
353  *
354  * @par Description
355  * Waits for a domain/sport attach event.
356  *
357  * @param ctx Remote node state machine context.
358  * @param evt Event to process.
359  * @param arg Per event optional argument.
360  *
361  * @return Returns NULL.
362  */
363 void *
364 __ocs_fabric_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
365 {
366 	std_node_state_decl();
367 
368 	node_sm_trace();
369 
370 	switch(evt) {
371 	case OCS_EVT_ENTER:
372 		ocs_node_hold_frames(node);
373 		break;
374 
375 	case OCS_EVT_EXIT:
376 		ocs_node_accept_frames(node);
377 		break;
378 	case OCS_EVT_DOMAIN_ATTACH_OK:
379 	case OCS_EVT_SPORT_ATTACH_OK: {
380 		int rc;
381 
382 		rc = ocs_start_ns_node(node->sport);
383 		if (rc)
384 			return NULL;
385 
386 		/* sm: if enable_ini / start fabctl node
387 		 * Instantiate the fabric controller (sends SCR) */
388 		if (node->sport->enable_rscn) {
389 			rc = ocs_start_fabctl_node(node->sport);
390 			if (rc)
391 				return NULL;
392 		}
393 		ocs_node_transition(node, __ocs_fabric_idle, NULL);
394 		break;
395 	}
396 	default:
397 		__ocs_fabric_common(__func__, ctx, evt, arg);
398 		return NULL;
399 	}
400 
401 	return NULL;
402 }
403 
404 /**
405  * @ingroup fabric_sm
406  * @brief Fabric node state machine: Fabric node is idle.
407  *
408  * @par Description
409  * Wait for fabric node events.
410  *
411  * @param ctx Remote node state machine context.
412  * @param evt Event to process.
413  * @param arg Per event optional argument.
414  *
415  * @return Returns NULL.
416  */
417 void *
418 __ocs_fabric_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
419 {
420 	std_node_state_decl();
421 
422 	node_sm_trace();
423 
424 	switch(evt) {
425 	case OCS_EVT_DOMAIN_ATTACH_OK:
426 		break;
427 	default:
428 		__ocs_fabric_common(__func__, ctx, evt, arg);
429 		return NULL;
430 	}
431 
432 	return NULL;
433 }
434 
435 /**
436  * @ingroup ns_sm
437  * @brief Name services node state machine: Initialize.
438  *
439  * @par Description
440  * A PLOGI is sent to the well-known name/directory services node.
441  *
442  * @param ctx Remote node state machine context.
443  * @param evt Event to process.
444  * @param arg Per event optional argument.
445  *
446  * @return Returns NULL.
447  */
448 void *
449 __ocs_ns_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
450 {
451 	std_node_state_decl();
452 
453 	node_sm_trace();
454 
455 	switch(evt) {
456 	case OCS_EVT_ENTER:
457 		/* sm: send PLOGI */
458 		ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
459 		ocs_node_transition(node, __ocs_ns_plogi_wait_rsp, NULL);
460 		break;
461 	default:
462 		__ocs_fabric_common(__func__, ctx, evt, arg);
463 		break;
464 	}
465 
466 	return NULL;
467 }
468 
469 /**
470  * @ingroup ns_sm
471  * @brief Name services node state machine: Wait for a PLOGI response.
472  *
473  * @par Description
474  * Waits for a response from PLOGI to name services node, then issues a
475  * node attach request to the HW.
476  *
477  * @param ctx Remote node state machine context.
478  * @param evt Event to process.
479  * @param arg Per event optional argument.
480  *
481  * @return Returns NULL.
482  */
483 void *
484 __ocs_ns_plogi_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
485 {
486 	int32_t rc;
487 	ocs_node_cb_t *cbdata = arg;
488 	std_node_state_decl();
489 
490 	node_sm_trace();
491 
492 	switch(evt) {
493 	case OCS_EVT_SRRS_ELS_REQ_OK: {
494 		/* Save service parameters */
495 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
496 			return NULL;
497 		}
498 		ocs_assert(node->els_req_cnt, NULL);
499 		node->els_req_cnt--;
500 		/* sm: save sparams, ocs_node_attach */
501 		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
502 		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
503 			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
504 		rc = ocs_node_attach(node);
505 		ocs_node_transition(node, __ocs_ns_wait_node_attach, NULL);
506 		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
507 			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
508 		}
509 		break;
510 	}
511 	default:
512 		__ocs_fabric_common(__func__, ctx, evt, arg);
513 		return NULL;
514 	}
515 
516 	return NULL;
517 }
518 
519 /**
520  * @ingroup ns_sm
521  * @brief Name services node state machine: Wait for a node attach completion.
522  *
523  * @par Description
524  * Waits for a node attach completion, then issues an RFTID name services
525  * request.
526  *
527  * @param ctx Remote node state machine context.
528  * @param evt Event to process.
529  * @param arg Per event optional argument.
530  *
531  * @return Returns NULL.
532  */
533 void *
534 __ocs_ns_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
535 {
536 	std_node_state_decl();
537 
538 	node_sm_trace();
539 
540 	switch(evt) {
541 	case OCS_EVT_ENTER:
542 		ocs_node_hold_frames(node);
543 		break;
544 
545 	case OCS_EVT_EXIT:
546 		ocs_node_accept_frames(node);
547 		break;
548 
549 	case OCS_EVT_NODE_ATTACH_OK:
550 		node->attached = TRUE;
551 		/* sm: send RFTID */
552 		ocs_ns_send_rftid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
553 				 OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
554 		ocs_node_transition(node, __ocs_ns_rftid_wait_rsp, NULL);
555 		break;
556 
557 	case OCS_EVT_NODE_ATTACH_FAIL:
558 		/* node attach failed, shutdown the node */
559 		node->attached = FALSE;
560 		node_printf(node, "Node attach failed\n");
561 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
562 		ocs_fabric_initiate_shutdown(node);
563 		break;
564 
565 	case OCS_EVT_SHUTDOWN:
566 		node_printf(node, "Shutdown event received\n");
567 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
568 		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
569 		break;
570 
571 	/* if receive RSCN just ignore,
572 	 * we haven't sent GID_PT yet (ACC sent by fabctl node) */
573 	case OCS_EVT_RSCN_RCVD:
574 		break;
575 
576 	default:
577 		__ocs_fabric_common(__func__, ctx, evt, arg);
578 		return NULL;
579 	}
580 
581 	return NULL;
582 }
583 
584 /**
585  * @ingroup ns_sm
586  * @brief Wait for a domain/sport/node attach completion, then
587  * shutdown.
588  *
589  * @par Description
590  * Waits for a domain/sport/node attach completion, then shuts
591  * node down.
592  *
593  * @param ctx Remote node state machine context.
594  * @param evt Event to process.
595  * @param arg Per event optional argument.
596  *
597  * @return Returns NULL.
598  */
599 void *
600 __ocs_fabric_wait_attach_evt_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
601 {
602 	std_node_state_decl();
603 
604 	node_sm_trace();
605 
606 	switch(evt) {
607 	case OCS_EVT_ENTER:
608 		ocs_node_hold_frames(node);
609 		break;
610 
611 	case OCS_EVT_EXIT:
612 		ocs_node_accept_frames(node);
613 		break;
614 
615 	/* wait for any of these attach events and then shutdown */
616 	case OCS_EVT_NODE_ATTACH_OK:
617 		node->attached = TRUE;
618 		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
619 		ocs_fabric_initiate_shutdown(node);
620 		break;
621 
622 	case OCS_EVT_NODE_ATTACH_FAIL:
623 		node->attached = FALSE;
624 		node_printf(node, "Attach evt=%s, proceed to shutdown\n", ocs_sm_event_name(evt));
625 		ocs_fabric_initiate_shutdown(node);
626 		break;
627 
628 	/* ignore shutdown event as we're already in shutdown path */
629 	case OCS_EVT_SHUTDOWN:
630 		node_printf(node, "Shutdown event received\n");
631 		break;
632 
633 	default:
634 		__ocs_fabric_common(__func__, ctx, evt, arg);
635 		return NULL;
636 	}
637 
638 	return NULL;
639 }
640 
641 /**
642  * @ingroup ns_sm
643  * @brief Name services node state machine: Wait for an RFTID response event.
644  *
645  * @par Description
646  * Waits for an RFTID response event; if configured for an initiator operation,
647  * a GIDPT name services request is issued.
648  *
649  * @param ctx Remote node state machine context.
650  * @param evt Event to process.
651  * @param arg Per event optional argument.
652  *
653  * @return Returns NULL.
654  */
655 void *
656 __ocs_ns_rftid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
657 {
658 	std_node_state_decl();
659 
660 	node_sm_trace();
661 
662 	switch(evt) {
663 	case OCS_EVT_SRRS_ELS_REQ_OK:
664 		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFT_ID, __ocs_fabric_common, __func__)) {
665 			return NULL;
666 		}
667 		ocs_assert(node->els_req_cnt, NULL);
668 		node->els_req_cnt--;
669 		/*sm: send RFFID */
670 		ocs_ns_send_rffid(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
671 				OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
672 		ocs_node_transition(node, __ocs_ns_rffid_wait_rsp, NULL);
673 		break;
674 
675 	/* if receive RSCN just ignore,
676 	 * we haven't sent GID_PT yet (ACC sent by fabctl node) */
677 	case OCS_EVT_RSCN_RCVD:
678 		break;
679 
680 	default:
681 		__ocs_fabric_common(__func__, ctx, evt, arg);
682 		return NULL;
683 	}
684 
685 	return NULL;
686 }
687 
688 /**
689  * @ingroup ns_sm
690  * @brief Fabric node state machine: Wait for RFFID response event.
691  *
692  * @par Description
693  * Waits for an RFFID response event; if configured for an initiator operation,
694  * a GIDPT name services request is issued.
695  *
696  * @param ctx Remote node state machine context.
697  * @param evt Event to process.
698  * @param arg Per event optional argument.
699  *
700  * @return Returns NULL.
701  */
702 void *
703 __ocs_ns_rffid_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
704 {
705 	std_node_state_decl();
706 
707 	node_sm_trace();
708 
709 	switch(evt) {
710 	case OCS_EVT_SRRS_ELS_REQ_OK:	{
711 		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_RFF_ID, __ocs_fabric_common, __func__)) {
712 			return NULL;
713 		}
714 		ocs_assert(node->els_req_cnt, NULL);
715 		node->els_req_cnt--;
716 		if (node->sport->enable_rscn) {
717 			/* sm: if enable_rscn / send GIDPT */
718 			ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
719 					OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
720 			ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
721 		} else {
722 			/* if 'T' only, we're done, go to idle */
723 			ocs_node_transition(node, __ocs_ns_idle, NULL);
724 		}
725 		break;
726 	}
727 	/* if receive RSCN just ignore,
728 	 * we haven't sent GID_PT yet (ACC sent by fabctl node) */
729 	case OCS_EVT_RSCN_RCVD:
730 		break;
731 
732 	default:
733 		__ocs_fabric_common(__func__, ctx, evt, arg);
734 		return NULL;
735 	}
736 
737 	return NULL;
738 }
739 
740 /**
741  * @ingroup ns_sm
742  * @brief Name services node state machine: Wait for a GIDPT response.
743  *
744  * @par Description
745  * Wait for a GIDPT response from the name server. Process the FC_IDs that are
746  * reported by creating new remote ports, as needed.
747  *
748  * @param ctx Remote node state machine context.
749  * @param evt Event to process.
750  * @param arg Per event optional argument.
751  *
752  * @return Returns NULL.
753  */
754 void *
755 __ocs_ns_gidpt_wait_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
756 {
757 	ocs_node_cb_t *cbdata = arg;
758 	std_node_state_decl();
759 
760 	node_sm_trace();
761 
762 	switch(evt) {
763 	case OCS_EVT_SRRS_ELS_REQ_OK:	{
764 		if (node_check_ns_req(ctx, evt, arg, FC_GS_NAMESERVER_GID_PT, __ocs_fabric_common, __func__)) {
765 			return NULL;
766 		}
767 		ocs_assert(node->els_req_cnt, NULL);
768 		node->els_req_cnt--;
769 		/* sm: / process GIDPT payload */
770 		ocs_process_gidpt_payload(node, cbdata->els->els_rsp.virt, cbdata->els->els_rsp.len);
771 		/* TODO: should we logout at this point or just go idle */
772 		ocs_node_transition(node, __ocs_ns_idle, NULL);
773 		break;
774 	}
775 
776 	case OCS_EVT_SRRS_ELS_REQ_FAIL:	{
777 		/* not much we can do; will retry with the next RSCN */
778 		node_printf(node, "GID_PT failed to complete\n");
779 		ocs_assert(node->els_req_cnt, NULL);
780 		node->els_req_cnt--;
781 		ocs_node_transition(node, __ocs_ns_idle, NULL);
782 		break;
783 	}
784 
785 	/* if receive RSCN here, queue up another discovery processing */
786 	case OCS_EVT_RSCN_RCVD: {
787 		node_printf(node, "RSCN received during GID_PT processing\n");
788 		node->rscn_pending = 1;
789 		break;
790 	}
791 
792 	default:
793 		__ocs_fabric_common(__func__, ctx, evt, arg);
794 		return NULL;
795 	}
796 
797 	return NULL;
798 }
799 
800 /**
801  * @ingroup ns_sm
802  * @brief Name services node state machine: Idle state.
803  *
804  * @par Description
805  * Idle. Waiting for RSCN received events (posted from the fabric controller), and
806  * restarts the GIDPT name services query and processing.
807  *
808  * @param ctx Remote node state machine context.
809  * @param evt Event to process.
810  * @param arg Per event optional argument.
811  *
812  * @return Returns NULL.
813  */
814 void *
815 __ocs_ns_idle(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
816 {
817 	std_node_state_decl();
818 
819 	node_sm_trace();
820 
821 	switch(evt) {
822 	case OCS_EVT_ENTER:
823 		if (!node->rscn_pending) {
824 			break;
825 		}
826 		node_printf(node, "RSCN pending, restart discovery\n");
827 		node->rscn_pending = 0;
828 
829 			/* fall through */
830 
831 	case OCS_EVT_RSCN_RCVD: {
832 		/* sm: / send GIDPT
833 		 * If target RSCN processing is enabled, and this is target only
834 		 * (not initiator), and tgt_rscn_delay is non-zero,
835 		 * then we delay issuing the GID_PT
836 		 */
837 		if ((ocs->tgt_rscn_delay_msec != 0) && !node->sport->enable_ini && node->sport->enable_tgt &&
838 			enable_target_rscn(ocs)) {
839 			ocs_node_transition(node, __ocs_ns_gidpt_delay, NULL);
840 		} else {
841 			ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
842 					OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
843 			ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
844 		}
845 		break;
846 	}
847 
848 	default:
849 		__ocs_fabric_common(__func__, ctx, evt, arg);
850 		break;
851 	}
852 
853 	return NULL;
854 }
855 
856 /**
857  * @brief Handle GIDPT delay timer callback
858  *
859  * @par Description
860  * Post an OCS_EVT_GIDPT_DEIALY_EXPIRED event to the passed in node.
861  *
862  * @param arg Pointer to node.
863  *
864  * @return None.
865  */
866 static void
867 gidpt_delay_timer_cb(void *arg)
868 {
869 	ocs_node_t *node = arg;
870 	int32_t rc;
871 
872 	ocs_del_timer(&node->gidpt_delay_timer);
873 	rc = ocs_xport_control(node->ocs->xport, OCS_XPORT_POST_NODE_EVENT, node, OCS_EVT_GIDPT_DELAY_EXPIRED, NULL);
874 	if (rc) {
875 		ocs_log_err(node->ocs, "ocs_xport_control(OCS_XPORT_POST_NODE_EVENT) failed: %d\n", rc);
876 	}
877 }
878 
879 /**
880  * @ingroup ns_sm
881  * @brief Name services node state machine: Delayed GIDPT.
882  *
883  * @par Description
884  * Waiting for GIDPT delay to expire before submitting GIDPT to name server.
885  *
886  * @param ctx Remote node state machine context.
887  * @param evt Event to process.
888  * @param arg Per event optional argument.
889  *
890  * @return Returns NULL.
891  */
892 void *
893 __ocs_ns_gidpt_delay(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
894 {
895 	std_node_state_decl();
896 
897 	node_sm_trace();
898 
899 	switch(evt) {
900 	case OCS_EVT_ENTER: {
901 		time_t delay_msec;
902 
903 		ocs_assert(ocs->tgt_rscn_delay_msec != 0, NULL);
904 
905 		/*
906 		 * Compute the delay time.   Set to tgt_rscn_delay, if the time since last GIDPT
907 		 * is less than tgt_rscn_period, then use tgt_rscn_period.
908 		 */
909 		delay_msec = ocs->tgt_rscn_delay_msec;
910 		if ((ocs_msectime() - node->time_last_gidpt_msec) < ocs->tgt_rscn_period_msec) {
911 			delay_msec = ocs->tgt_rscn_period_msec;
912 		}
913 
914 		ocs_setup_timer(ocs, &node->gidpt_delay_timer, gidpt_delay_timer_cb, node, delay_msec);
915 
916 		break;
917 	}
918 
919 	case OCS_EVT_GIDPT_DELAY_EXPIRED:
920 		node->time_last_gidpt_msec = ocs_msectime();
921 		ocs_ns_send_gidpt(node, OCS_FC_ELS_CT_SEND_DEFAULT_TIMEOUT,
922 				OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
923 		ocs_node_transition(node, __ocs_ns_gidpt_wait_rsp, NULL);
924 		break;
925 
926 	case OCS_EVT_RSCN_RCVD: {
927 		ocs_log_debug(ocs, "RSCN received while in GIDPT delay - no action\n");
928 		break;
929 	}
930 
931 	default:
932 		__ocs_fabric_common(__func__, ctx, evt, arg);
933 		break;
934 	}
935 
936 	return NULL;
937 }
938 
939 /**
940  * @ingroup fabric_sm
941  * @brief Fabric controller node state machine: Initial state.
942  *
943  * @par Description
944  * Issue a PLOGI to a well-known fabric controller address.
945  *
946  * @param ctx Remote node state machine context.
947  * @param evt Event to process.
948  * @param arg Per event optional argument.
949  *
950  * @return Returns NULL.
951  */
952 void *
953 __ocs_fabctl_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
954 {
955 	ocs_node_t *node = ctx->app;
956 
957 	node_sm_trace();
958 
959 	switch(evt) {
960 	case OCS_EVT_ENTER:
961 		/* no need to login to fabric controller, just send SCR */
962 		ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
963 		ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
964 		break;
965 
966 	case OCS_EVT_NODE_ATTACH_OK:
967 		node->attached = TRUE;
968 		break;
969 
970 	default:
971 		__ocs_fabric_common(__func__, ctx, evt, arg);
972 		return NULL;
973 	}
974 
975 	return NULL;
976 }
977 
978 /**
979  * @ingroup fabric_sm
980  * @brief Fabric controller node state machine: Wait for a node attach request
981  * to complete.
982  *
983  * @par Description
984  * Wait for a node attach to complete. If successful, issue an SCR
985  * to the fabric controller, subscribing to all RSCN.
986  *
987  * @param ctx Remote node state machine context.
988  * @param evt Event to process.
989  * @param arg Per event optional argument.
990  *
991  * @return Returns NULL.
992  *
993  */
994 void *
995 __ocs_fabctl_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
996 {
997 	std_node_state_decl();
998 
999 	node_sm_trace();
1000 
1001 	switch(evt) {
1002 	case OCS_EVT_ENTER:
1003 		ocs_node_hold_frames(node);
1004 		break;
1005 
1006 	case OCS_EVT_EXIT:
1007 		ocs_node_accept_frames(node);
1008 		break;
1009 
1010 	case OCS_EVT_NODE_ATTACH_OK:
1011 		node->attached = TRUE;
1012 		/* sm: / send SCR */
1013 		ocs_send_scr(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1014 		ocs_node_transition(node, __ocs_fabctl_wait_scr_rsp, NULL);
1015 		break;
1016 
1017 	case OCS_EVT_NODE_ATTACH_FAIL:
1018 		/* node attach failed, shutdown the node */
1019 		node->attached = FALSE;
1020 		node_printf(node, "Node attach failed\n");
1021 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1022 		ocs_fabric_initiate_shutdown(node);
1023 		break;
1024 
1025 	case OCS_EVT_SHUTDOWN:
1026 		node_printf(node, "Shutdown event received\n");
1027 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1028 		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
1029 		break;
1030 
1031 	default:
1032 		__ocs_fabric_common(__func__, ctx, evt, arg);
1033 		return NULL;
1034 	}
1035 
1036 	return NULL;
1037 }
1038 
1039 /**
1040  * @ingroup fabric_sm
1041  * @brief Fabric controller node state machine: Wait for an SCR response from the
1042  * fabric controller.
1043  *
1044  * @par Description
1045  * Waits for an SCR response from the fabric controller.
1046  *
1047  * @param ctx Remote node state machine context.
1048  * @param evt Event to process.
1049  * @param arg Per event optional argument.
1050  *
1051  * @return Returns NULL.
1052  */
1053 void *
1054 __ocs_fabctl_wait_scr_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1055 {
1056 	std_node_state_decl();
1057 
1058 	node_sm_trace();
1059 
1060 	switch(evt) {
1061 	case OCS_EVT_SRRS_ELS_REQ_OK:
1062 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_SCR, __ocs_fabric_common, __func__)) {
1063 			return NULL;
1064 		}
1065 		ocs_assert(node->els_req_cnt, NULL);
1066 		node->els_req_cnt--;
1067 		ocs_node_transition(node, __ocs_fabctl_ready, NULL);
1068 		break;
1069 
1070 	default:
1071 		__ocs_fabric_common(__func__, ctx, evt, arg);
1072 		return NULL;
1073 	}
1074 
1075 	return NULL;
1076 }
1077 
1078 /**
1079  * @ingroup fabric_sm
1080  * @brief Fabric controller node state machine: Ready.
1081  *
1082  * @par Description
1083  * In this state, the fabric controller sends a RSCN, which is received
1084  * by this node and is forwarded to the name services node object; and
1085  * the RSCN LS_ACC is sent.
1086  *
1087  * @param ctx Remote node state machine context.
1088  * @param evt Event to process.
1089  * @param arg Per event optional argument.
1090  *
1091  * @return Returns NULL.
1092  */
1093 
1094 void *
1095 __ocs_fabctl_ready(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1096 {
1097 	ocs_node_cb_t *cbdata = arg;
1098 	std_node_state_decl();
1099 
1100 	node_sm_trace();
1101 
1102 	switch(evt) {
1103 	case OCS_EVT_RSCN_RCVD: {
1104 		fc_header_t *hdr = cbdata->header->dma.virt;
1105 
1106 		/* sm: / process RSCN (forward to name services node),
1107 		 * send LS_ACC */
1108 		ocs_process_rscn(node, cbdata);
1109 		ocs_send_ls_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1110 		ocs_node_transition(node, __ocs_fabctl_wait_ls_acc_cmpl, NULL);
1111 		break;
1112 	}
1113 
1114 	default:
1115 		__ocs_fabric_common(__func__, ctx, evt, arg);
1116 		return NULL;
1117 	}
1118 
1119 	return NULL;
1120 }
1121 
1122 /**
1123  * @ingroup fabric_sm
1124  * @brief Fabric controller node state machine: Wait for LS_ACC.
1125  *
1126  * @par Description
1127  * Waits for the LS_ACC from the fabric controller.
1128  *
1129  * @param ctx Remote node state machine context.
1130  * @param evt Event to process.
1131  * @param arg Per event optional argument.
1132  *
1133  * @return Returns NULL.
1134  */
1135 
1136 void *
1137 __ocs_fabctl_wait_ls_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1138 {
1139 	std_node_state_decl();
1140 
1141 	node_sm_trace();
1142 
1143 	switch(evt) {
1144 	case OCS_EVT_ENTER:
1145 		ocs_node_hold_frames(node);
1146 		break;
1147 
1148 	case OCS_EVT_EXIT:
1149 		ocs_node_accept_frames(node);
1150 		break;
1151 
1152 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1153 		ocs_assert(node->els_cmpl_cnt, NULL);
1154 		node->els_cmpl_cnt--;
1155 		ocs_node_transition(node, __ocs_fabctl_ready, NULL);
1156 		break;
1157 
1158 	default:
1159 		__ocs_fabric_common(__func__, ctx, evt, arg);
1160 		return NULL;
1161 	}
1162 
1163 	return NULL;
1164 }
1165 
1166 /**
1167  * @ingroup fabric_sm
1168  * @brief Initiate fabric node shutdown.
1169  *
1170  * @param node Node for which shutdown is initiated.
1171  *
1172  * @return Returns None.
1173  */
1174 
1175 static void
1176 ocs_fabric_initiate_shutdown(ocs_node_t *node)
1177 {
1178 	ocs_hw_rtn_e rc;
1179 	ocs_t *ocs = node->ocs;
1180 	ocs_scsi_io_alloc_disable(node);
1181 
1182 	if (node->attached) {
1183 		/* issue hw node free; don't care if succeeds right away
1184 		 * or sometime later, will check node->attached later in
1185 		 * shutdown process
1186 		 */
1187 		rc = ocs_hw_node_detach(&ocs->hw, &node->rnode);
1188 		if (node->rnode.free_group) {
1189 			ocs_remote_node_group_free(node->node_group);
1190 			node->node_group = NULL;
1191 			node->rnode.free_group = FALSE;
1192 		}
1193 		if (rc != OCS_HW_RTN_SUCCESS && rc != OCS_HW_RTN_SUCCESS_SYNC) {
1194 			node_printf(node, "Failed freeing HW node, rc=%d\n", rc);
1195 		}
1196 	}
1197 	/*
1198 	 * node has either been detached or is in the process of being detached,
1199 	 * call common node's initiate cleanup function
1200 	 */
1201 	ocs_node_initiate_cleanup(node);
1202 }
1203 
1204 /**
1205  * @ingroup fabric_sm
1206  * @brief Fabric node state machine: Handle the common fabric node events.
1207  *
1208  * @param funcname Function name text.
1209  * @param ctx Remote node state machine context.
1210  * @param evt Event to process.
1211  * @param arg Per event optional argument.
1212  *
1213  * @return Returns NULL.
1214  */
1215 
1216 static void *
1217 __ocs_fabric_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1218 {
1219 	ocs_node_t *node = NULL;
1220 	ocs_assert(ctx, NULL);
1221 	ocs_assert(ctx->app, NULL);
1222 	node = ctx->app;
1223 
1224 	switch(evt) {
1225 	case OCS_EVT_DOMAIN_ATTACH_OK:
1226 		break;
1227 	case OCS_EVT_SHUTDOWN:
1228 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1229 		ocs_fabric_initiate_shutdown(node);
1230 		break;
1231 
1232 	default:
1233 		/* call default event handler common to all nodes */
1234 		__ocs_node_common(funcname, ctx, evt, arg);
1235 		break;
1236 	}
1237 	return NULL;
1238 }
1239 
1240 /**
1241  * @brief Return TRUE if the remote node is an NPORT.
1242  *
1243  * @par Description
1244  * Examines the service parameters. Returns TRUE if the node reports itself as
1245  * an NPORT.
1246  *
1247  * @param remote_sparms Remote node service parameters.
1248  *
1249  * @return Returns TRUE if NPORT.
1250  */
1251 
1252 int32_t
1253 ocs_rnode_is_nport(fc_plogi_payload_t *remote_sparms)
1254 {
1255 	return (ocs_be32toh(remote_sparms->common_service_parameters[1]) & (1U << 28)) == 0;
1256 }
1257 
1258 /**
1259  * @brief Return the node's WWPN as an uint64_t.
1260  *
1261  * @par Description
1262  * The WWPN is computed from service parameters, and returned as a uint64_t.
1263  *
1264  * @param sp Pointer to service parameters.
1265  *
1266  * @return Returns WWPN.
1267  *
1268  */
1269 
1270 static uint64_t
1271 ocs_get_wwpn(fc_plogi_payload_t *sp)
1272 {
1273 	return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
1274 }
1275 
1276 /**
1277  * @brief Return TRUE if the remote node is the point-to-point winner.
1278  *
1279  * @par Description
1280  * Compares WWPNs. Returns TRUE if the remote node's WWPN is numerically
1281  * higher than the local node's WWPN.
1282  *
1283  * @param sport Pointer to the sport object.
1284  *
1285  * @return
1286  * - 0, if the remote node is the loser.
1287  * - 1, if the remote node is the winner.
1288  * - (-1), if remote node is neither the loser nor the winner
1289  *   (WWPNs match)
1290  */
1291 
1292 static int32_t
1293 ocs_rnode_is_winner(ocs_sport_t *sport)
1294 {
1295 	fc_plogi_payload_t *remote_sparms = (fc_plogi_payload_t*) sport->domain->flogi_service_params;
1296 	uint64_t remote_wwpn = ocs_get_wwpn(remote_sparms);
1297 	uint64_t local_wwpn = sport->wwpn;
1298 	char prop_buf[32];
1299 	uint64_t wwn_bump = 0;
1300 
1301 	if (ocs_get_property("wwn_bump", prop_buf, sizeof(prop_buf)) == 0) {
1302 		wwn_bump = ocs_strtoull(prop_buf, 0, 0);
1303 	}
1304 	local_wwpn ^= wwn_bump;
1305 
1306 	remote_wwpn = ocs_get_wwpn(remote_sparms);
1307 
1308 	ocs_log_debug(sport->ocs, "r: %08x %08x\n", ocs_be32toh(remote_sparms->port_name_hi), ocs_be32toh(remote_sparms->port_name_lo));
1309 	ocs_log_debug(sport->ocs, "l: %08x %08x\n", (uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
1310 
1311 	if (remote_wwpn == local_wwpn) {
1312 		ocs_log_warn(sport->ocs, "WWPN of remote node [%08x %08x] matches local WWPN\n",
1313 			(uint32_t) (local_wwpn >> 32ll), (uint32_t) local_wwpn);
1314 		return (-1);
1315 	}
1316 
1317 	return (remote_wwpn > local_wwpn);
1318 }
1319 
1320 /**
1321  * @ingroup p2p_sm
1322  * @brief Point-to-point state machine: Wait for the domain attach to complete.
1323  *
1324  * @par Description
1325  * Once the domain attach has completed, a PLOGI is sent (if we're the
1326  * winning point-to-point node).
1327  *
1328  * @param ctx Remote node state machine context.
1329  * @param evt Event to process.
1330  * @param arg Per event optional argument.
1331  *
1332  * @return Returns NULL.
1333  */
1334 
1335 void *
1336 __ocs_p2p_wait_domain_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1337 {
1338 	std_node_state_decl();
1339 
1340 	node_sm_trace();
1341 
1342 	switch(evt) {
1343 	case OCS_EVT_ENTER:
1344 		ocs_node_hold_frames(node);
1345 		break;
1346 
1347 	case OCS_EVT_EXIT:
1348 		ocs_node_accept_frames(node);
1349 		break;
1350 
1351 	case OCS_EVT_DOMAIN_ATTACH_OK: {
1352 		ocs_sport_t *sport = node->sport;
1353 		ocs_node_t *rnode;
1354 
1355 		/* this transient node (SID=0 (recv'd FLOGI) or DID=fabric (sent FLOGI))
1356 		 * is the p2p winner, will use a separate node to send PLOGI to peer
1357 		 */
1358 		ocs_assert (node->sport->p2p_winner, NULL);
1359 
1360 		rnode = ocs_node_find(sport, node->sport->p2p_remote_port_id);
1361 		if (rnode != NULL) {
1362 			/* the "other" transient p2p node has already kicked off the
1363 			 * new node from which PLOGI is sent */
1364 			node_printf(node, "Node with fc_id x%x already exists\n", rnode->rnode.fc_id);
1365 			ocs_assert (rnode != node, NULL);
1366 		} else {
1367 			/* create new node (SID=1, DID=2) from which to send PLOGI */
1368 			rnode = ocs_node_alloc(sport, sport->p2p_remote_port_id, FALSE, FALSE);
1369 			if (rnode == NULL) {
1370 				ocs_log_err(ocs, "node alloc failed\n");
1371 				return NULL;
1372 			}
1373 
1374 			ocs_fabric_notify_topology(node);
1375 			/* sm: allocate p2p remote node */
1376 			ocs_node_transition(rnode, __ocs_p2p_rnode_init, NULL);
1377 		}
1378 
1379 		/* the transient node (SID=0 or DID=fabric) has served its purpose */
1380 		if (node->rnode.fc_id == 0) {
1381 			/* if this is the SID=0 node, move to the init state in case peer
1382 			 * has restarted FLOGI discovery and FLOGI is pending
1383 			 */
1384 			/* don't send PLOGI on ocs_d_init entry */
1385 			ocs_node_init_device(node, FALSE);
1386 		} else {
1387 			/* if this is the DID=fabric node (we initiated FLOGI), shut it down */
1388 			node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1389 			ocs_fabric_initiate_shutdown(node);
1390 		}
1391 		break;
1392 	}
1393 
1394 	default:
1395 		__ocs_fabric_common(__func__, ctx, evt, arg);
1396 		return NULL;
1397 	}
1398 
1399 	return NULL;
1400 }
1401 
1402 /**
1403  * @ingroup p2p_sm
1404  * @brief Point-to-point state machine: Remote node initialization state.
1405  *
1406  * @par Description
1407  * This state is entered after winning point-to-point, and the remote node
1408  * is instantiated.
1409  *
1410  * @param ctx Remote node state machine context.
1411  * @param evt Event to process.
1412  * @param arg Per event optional argument.
1413  *
1414  * @return Returns NULL.
1415  */
1416 
1417 void *
1418 __ocs_p2p_rnode_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1419 {
1420 	ocs_node_cb_t *cbdata = arg;
1421 	std_node_state_decl();
1422 
1423 	node_sm_trace();
1424 
1425 	switch(evt) {
1426 	case OCS_EVT_ENTER:
1427 		/* sm: / send PLOGI */
1428 		ocs_send_plogi(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT, OCS_FC_ELS_DEFAULT_RETRIES, NULL, NULL);
1429 		ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp, NULL);
1430 		break;
1431 
1432 	case OCS_EVT_ABTS_RCVD:
1433 		/* sm: send BA_ACC */
1434 		ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1435 		break;
1436 
1437 	default:
1438 		__ocs_fabric_common(__func__, ctx, evt, arg);
1439 		return NULL;
1440 	}
1441 
1442 	return NULL;
1443 }
1444 
1445 /**
1446  * @ingroup p2p_sm
1447  * @brief Point-to-point node state machine: Wait for the FLOGI accept completion.
1448  *
1449  * @par Description
1450  * Wait for the FLOGI accept completion.
1451  *
1452  * @param ctx Remote node state machine context.
1453  * @param evt Event to process.
1454  * @param arg Per event optional argument.
1455  *
1456  * @return Returns NULL.
1457  */
1458 
1459 void *
1460 __ocs_p2p_wait_flogi_acc_cmpl(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1461 {
1462 	ocs_node_cb_t *cbdata = arg;
1463 	std_node_state_decl();
1464 
1465 	node_sm_trace();
1466 
1467 	switch(evt) {
1468 	case OCS_EVT_ENTER:
1469 		ocs_node_hold_frames(node);
1470 		break;
1471 
1472 	case OCS_EVT_EXIT:
1473 		ocs_node_accept_frames(node);
1474 		break;
1475 
1476 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1477 		ocs_assert(node->els_cmpl_cnt, NULL);
1478 		node->els_cmpl_cnt--;
1479 
1480 		/* sm: if p2p_winner / domain_attach */
1481 		if (node->sport->p2p_winner) {
1482 			ocs_node_transition(node, __ocs_p2p_wait_domain_attach, NULL);
1483 			if (node->sport->domain->attached &&
1484 			    !(node->sport->domain->domain_notify_pend)) {
1485 				node_printf(node, "Domain already attached\n");
1486 				ocs_node_post_event(node, OCS_EVT_DOMAIN_ATTACH_OK, NULL);
1487 			}
1488 		} else {
1489 			/* this node has served its purpose; we'll expect a PLOGI on a separate
1490 			 * node (remote SID=0x1); return this node to init state in case peer
1491 			 * restarts discovery -- it may already have (pending frames may exist).
1492 			 */
1493 			/* don't send PLOGI on ocs_d_init entry */
1494 			ocs_node_init_device(node, FALSE);
1495 		}
1496 		break;
1497 
1498 	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1499 		/* LS_ACC failed, possibly due to link down; shutdown node and wait
1500 		 * for FLOGI discovery to restart */
1501 		node_printf(node, "FLOGI LS_ACC failed, shutting down\n");
1502 		ocs_assert(node->els_cmpl_cnt, NULL);
1503 		node->els_cmpl_cnt--;
1504 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1505 		ocs_fabric_initiate_shutdown(node);
1506 		break;
1507 
1508 	case OCS_EVT_ABTS_RCVD: {
1509 		/* sm: / send BA_ACC */
1510 		ocs_bls_send_acc_hdr(cbdata->io, cbdata->header->dma.virt);
1511 		break;
1512 	}
1513 
1514 	default:
1515 		__ocs_fabric_common(__func__, ctx, evt, arg);
1516 		return NULL;
1517 	}
1518 
1519 	return NULL;
1520 }
1521 
1522 /**
1523  * @ingroup p2p_sm
1524  * @brief Point-to-point node state machine: Wait for a PLOGI response
1525  * as a point-to-point winner.
1526  *
1527  * @par Description
1528  * Wait for a PLOGI response from the remote node as a point-to-point winner.
1529  * Submit node attach request to the HW.
1530  *
1531  * @param ctx Remote node state machine context.
1532  * @param evt Event to process.
1533  * @param arg Per event optional argument.
1534  *
1535  * @return Returns NULL.
1536  */
1537 
1538 void *
1539 __ocs_p2p_wait_plogi_rsp(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1540 {
1541 	int32_t rc;
1542 	ocs_node_cb_t *cbdata = arg;
1543 	std_node_state_decl();
1544 
1545 	node_sm_trace();
1546 
1547 	switch(evt) {
1548 	case OCS_EVT_SRRS_ELS_REQ_OK: {
1549 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1550 			return NULL;
1551 		}
1552 		ocs_assert(node->els_req_cnt, NULL);
1553 		node->els_req_cnt--;
1554 		/* sm: / save sparams, ocs_node_attach */
1555 		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1556 		rc = ocs_node_attach(node);
1557 		ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
1558 		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1559 			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1560 		}
1561 		break;
1562 	}
1563 	case OCS_EVT_SRRS_ELS_REQ_FAIL: {
1564 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1565 			return NULL;
1566 		}
1567 		node_printf(node, "PLOGI failed, shutting down\n");
1568 		ocs_assert(node->els_req_cnt, NULL);
1569 		node->els_req_cnt--;
1570 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1571 		ocs_fabric_initiate_shutdown(node);
1572 		break;
1573 	}
1574 
1575 	case OCS_EVT_PLOGI_RCVD: {
1576 		fc_header_t *hdr = cbdata->header->dma.virt;
1577 		/* if we're in external loopback mode, just send LS_ACC */
1578 		if (node->ocs->external_loopback) {
1579 			ocs_send_plogi_acc(cbdata->io, ocs_be16toh(hdr->ox_id), NULL, NULL);
1580 			break;
1581 		} else{
1582 			/* if this isn't external loopback, pass to default handler */
1583 			__ocs_fabric_common(__func__, ctx, evt, arg);
1584 		}
1585 		break;
1586 	}
1587 	case OCS_EVT_PRLI_RCVD:
1588 		/* I, or I+T */
1589 		/* sent PLOGI and before completion was seen, received the
1590 		 * PRLI from the remote node (WCQEs and RCQEs come in on
1591 		 * different queues and order of processing cannot be assumed)
1592 		 * Save OXID so PRLI can be sent after the attach and continue
1593 		 * to wait for PLOGI response
1594 		 */
1595 		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1596 		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
1597 		ocs_node_transition(node, __ocs_p2p_wait_plogi_rsp_recvd_prli, NULL);
1598 		break;
1599 	default:
1600 		__ocs_fabric_common(__func__, ctx, evt, arg);
1601 		return NULL;
1602 	}
1603 
1604 	return NULL;
1605 }
1606 
1607 /**
1608  * @ingroup p2p_sm
1609  * @brief Point-to-point node state machine: Waiting on a response for a
1610  *	sent PLOGI.
1611  *
1612  * @par Description
1613  * State is entered when the point-to-point winner has sent
1614  * a PLOGI and is waiting for a response. Before receiving the
1615  * response, a PRLI was received, implying that the PLOGI was
1616  * successful.
1617  *
1618  * @param ctx Remote node state machine context.
1619  * @param evt Event to process.
1620  * @param arg Per event optional argument.
1621  *
1622  * @return Returns NULL.
1623  */
1624 
1625 void *
1626 __ocs_p2p_wait_plogi_rsp_recvd_prli(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1627 {
1628 	int32_t rc;
1629 	ocs_node_cb_t *cbdata = arg;
1630 	std_node_state_decl();
1631 
1632 	node_sm_trace();
1633 
1634 	switch(evt) {
1635 	case OCS_EVT_ENTER:
1636 		/*
1637 		 * Since we've received a PRLI, we have a port login and will
1638 		 * just need to wait for the PLOGI response to do the node
1639 		 * attach and then we can send the LS_ACC for the PRLI. If,
1640 		 * during this time, we receive FCP_CMNDs (which is possible
1641 		 * since we've already sent a PRLI and our peer may have accepted).
1642 		 * At this time, we are not waiting on any other unsolicited
1643 		 * frames to continue with the login process. Thus, it will not
1644 		 * hurt to hold frames here.
1645 		 */
1646 		ocs_node_hold_frames(node);
1647 		break;
1648 
1649 	case OCS_EVT_EXIT:
1650 		ocs_node_accept_frames(node);
1651 		break;
1652 
1653 	case OCS_EVT_SRRS_ELS_REQ_OK:	/* PLOGI response received */
1654 		/* Completion from PLOGI sent */
1655 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1656 			return NULL;
1657 		}
1658 		ocs_assert(node->els_req_cnt, NULL);
1659 		node->els_req_cnt--;
1660 		/* sm: / save sparams, ocs_node_attach */
1661 		ocs_node_save_sparms(node, cbdata->els->els_rsp.virt);
1662 		ocs_display_sparams(node->display_name, "plogi rcvd resp", 0, NULL,
1663 			((uint8_t*)cbdata->els->els_rsp.virt) + 4);
1664 		rc = ocs_node_attach(node);
1665 		ocs_node_transition(node, __ocs_p2p_wait_node_attach, NULL);
1666 		if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
1667 			ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
1668 		}
1669 		break;
1670 
1671 	case OCS_EVT_SRRS_ELS_REQ_FAIL:	/* PLOGI response received */
1672 	case OCS_EVT_SRRS_ELS_REQ_RJT:
1673 		/* PLOGI failed, shutdown the node */
1674 		if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI, __ocs_fabric_common, __func__)) {
1675 			return NULL;
1676 		}
1677 		ocs_assert(node->els_req_cnt, NULL);
1678 		node->els_req_cnt--;
1679 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1680 		ocs_fabric_initiate_shutdown(node);
1681 		break;
1682 
1683 	default:
1684 		__ocs_fabric_common(__func__, ctx, evt, arg);
1685 		return NULL;
1686 	}
1687 
1688 	return NULL;
1689 }
1690 
1691 /**
1692  * @ingroup p2p_sm
1693  * @brief Point-to-point node state machine: Wait for a point-to-point node attach
1694  * to complete.
1695  *
1696  * @par Description
1697  * Waits for the point-to-point node attach to complete.
1698  *
1699  * @param ctx Remote node state machine context.
1700  * @param evt Event to process.
1701  * @param arg Per event optional argument.
1702  *
1703  * @return Returns NULL.
1704  */
1705 
1706 void *
1707 __ocs_p2p_wait_node_attach(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1708 {
1709 	ocs_node_cb_t *cbdata = arg;
1710 	std_node_state_decl();
1711 
1712 	node_sm_trace();
1713 
1714 	switch(evt) {
1715 	case OCS_EVT_ENTER:
1716 		ocs_node_hold_frames(node);
1717 		break;
1718 
1719 	case OCS_EVT_EXIT:
1720 		ocs_node_accept_frames(node);
1721 		break;
1722 
1723 	case OCS_EVT_NODE_ATTACH_OK:
1724 		node->attached = TRUE;
1725 		switch (node->send_ls_acc) {
1726 		case OCS_NODE_SEND_LS_ACC_PRLI: {
1727 			ocs_d_send_prli_rsp(node->ls_acc_io, node->ls_acc_oxid);
1728 			node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
1729 			node->ls_acc_io = NULL;
1730 			break;
1731 		}
1732 		case OCS_NODE_SEND_LS_ACC_PLOGI: /* Can't happen in P2P */
1733 		case OCS_NODE_SEND_LS_ACC_NONE:
1734 		default:
1735 			/* Normal case for I */
1736 			/* sm: send_plogi_acc is not set / send PLOGI acc */
1737 			ocs_node_transition(node, __ocs_d_port_logged_in, NULL);
1738 			break;
1739 		}
1740 		break;
1741 
1742 	case OCS_EVT_NODE_ATTACH_FAIL:
1743 		/* node attach failed, shutdown the node */
1744 		node->attached = FALSE;
1745 		node_printf(node, "Node attach failed\n");
1746 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1747 		ocs_fabric_initiate_shutdown(node);
1748 		break;
1749 
1750 	case OCS_EVT_SHUTDOWN:
1751 		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1752 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1753 		ocs_node_transition(node, __ocs_fabric_wait_attach_evt_shutdown, NULL);
1754 		break;
1755 	case OCS_EVT_PRLI_RCVD:
1756 		node_printf(node, "%s: PRLI received before node is attached\n", ocs_sm_event_name(evt));
1757 		ocs_process_prli_payload(node, cbdata->payload->dma.virt);
1758 		ocs_send_ls_acc_after_attach(cbdata->io, cbdata->header->dma.virt, OCS_NODE_SEND_LS_ACC_PRLI);
1759 		break;
1760 	default:
1761 		__ocs_fabric_common(__func__, ctx, evt, arg);
1762 		return NULL;
1763 	}
1764 
1765 	return NULL;
1766 }
1767 
1768 /**
1769  * @brief Start up the name services node.
1770  *
1771  * @par Description
1772  * Allocates and starts up the name services node.
1773  *
1774  * @param sport Pointer to the sport structure.
1775  *
1776  * @return Returns 0 on success, or a negative error value on failure.
1777  */
1778 
1779 static int32_t
1780 ocs_start_ns_node(ocs_sport_t *sport)
1781 {
1782 	ocs_node_t *ns;
1783 
1784 	/* Instantiate a name services node */
1785 	ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
1786 	if (ns == NULL) {
1787 		ns = ocs_node_alloc(sport, FC_ADDR_NAMESERVER, FALSE, FALSE);
1788 		if (ns == NULL) {
1789 			return -1;
1790 		}
1791 	}
1792 	/* TODO: for found ns, should we be transitioning from here?
1793 	 * breaks transition only 1. from within state machine or
1794 	 * 2. if after alloc
1795 	 */
1796 	if (ns->ocs->nodedb_mask & OCS_NODEDB_PAUSE_NAMESERVER) {
1797 		ocs_node_pause(ns, __ocs_ns_init);
1798 	} else {
1799 		ocs_node_transition(ns, __ocs_ns_init, NULL);
1800 	}
1801 	return 0;
1802 }
1803 
1804 /**
1805  * @brief Start up the fabric controller node.
1806  *
1807  * @par Description
1808  * Allocates and starts up the fabric controller node.
1809  *
1810  * @param sport Pointer to the sport structure.
1811  *
1812  * @return Returns 0 on success, or a negative error value on failure.
1813  */
1814 
1815 static int32_t
1816 ocs_start_fabctl_node(ocs_sport_t *sport)
1817 {
1818 	ocs_node_t *fabctl;
1819 
1820 	fabctl = ocs_node_find(sport, FC_ADDR_CONTROLLER);
1821 	if (fabctl == NULL) {
1822 		fabctl = ocs_node_alloc(sport, FC_ADDR_CONTROLLER, FALSE, FALSE);
1823 		if (fabctl == NULL) {
1824 			return -1;
1825 		}
1826 	}
1827 	/* TODO: for found ns, should we be transitioning from here?
1828 	 * breaks transition only 1. from within state machine or
1829 	 * 2. if after alloc
1830 	 */
1831 	ocs_node_transition(fabctl, __ocs_fabctl_init, NULL);
1832 	return 0;
1833 }
1834 
1835 /**
1836  * @brief Process the GIDPT payload.
1837  *
1838  * @par Description
1839  * The GIDPT payload is parsed, and new nodes are created, as needed.
1840  *
1841  * @param node Pointer to the node structure.
1842  * @param gidpt Pointer to the GIDPT payload.
1843  * @param gidpt_len Payload length
1844  *
1845  * @return Returns 0 on success, or a negative error value on failure.
1846  */
1847 
1848 static int32_t
1849 ocs_process_gidpt_payload(ocs_node_t *node, fcct_gidpt_acc_t *gidpt, uint32_t gidpt_len)
1850 {
1851 	uint32_t i;
1852 	uint32_t j;
1853 	ocs_node_t *newnode;
1854 	ocs_sport_t *sport = node->sport;
1855 	ocs_t *ocs = node->ocs;
1856 	uint32_t port_id;
1857 	uint32_t port_count;
1858 	ocs_node_t *n;
1859 	ocs_node_t **active_nodes;
1860 	uint32_t portlist_count;
1861 	uint16_t residual;
1862 
1863 	residual = ocs_be16toh(gidpt->hdr.max_residual_size);
1864 
1865 	if (residual != 0) {
1866 		ocs_log_debug(node->ocs, "residual is %u words\n", residual);
1867 	}
1868 
1869 	if (ocs_be16toh(gidpt->hdr.cmd_rsp_code) == FCCT_HDR_CMDRSP_REJECT) {
1870 		node_printf(node, "GIDPT request failed: rsn x%x rsn_expl x%x\n",
1871 			gidpt->hdr.reason_code, gidpt->hdr.reason_code_explanation);
1872 		return -1;
1873 	}
1874 
1875 	portlist_count = (gidpt_len - sizeof(fcct_iu_header_t)) / sizeof(gidpt->port_list);
1876 
1877 	/* Count the number of nodes */
1878 	port_count = 0;
1879 	ocs_sport_lock(sport);
1880 		ocs_list_foreach(&sport->node_list, n) {
1881 			port_count ++;
1882 		}
1883 
1884 		/* Allocate a buffer for all nodes */
1885 		active_nodes = ocs_malloc(node->ocs, port_count * sizeof(*active_nodes), OCS_M_NOWAIT | OCS_M_ZERO);
1886 		if (active_nodes == NULL) {
1887 			node_printf(node, "ocs_malloc failed\n");
1888 			ocs_sport_unlock(sport);
1889 			return -1;
1890 		}
1891 
1892 		/* Fill buffer with fc_id of active nodes */
1893 		i = 0;
1894 		ocs_list_foreach(&sport->node_list, n) {
1895 			port_id = n->rnode.fc_id;
1896 			switch (port_id) {
1897 			case FC_ADDR_FABRIC:
1898 			case FC_ADDR_CONTROLLER:
1899 			case FC_ADDR_NAMESERVER:
1900 				break;
1901 			default:
1902 				if (!FC_ADDR_IS_DOMAIN_CTRL(port_id)) {
1903 					active_nodes[i++] = n;
1904 				}
1905 				break;
1906 			}
1907 		}
1908 
1909 		/* update the active nodes buffer */
1910 		for (i = 0; i < portlist_count; i ++) {
1911 			port_id = fc_be24toh(gidpt->port_list[i].port_id);
1912 
1913 			for (j = 0; j < port_count; j ++) {
1914 				if ((active_nodes[j] != NULL) && (port_id == active_nodes[j]->rnode.fc_id)) {
1915 					active_nodes[j] = NULL;
1916 				}
1917 			}
1918 
1919 			if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID)
1920 				break;
1921 		}
1922 
1923 		/* Those remaining in the active_nodes[] are now gone ! */
1924 		for (i = 0; i < port_count; i ++) {
1925 			/* if we're an initiator and the remote node is a target, then
1926 			 * post the node missing event.   if we're target and we have enabled
1927 			 * target RSCN, then post the node missing event.
1928 			 */
1929 			if (active_nodes[i] != NULL) {
1930 				if ((node->sport->enable_ini && active_nodes[i]->targ) ||
1931 				    (node->sport->enable_tgt && enable_target_rscn(ocs))) {
1932 					ocs_node_post_event(active_nodes[i], OCS_EVT_NODE_MISSING, NULL);
1933 				} else {
1934 					node_printf(node, "GID_PT: skipping non-tgt port_id x%06x\n",
1935 						active_nodes[i]->rnode.fc_id);
1936 				}
1937 			}
1938 		}
1939 		ocs_free(ocs, active_nodes, port_count * sizeof(*active_nodes));
1940 
1941 		for(i = 0; i < portlist_count; i ++) {
1942 			uint32_t port_id = fc_be24toh(gidpt->port_list[i].port_id);
1943 
1944 			/* node_printf(node, "GID_PT: port_id x%06x\n", port_id); */
1945 
1946 			/* Don't create node for ourselves or the associated NPIV ports */
1947 			if (port_id != node->rnode.sport->fc_id && !ocs_sport_find(sport->domain, port_id)) {
1948 				newnode = ocs_node_find(sport, port_id);
1949 				if (newnode) {
1950 					/* TODO: what if node deleted here?? */
1951 					if (node->sport->enable_ini && newnode->targ) {
1952 						ocs_node_post_event(newnode, OCS_EVT_NODE_REFOUND, NULL);
1953 					}
1954 					/* original code sends ADISC, has notion of "refound" */
1955 				} else {
1956 					if (node->sport->enable_ini) {
1957 						newnode = ocs_node_alloc(sport, port_id, 0, 0);
1958 						if (newnode == NULL) {
1959 							ocs_log_err(ocs, "ocs_node_alloc() failed\n");
1960 							ocs_sport_unlock(sport);
1961 							return -1;
1962 						}
1963 						/* send PLOGI automatically if initiator */
1964 						ocs_node_init_device(newnode, TRUE);
1965 					}
1966 				}
1967 			}
1968 
1969 			if (gidpt->port_list[i].ctl & FCCT_GID_PT_LAST_ID) {
1970 				break;
1971 			}
1972 		}
1973 	ocs_sport_unlock(sport);
1974 	return 0;
1975 }
1976 
1977 /**
1978  * @brief Set up the domain point-to-point parameters.
1979  *
1980  * @par Description
1981  * The remote node service parameters are examined, and various point-to-point
1982  * variables are set.
1983  *
1984  * @param sport Pointer to the sport object.
1985  *
1986  * @return Returns 0 on success, or a negative error value on failure.
1987  */
1988 
1989 int32_t
1990 ocs_p2p_setup(ocs_sport_t *sport)
1991 {
1992 	ocs_t *ocs = sport->ocs;
1993 	int32_t rnode_winner;
1994 	rnode_winner = ocs_rnode_is_winner(sport);
1995 
1996 	/* set sport flags to indicate p2p "winner" */
1997 	if (rnode_winner == 1) {
1998 		sport->p2p_remote_port_id = 0;
1999 		sport->p2p_port_id = 0;
2000 		sport->p2p_winner = FALSE;
2001 	} else if (rnode_winner == 0) {
2002 		sport->p2p_remote_port_id = 2;
2003 		sport->p2p_port_id = 1;
2004 		sport->p2p_winner = TRUE;
2005 	} else {
2006 		/* no winner; only okay if external loopback enabled */
2007 		if (sport->ocs->external_loopback) {
2008 			/*
2009 			 * External loopback mode enabled; local sport and remote node
2010 			 * will be registered with an NPortID = 1;
2011 			 */
2012 			ocs_log_debug(ocs, "External loopback mode enabled\n");
2013 			sport->p2p_remote_port_id = 1;
2014 			sport->p2p_port_id = 1;
2015 			sport->p2p_winner = TRUE;
2016 		} else {
2017 			ocs_log_warn(ocs, "failed to determine p2p winner\n");
2018 			return rnode_winner;
2019 		}
2020 	}
2021 	return 0;
2022 }
2023 
2024 /**
2025  * @brief Process the FABCTL node RSCN.
2026  *
2027  * <h3 class="desc">Description</h3>
2028  * Processes the FABCTL node RSCN payload, simply passes the event to the name server.
2029  *
2030  * @param node Pointer to the node structure.
2031  * @param cbdata Callback data to pass forward.
2032  *
2033  * @return None.
2034  */
2035 
2036 static void
2037 ocs_process_rscn(ocs_node_t *node, ocs_node_cb_t *cbdata)
2038 {
2039 	ocs_t *ocs = node->ocs;
2040 	ocs_sport_t *sport = node->sport;
2041 	ocs_node_t *ns;
2042 
2043 	/* Forward this event to the name-services node */
2044 	ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
2045 	if (ns != NULL)  {
2046 		ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, cbdata);
2047 	} else {
2048 		ocs_log_warn(ocs, "can't find name server node\n");
2049 	}
2050 }
2051