xref: /freebsd/sys/dev/ocs_fc/ocs_node.c (revision 2e3507c25e42292b45a5482e116d278f5515d04d)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /**
33  * @file
34  * OCS driver remote node handler.  This file contains code that is shared
35  * between fabric (ocs_fabric.c) and device (ocs_device.c) nodes.
36  */
37 
38 /*!
39  * @defgroup node_common Node common support
40  * @defgroup node_alloc Node allocation
41  */
42 
43 #include "ocs.h"
44 #include "ocs_els.h"
45 #include "ocs_device.h"
46 
47 #define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]"
48 #define SCSI_ITT_SIZE(ocs)	((ocs->ocs_xport == OCS_XPORT_FC) ? 4 : 8)
49 
50 #define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag
51 
52 #define scsi_io_printf(io, fmt, ...) ocs_log_debug(io->ocs, "[%s]" SCSI_IOFMT fmt, \
53 	io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__)
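/*
 * Illustrative use of scsi_io_printf() (a minimal sketch; "status" is a
 * hypothetical local, not a field defined in this driver):
 *
 *	scsi_io_printf(io, " response sent, status=%d\n", status);
 *
 * This expands to ocs_log_debug() with the owning node's display name, the
 * IO instance index, the initiator/target task tags, and the HW tag
 * prepended to the caller's format string.
 */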
54 
55 void ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *node);
56 void ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *node);
57 int ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *node);
58 int ocs_mgmt_node_set(char *parent, char *name, char *value, void *node);
59 int ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
60 		void *arg_out, uint32_t arg_out_length, void *node);
61 static ocs_mgmt_functions_t node_mgmt_functions = {
62 	.get_list_handler	=	ocs_mgmt_node_list,
63 	.get_handler		=	ocs_mgmt_node_get,
64 	.get_all_handler	=	ocs_mgmt_node_get_all,
65 	.set_handler		=	ocs_mgmt_node_set,
66 	.exec_handler		=	ocs_mgmt_node_exec,
67 };
68 
69 /**
70  * @ingroup node_common
71  * @brief Device node state machine wait for all ELS's to
72  *        complete
73  *
74  * Abort all ELS's for the given node.
75  *
76  * @param node node for which ELS's will be aborted
77  */
78 
79 void
80 ocs_node_abort_all_els(ocs_node_t *node)
81 {
82 	ocs_io_t *els;
83 	ocs_io_t *els_next;
84 	ocs_node_cb_t cbdata = {0};
85 
86 	ocs_node_hold_frames(node);
87 	ocs_lock(&node->active_ios_lock);
88 		ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) {
89 			ocs_log_debug(node->ocs, "[%s] initiate ELS abort %s\n", node->display_name, els->display_name);
90 			ocs_unlock(&node->active_ios_lock);
91 			cbdata.els = els;
92 			ocs_els_post_event(els, OCS_EVT_ABORT_ELS, &cbdata);
93 			ocs_lock(&node->active_ios_lock);
94 		}
95 	ocs_unlock(&node->active_ios_lock);
96 }
97 
98 /**
99  * @ingroup node_common
100  * @brief Handle remote node events from HW
101  *
102  * Handle remote node events from HW.   Essentially the HW event is translated into
103  * a node state machine event that is posted to the affected node.
104  *
105  * @param arg pointer to ocs
106  * @param event HW event to process
107  * @param data application specific data (pointer to the affected node)
108  *
109  * @return returns 0 for success, a negative error code value for failure.
110  */
111 int32_t
112 ocs_remote_node_cb(void *arg, ocs_hw_remote_node_event_e event, void *data)
113 {
114 	ocs_t *ocs = arg;
115 	ocs_sm_event_t	sm_event = OCS_EVT_LAST;
116 	ocs_remote_node_t *rnode = data;
117 	ocs_node_t *node = rnode->node;
118 
119 	switch (event) {
120 	case OCS_HW_NODE_ATTACH_OK:
121 		sm_event = OCS_EVT_NODE_ATTACH_OK;
122 		break;
123 
124 	case OCS_HW_NODE_ATTACH_FAIL:
125 		sm_event = OCS_EVT_NODE_ATTACH_FAIL;
126 		break;
127 
128 	case OCS_HW_NODE_FREE_OK:
129 		sm_event = OCS_EVT_NODE_FREE_OK;
130 		break;
131 
132 	case OCS_HW_NODE_FREE_FAIL:
133 		sm_event = OCS_EVT_NODE_FREE_FAIL;
134 		break;
135 
136 	default:
137 		ocs_log_test(ocs, "unhandled event %#x\n", event);
138 		return -1;
139 	}
140 
141 	/* If we're using HLM, forward the NODE_ATTACH_OK/FAIL event to all nodes in the node group */
142 	if ((node->node_group != NULL) &&
143 			((sm_event == OCS_EVT_NODE_ATTACH_OK) || (sm_event == OCS_EVT_NODE_ATTACH_FAIL))) {
144 		ocs_node_t *n = NULL;
145 		uint8_t		attach_ok = sm_event == OCS_EVT_NODE_ATTACH_OK;
146 
147 		ocs_sport_lock(node->sport);
148 		{
149 			ocs_list_foreach(&node->sport->node_list, n) {
150 				if (node == n) {
151 					continue;
152 				}
153 				ocs_node_lock(n);
154 					if ((!n->rnode.attached) && (node->node_group == n->node_group)) {
155 						n->rnode.attached = attach_ok;
156 						node_printf(n, "rpi[%d] deferred HLM node attach %s posted\n",
157 								n->rnode.index, attach_ok ? "ok" : "fail");
158 						ocs_node_post_event(n, sm_event, NULL);
159 					}
160 				ocs_node_unlock(n);
161 			}
162 		}
163 
164 		ocs_sport_unlock(node->sport);
165 	}
166 
167 	ocs_node_post_event(node, sm_event, NULL);
168 
169 	return 0;
170 }
171 
172 /**
173  * @ingroup node_alloc
174  * @brief Find an FC node structure given the FC port ID
175  *
176  * @param sport the SPORT to search
177  * @param port_id FC port ID
178  *
179  * @return pointer to the object or NULL if not found
180  */
181 ocs_node_t *
182 ocs_node_find(ocs_sport_t *sport, uint32_t port_id)
183 {
184 	ocs_node_t *node;
185 
186 	ocs_assert(sport->lookup, NULL);
187 	ocs_sport_lock(sport);
188 		node = spv_get(sport->lookup, port_id);
189 	ocs_sport_unlock(sport);
190 	return node;
191 }
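/*
 * Minimal lookup sketch (assumes a valid sport; FC_ADDR_NAMESERVER is one of
 * the well-known addresses used elsewhere in this file):
 *
 *	ocs_node_t *ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
 *	if (ns != NULL) {
 *		node_printf(ns, "name server node found\n");
 *	}
 */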
192 
193 /**
194  * @ingroup node_alloc
195  * @brief Find an FC node structure given the WWPN
196  *
197  * @param sport the SPORT to search
198  * @param wwpn the WWPN to search for (host endian)
199  *
200  * @return pointer to the object or NULL if not found
201  */
202 ocs_node_t *
203 ocs_node_find_wwpn(ocs_sport_t *sport, uint64_t wwpn)
204 {
205 	ocs_node_t *node = NULL;
206 
207 	ocs_assert(sport, NULL);
208 
209 	ocs_sport_lock(sport);
210 		ocs_list_foreach(&sport->node_list, node) {
211 			if (ocs_node_get_wwpn(node) == wwpn) {
212 				ocs_sport_unlock(sport);
213 				return node;
214 			}
215 		}
216 	ocs_sport_unlock(sport);
217 	return NULL;
218 }
219 
220 /**
221  * @ingroup node_alloc
222  * @brief allocate node object pool
223  *
224  * A pool of ocs_node_t objects is allocated.
225  *
226  * @param ocs pointer to driver instance context
227  * @param node_count count of nodes to allocate
228  *
229  * @return returns 0 for success, a negative error code value for failure.
230  */
231 
232 int32_t
233 ocs_node_create_pool(ocs_t *ocs, uint32_t node_count)
234 {
235 	ocs_xport_t *xport = ocs->xport;
236 	uint32_t i;
237 	ocs_node_t *node;
238 	uint32_t max_sge;
239 	uint32_t num_sgl;
240 	uint64_t max_xfer_size;
241 	int32_t rc;
242 
243 	xport->nodes_count = node_count;
244 
245 	xport->nodes = ocs_malloc(ocs, node_count * sizeof(ocs_node_t *), OCS_M_ZERO | OCS_M_NOWAIT);
246 	if (xport->nodes == NULL) {
247 		ocs_log_err(ocs, "node ptrs allocation failed\n");
248 		return -1;
249 	}
250 
251 	if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge) &&
252 	    0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &num_sgl)) {
253 		max_xfer_size = (max_sge * (uint64_t)num_sgl);
254 	} else {
255 		max_xfer_size = 65536;
256 	}
257 
258 	if (max_xfer_size > 65536)
259 		max_xfer_size = 65536;
260 
261 	ocs_list_init(&xport->nodes_free_list, ocs_node_t, link);
262 
263 	for (i = 0; i < node_count; i ++) {
264 		node = ocs_malloc(ocs, sizeof(ocs_node_t), OCS_M_ZERO | OCS_M_NOWAIT);
265 		if (node == NULL) {
266 			ocs_log_err(ocs, "node allocation failed\n");
267 			goto error;
268 		}
269 
270 		/* Assign any persistent field values */
271 		node->instance_index = i;
272 		node->max_wr_xfer_size = max_xfer_size;
273 		node->rnode.indicator = UINT32_MAX;
274 
275 		rc = ocs_dma_alloc(ocs, &node->sparm_dma_buf, 256, 16);
276 		if (rc) {
277 			ocs_free(ocs, node, sizeof(ocs_node_t));
278 			ocs_log_err(ocs, "ocs_dma_alloc failed: %d\n", rc);
279 			goto error;
280 		}
281 
282 		xport->nodes[i] = node;
283 		ocs_list_add_tail(&xport->nodes_free_list, node);
284 	}
285 	return 0;
286 
287 error:
288 	ocs_node_free_pool(ocs);
289 	return -1;
290 }
291 
292 /**
293  * @ingroup node_alloc
294  * @brief free node object pool
295  *
296  * The pool of previously allocated node objects is freed
297  *
298  * @param ocs pointer to driver instance context
299  *
300  * @return none
301  */
302 
303 void
304 ocs_node_free_pool(ocs_t *ocs)
305 {
306 	ocs_xport_t *xport = ocs->xport;
307 	ocs_node_t *node;
308 	uint32_t i;
309 
310 	if (!xport->nodes)
311 		return;
312 
313 	ocs_device_lock(ocs);
314 
315 	for (i = 0; i < xport->nodes_count; i ++) {
316 		node = xport->nodes[i];
317 		if (node) {
318 			/* free sparam_dma_buf */
319 			ocs_dma_free(ocs, &node->sparm_dma_buf);
320 			ocs_free(ocs, node, sizeof(ocs_node_t));
321 		}
322 		xport->nodes[i] = NULL;
323 	}
324 
325 	ocs_free(ocs, xport->nodes, (xport->nodes_count * sizeof(ocs_node_t *)));
326 
327 	ocs_device_unlock(ocs);
328 }
329 
330 /**
331  * @ingroup node_alloc
332  * @brief return pointer to node object given instance index
333  *
334  * A pointer to the node object given by an instance index is returned.
335  *
336  * @param ocs pointer to driver instance context
337  * @param index instance index
338  *
339  * @return returns pointer to node object, or NULL
340  */
341 
342 ocs_node_t *
343 ocs_node_get_instance(ocs_t *ocs, uint32_t index)
344 {
345 	ocs_xport_t *xport = ocs->xport;
346 	ocs_node_t *node = NULL;
347 
348 	if (index >= (xport->nodes_count)) {
349 		ocs_log_test(ocs, "invalid index: %d\n", index);
350 		return NULL;
351 	}
352 	node = xport->nodes[index];
353 	return node->attached ? node : NULL;
354 }
355 
356 /**
357  * @ingroup node_alloc
358  * @brief Allocate an fc node structure and add to node list
359  *
360  * @param sport pointer to the SPORT from which this node is allocated
361  * @param port_id FC port ID of new node
362  * @param init Port is an initiator (sent a PLOGI)
363  * @param targ Port is potentially a target
364  *
365  * @return pointer to the object or NULL if none available
366  */
367 
368 ocs_node_t *
369 ocs_node_alloc(ocs_sport_t *sport, uint32_t port_id, uint8_t init, uint8_t targ)
370 {
371 	int32_t rc;
372 	ocs_node_t *node = NULL;
373 	uint32_t instance_index;
374 	uint32_t max_wr_xfer_size;
375 	ocs_t *ocs = sport->ocs;
376 	ocs_xport_t *xport = ocs->xport;
377 	ocs_dma_t sparm_dma_buf;
378 
379 	ocs_assert(sport, NULL);
380 
381 	if (sport->shutting_down) {
382 		ocs_log_debug(ocs, "node allocation when shutting down %06x\n", port_id);
383 		return NULL;
384 	}
385 
386 	ocs_device_lock(ocs);
387 		node = ocs_list_remove_head(&xport->nodes_free_list);
388 	ocs_device_unlock(ocs);
389 	if (node == NULL) {
390 		ocs_log_err(ocs, "node allocation failed %06x\n", port_id);
391 		return NULL;
392 	}
393 
394 	/* Save persistent values across memset zero */
395 	instance_index = node->instance_index;
396 	max_wr_xfer_size = node->max_wr_xfer_size;
397 	sparm_dma_buf = node->sparm_dma_buf;
398 
399 	ocs_memset(node, 0, sizeof(*node));
400 	node->instance_index = instance_index;
401 	node->max_wr_xfer_size = max_wr_xfer_size;
402 	node->sparm_dma_buf = sparm_dma_buf;
403 	node->rnode.indicator = UINT32_MAX;
404 
405 	node->sport = sport;
406 	ocs_sport_lock(sport);
407 
408 		node->ocs = ocs;
409 		node->init = init;
410 		node->targ = targ;
411 
412 		rc = ocs_hw_node_alloc(&ocs->hw, &node->rnode, port_id, sport);
413 		if (rc) {
414 			ocs_log_err(ocs, "ocs_hw_node_alloc failed: %d\n", rc);
415 			ocs_sport_unlock(sport);
416 
417 			/* Return back to pool. */
418 			ocs_device_lock(ocs);
419 			ocs_list_add_tail(&xport->nodes_free_list, node);
420 			ocs_device_unlock(ocs);
421 
422 			return NULL;
423 		}
424 		ocs_list_add_tail(&sport->node_list, node);
425 
426 		ocs_node_lock_init(node);
427 		ocs_lock_init(ocs, &node->pend_frames_lock, "pend_frames_lock[%d]", node->instance_index);
428 		ocs_list_init(&node->pend_frames, ocs_hw_sequence_t, link);
429 		ocs_lock_init(ocs, &node->active_ios_lock, "active_ios[%d]", node->instance_index);
430 		ocs_list_init(&node->active_ios, ocs_io_t, link);
431 		ocs_list_init(&node->els_io_pend_list, ocs_io_t, link);
432 		ocs_list_init(&node->els_io_active_list, ocs_io_t, link);
433 		ocs_scsi_io_alloc_enable(node);
434 
435 		/* zero the service parameters */
436 		ocs_memset(node->sparm_dma_buf.virt, 0, node->sparm_dma_buf.size);
437 
438 		node->rnode.node = node;
439 		node->sm.app = node;
440 		node->evtdepth = 0;
441 
442 		ocs_node_update_display_name(node);
443 
444 		spv_set(sport->lookup, port_id, node);
445 	ocs_sport_unlock(sport);
446 	node->mgmt_functions = &node_mgmt_functions;
447 
448 	return node;
449 }
450 
451 /**
452  * @ingroup node_alloc
453  * @brief free a node structure
454  *
455  * The node structure given by 'node' is free'd
456  *
457  * @param node the node to free
458  *
459  * @return returns 0 for success, a negative error code value for failure.
460  */
461 
462 int32_t
463 ocs_node_free(ocs_node_t *node)
464 {
465 	ocs_sport_t *sport;
466 	ocs_t *ocs;
467 	ocs_xport_t *xport;
468 	ocs_hw_rtn_e rc = 0;
469 	ocs_node_t *ns = NULL;
470 	int post_all_free = FALSE;
471 
472 	ocs_assert(node, -1);
473 	ocs_assert(node->sport, -1);
474 	ocs_assert(node->ocs, -1);
475 	sport = node->sport;
476 	ocs_assert(sport, -1);
477 	ocs = node->ocs;
478 	ocs_assert(ocs->xport, -1);
479 	xport = ocs->xport;
480 
481 	node_printf(node, "Free'd\n");
482 
483 	if (node->refound) {
484 		/*
485 		 * Save the name server node. We will send fake RSCN event at
486 		 * the end to handle ignored RSCN event during node deletion
487 		 */
488 		ns = ocs_node_find(node->sport, FC_ADDR_NAMESERVER);
489 	}
490 
491 	/* Remove from node list */
492 	ocs_sport_lock(sport);
493 		ocs_list_remove(&sport->node_list, node);
494 
495 		/* Free HW resources */
496 		if (OCS_HW_RTN_IS_ERROR((rc = ocs_hw_node_free_resources(&ocs->hw, &node->rnode)))) {
497 			ocs_log_test(ocs, "ocs_hw_node_free failed: %d\n", rc);
498 			rc = -1;
499 		}
500 
501 		/* if the gidpt_delay_timer is still running, then delete it */
502 		if (ocs_timer_pending(&node->gidpt_delay_timer)) {
503 			ocs_del_timer(&node->gidpt_delay_timer);
504 		}
505 
506 		if (node->fcp2device) {
507 			ocs_del_crn(node);
508 		}
509 
510 		/* remove entry from sparse vector list */
511 		if (sport->lookup == NULL) {
512 			ocs_log_test(node->ocs, "assertion failed: sport lookup is NULL\n");
513 			ocs_sport_unlock(sport);
514 			return -1;
515 		}
516 
517 		spv_set(sport->lookup, node->rnode.fc_id, NULL);
518 
519 		/*
520 		 * If the node_list is empty, then post an ALL_CHILD_NODES_FREE event to the sport,
521 		 * after the lock is released.  The sport may be free'd as a result of the event.
522 		 */
523 		if (ocs_list_empty(&sport->node_list)) {
524 			post_all_free = TRUE;
525 		}
526 
527 	ocs_sport_unlock(sport);
528 
529 	if (post_all_free) {
530 		ocs_sm_post_event(&sport->sm, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
531 	}
532 
533 	node->sport = NULL;
534 	node->sm.current_state = NULL;
535 
536 	ocs_node_lock_free(node);
537 	ocs_lock_free(&node->pend_frames_lock);
538 	ocs_lock_free(&node->active_ios_lock);
539 
540 	/* return to free list */
541 	ocs_device_lock(ocs);
542 		ocs_list_add_tail(&xport->nodes_free_list, node);
543 	ocs_device_unlock(ocs);
544 
545 	if (ns != NULL) {
546 		/* sending fake RSCN event to name server node */
547 		ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, NULL);
548 	}
549 
550 	return rc;
551 }
552 
553 /**
554  * @brief free memory resources of a node object
555  *
556  * The node object's child objects are freed, after which the
557  * node object itself is freed.
558  *
559  * @param node pointer to a node object
560  *
561  * @return none
562  */
563 
564 void
565 ocs_node_force_free(ocs_node_t *node)
566 {
567 	ocs_io_t *io;
568 	ocs_io_t *next;
569 	ocs_io_t *els;
570 	ocs_io_t *els_next;
571 
572 	/* shutdown sm processing */
573 	ocs_sm_disable(&node->sm);
574 	ocs_strncpy(node->prev_state_name, node->current_state_name, sizeof(node->prev_state_name));
575 	ocs_strncpy(node->current_state_name, "disabled", sizeof(node->current_state_name));
576 
577 	/* Let the backend cleanup if needed */
578 	ocs_scsi_notify_node_force_free(node);
579 
580 	ocs_lock(&node->active_ios_lock);
581 		ocs_list_foreach_safe(&node->active_ios, io, next) {
582 			ocs_list_remove(&io->node->active_ios, io);
583 			ocs_io_free(node->ocs, io);
584 		}
585 	ocs_unlock(&node->active_ios_lock);
586 
587 	/* free all pending ELS IOs */
588 	ocs_lock(&node->active_ios_lock);
589 		ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
590 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
591 			ocs_list_remove(&node->els_io_pend_list, els);
592 
593 			ocs_io_free(node->ocs, els);
594 		}
595 	ocs_unlock(&node->active_ios_lock);
596 
597 	/* free all active ELS IOs */
598 	ocs_lock(&node->active_ios_lock);
599 		ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) {
600 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
601 			ocs_list_remove(&node->els_io_active_list, els);
602 
603 			ocs_io_free(node->ocs, els);
604 		}
605 	ocs_unlock(&node->active_ios_lock);
606 
607 	/* manually purge pending frames (if any) */
608 	ocs_node_purge_pending(node);
609 
610 	ocs_node_free(node);
611 }
612 
613 /**
614  * @ingroup node_common
615  * @brief Perform HW call to attach a remote node
616  *
617  * @param node pointer to node object
618  *
619  * @return 0 on success, non-zero otherwise
620  */
621 int32_t
622 ocs_node_attach(ocs_node_t *node)
623 {
624 	int32_t rc = 0;
625 	ocs_sport_t *sport = node->sport;
626 	ocs_domain_t *domain = sport->domain;
627 	ocs_t *ocs = node->ocs;
628 
629 	if (!domain->attached) {
630 		ocs_log_test(ocs, "Warning: ocs_node_attach with unattached domain\n");
631 		return -1;
632 	}
633 	/* Update node->wwpn/wwnn */
634 
635 	ocs_node_build_eui_name(node->wwpn, sizeof(node->wwpn), ocs_node_get_wwpn(node));
636 	ocs_node_build_eui_name(node->wwnn, sizeof(node->wwnn), ocs_node_get_wwnn(node));
637 
638 	if (ocs->enable_hlm) {
639 		ocs_node_group_init(node);
640 	}
641 
642 	ocs_dma_copy_in(&node->sparm_dma_buf, node->service_params+4, sizeof(node->service_params)-4);
643 
644 	/* take lock to protect node->rnode.attached */
645 	ocs_node_lock(node);
646 		rc = ocs_hw_node_attach(&ocs->hw, &node->rnode, &node->sparm_dma_buf);
647 		if (OCS_HW_RTN_IS_ERROR(rc)) {
648 			ocs_log_test(ocs, "ocs_hw_node_attach failed: %d\n", rc);
649 		}
650 	ocs_node_unlock(node);
651 
652 	return rc;
653 }
654 
655 /**
656  * @ingroup node_common
657  * @brief Generate text for a node's fc_id
658  *
659  * The text for a node's fc_id is generated, either as a well-known name or a 6-digit
660  * hex value.
661  *
662  * @param fc_id fc_id
663  * @param buffer text buffer
664  * @param buffer_length text buffer length in bytes
665  *
666  * @return none
667  */
668 
669 void
670 ocs_node_fcid_display(uint32_t fc_id, char *buffer, uint32_t buffer_length)
671 {
672 	switch (fc_id) {
673 	case FC_ADDR_FABRIC:
674 		ocs_snprintf(buffer, buffer_length, "fabric");
675 		break;
676 	case FC_ADDR_CONTROLLER:
677 		ocs_snprintf(buffer, buffer_length, "fabctl");
678 		break;
679 	case FC_ADDR_NAMESERVER:
680 		ocs_snprintf(buffer, buffer_length, "nserve");
681 		break;
682 	default:
683 		if (FC_ADDR_IS_DOMAIN_CTRL(fc_id)) {
684 			ocs_snprintf(buffer, buffer_length, "dctl%02x",
685 				FC_ADDR_GET_DOMAIN_CTRL(fc_id));
686 		} else {
687 			ocs_snprintf(buffer, buffer_length, "%06x", fc_id);
688 		}
689 		break;
690 	}
691 
692 }
693 
694 /**
695  * @brief update the node's display name
696  *
697  * The node's display name is updated. This is sometimes needed because the sport
698  * portion of the name is updated after the node is allocated.
699  *
700  * @param node pointer to the node object
701  *
702  * @return none
703  */
704 
705 void
706 ocs_node_update_display_name(ocs_node_t *node)
707 {
708 	uint32_t port_id = node->rnode.fc_id;
709 	ocs_sport_t *sport = node->sport;
710 	char portid_display[16];
711 
712 	ocs_assert(sport);
713 
714 	ocs_node_fcid_display(port_id, portid_display, sizeof(portid_display));
715 
716 	ocs_snprintf(node->display_name, sizeof(node->display_name), "%s.%s", sport->display_name, portid_display);
717 }
718 
719 /**
720  * @brief cleans up an XRI for the pending link services accept by aborting the
721  *         XRI if required.
722  *
723  * <h3 class="desc">Description</h3>
724  * This function is called when the LS accept is not sent.
725  *
726  * @param node Node for which the pending LS accept IO should be cleaned up
727  */
728 
729 void
730 ocs_node_send_ls_io_cleanup(ocs_node_t *node)
731 {
732 	ocs_t *ocs = node->ocs;
733 
734 	if (node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) {
735 		ocs_assert(node->ls_acc_io);
736 		ocs_log_debug(ocs, "[%s] cleaning up LS_ACC oxid=0x%x\n",
737 			node->display_name, node->ls_acc_oxid);
738 
739 		node->ls_acc_io->hio = NULL;
740 		ocs_els_io_free(node->ls_acc_io);
741 		node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
742 		node->ls_acc_io = NULL;
743 	}
744 }
745 
746 /**
747  * @ingroup node_common
748  * @brief state: shutdown a node
749  *
750  * A node is shut down.
751  *
752  * @param ctx remote node sm context
753  * @param evt event to process
754  * @param arg per event optional argument
755  *
756  * @return returns NULL
757  *
758  * @note
759  */
760 
761 void *
762 __ocs_node_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
763 {
764 	int32_t rc;
765 	std_node_state_decl();
766 
767 	node_sm_trace();
768 
769 	switch(evt) {
770 	case OCS_EVT_ENTER: {
771 		ocs_node_hold_frames(node);
772 		ocs_assert(ocs_node_active_ios_empty(node), NULL);
773 		ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
774 
775 		/* by default, we will be freeing node after we unwind */
776 		node->req_free = 1;
777 
778 		switch (node->shutdown_reason) {
779 		case OCS_NODE_SHUTDOWN_IMPLICIT_LOGO:
780 			/* sm: if shutdown reason is implicit logout / ocs_node_attach
781 			 * Node shutdown b/c of PLOGI received when node already
782 			 * logged in. We have PLOGI service parameters, so submit
783 			 * node attach; we won't be freeing this node
784 			 */
785 
786 			/* currently, only case for implicit logo is PLOGI recvd. Thus,
787 			 * node's ELS IO pending list won't be empty (PLOGI will be on it)
788 			 */
789 			ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
790 			node_printf(node, "Shutdown reason: implicit logout, re-authenticate\n");
791 
792 			ocs_scsi_io_alloc_enable(node);
793 
794 			/* Re-attach node with the same HW node resources */
795 			node->req_free = 0;
796 			rc = ocs_node_attach(node);
797 			ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
798 			if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
799 				ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
800 			}
801 			break;
802 		case OCS_NODE_SHUTDOWN_EXPLICIT_LOGO: {
803 			int8_t pend_frames_empty;
804 
805 			/* cleanup any pending LS_ACC ELSs */
806 			ocs_node_send_ls_io_cleanup(node);
807 			ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL);
808 
809 			ocs_lock(&node->pend_frames_lock);
810 				pend_frames_empty = ocs_list_empty(&node->pend_frames);
811 			ocs_unlock(&node->pend_frames_lock);
812 
813 			/* there are two scenarios where we want to keep this node alive:
814 			 * 1. there are pending frames that need to be processed or
815 			 * 2. we're an initiator and the remote node is a target and we
816 			 *    need to re-authenticate
817 			 */
818 			node_printf(node, "Shutdown: explicit logo pend=%d sport.ini=%d node.tgt=%d\n",
819 				    !pend_frames_empty, node->sport->enable_ini, node->targ);
820 
821 			if ((!pend_frames_empty) || (node->sport->enable_ini && node->targ)) {
822 				uint8_t send_plogi = FALSE;
823 				if (node->sport->enable_ini && node->targ) {
824 					/* we're an initiator and node shutting down is a target; we'll
825 					 * need to re-authenticate in initial state
826 					 */
827 					send_plogi = TRUE;
828 				}
829 
830 				/* transition to __ocs_d_init (will retain HW node resources) */
831 				ocs_scsi_io_alloc_enable(node);
832 				node->req_free = 0;
833 
834 				/* either pending frames exist, or we're re-authenticating with PLOGI
835 				 * (or both); in either case, return to initial state
836 				 */
837 				ocs_node_init_device(node, send_plogi);
838 			}
839 			/* else: let node shutdown occur */
840 			break;
841 		}
842 		case OCS_NODE_SHUTDOWN_DEFAULT:
843 		default:
844 			/* shutdown due to link down, node going away (xport event) or
845 			 * sport shutdown, purge pending and proceed to cleanup node
846 			 */
847 
848 			/* cleanup any pending LS_ACC ELSs */
849 			ocs_node_send_ls_io_cleanup(node);
850 			ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL);
851 
852 			node_printf(node, "Shutdown reason: default, purge pending\n");
853 			ocs_node_purge_pending(node);
854 			break;
855 		}
856 
857 		break;
858 	}
859 	case OCS_EVT_EXIT:
860 		ocs_node_accept_frames(node);
861 		break;
862 
863 	default:
864 		__ocs_node_common(__func__, ctx, evt, arg);
865 		return NULL;
866 	}
867 
868 	return NULL;
869 }
870 
871 /**
872  * @ingroup node_common
873  * @brief Checks to see if ELS's have been quiesced
874  *
875  * Check if ELS's have been quiesced. If so, transition to the
876  * next state in the shutdown process.
877  *
878  * @param node Node for which ELS's are checked
879  *
880  * @return Returns 1 if ELS's have been quiesced, 0 otherwise.
881  */
882 static int
883 ocs_node_check_els_quiesced(ocs_node_t *node)
884 {
885 	ocs_assert(node, -1);
886 	/* check to see if ELS requests and completions are quiesced */
887 	/* check to see if ELS requests, completions are quiesced */
888 	if ((node->els_req_cnt == 0) && (node->els_cmpl_cnt == 0) &&
889 	    ocs_els_io_list_empty(node, &node->els_io_active_list)) {
890 		if (!node->attached) {
891 			/* hw node detach already completed, proceed */
892 			node_printf(node, "HW node not attached\n");
893 			ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
894 		} else {
895 			/* hw node detach hasn't completed, transition and wait */
896 			node_printf(node, "HW node still attached\n");
897 			ocs_node_transition(node, __ocs_node_wait_node_free, NULL);
898 		}
899 		return 1;
900 	}
901 	return 0;
902 }
903 
904 /**
905  * @ingroup node_common
906  * @brief Initiate node IO cleanup.
907  *
908  * Note: this function must be called with a non-attached node
909  * or a node for which the node detach (ocs_hw_node_detach())
910  * has already been initiated.
911  *
912  * @param node Node for which shutdown is initiated
913  *
914  * @return Returns None.
915  */
916 
917 void
918 ocs_node_initiate_cleanup(ocs_node_t *node)
919 {
920 	ocs_io_t *els;
921 	ocs_io_t *els_next;
922 	ocs_t *ocs;
923 	ocs_assert(node);
924 	ocs = node->ocs;
925 
926 	/* first cleanup ELS's that are pending (not yet active) */
927 	ocs_lock(&node->active_ios_lock);
928 		ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
929 			/* skip the ELS IO for which a response will be sent after shutdown */
930 			if ((node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) &&
931 			    (els == node->ls_acc_io)) {
932 				continue;
933 			}
934 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
935 			node_printf(node, "Freeing pending els %s\n", els->display_name);
936 			ocs_list_remove(&node->els_io_pend_list, els);
937 
938 			ocs_io_free(node->ocs, els);
939 		}
940 	ocs_unlock(&node->active_ios_lock);
941 
942 	if (node->ls_acc_io && node->ls_acc_io->hio != NULL) {
943 		/*
944 		 * if there's an IO that will result in an LS_ACC after
945 		 * shutdown and its HW IO is non-NULL, it better be an
946 		 * implicit logout in vanilla sequence coalescing. In this
947 		 * case, force the LS_ACC to go out on another XRI (hio)
948 		 * since the previous will have been aborted by the UNREG_RPI
949 		 */
950 		ocs_assert(node->shutdown_reason == OCS_NODE_SHUTDOWN_IMPLICIT_LOGO);
951 		ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI);
952 		node_printf(node, "invalidating ls_acc_io due to implicit logo\n");
953 
954 		/* No need to abort because the unreg_rpi takes care of it, just free */
955 		ocs_hw_io_free(&ocs->hw, node->ls_acc_io->hio);
956 
957 		/* NULL out hio to force the LS_ACC to grab a new XRI */
958 		node->ls_acc_io->hio = NULL;
959 	}
960 
961 	/*
962 	 * if ELS's have already been quiesced, will move to next state
963 	 * if ELS's have not been quiesced, abort them
964 	 */
965 	if (ocs_node_check_els_quiesced(node) == 0) {
966 		/*
967 		 * Abort all ELS's since ELS's won't be aborted by HW
968 		 * node free.
969 		 */
970 		ocs_node_abort_all_els(node);
971 		ocs_node_transition(node, __ocs_node_wait_els_shutdown, NULL);
972 	}
973 }
974 
975 /**
976  * @ingroup node_common
977  * @brief Node state machine: Wait for all ELSs to complete.
978  *
979  * <h3 class="desc">Description</h3>
980  * State waits for all ELSs to complete after aborting all
981  * outstanding ELSs.
982  *
983  * @param ctx Remote node state machine context.
984  * @param evt Event to process.
985  * @param arg Per event optional argument.
986  *
987  * @return Returns NULL.
988  */
989 
990 void *
991 __ocs_node_wait_els_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
992 {
993 	uint8_t check_quiesce = FALSE;
994 	std_node_state_decl();
995 
996 	node_sm_trace();
997 
998 	switch(evt) {
999 	case OCS_EVT_ENTER: {
1000 		ocs_node_hold_frames(node);
1001 		if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1002 			node_printf(node, "All ELS IOs complete\n");
1003 			check_quiesce = TRUE;
1004 		}
1005 		break;
1006 	}
1007 	case OCS_EVT_EXIT:
1008 		ocs_node_accept_frames(node);
1009 		break;
1010 
1011 	case OCS_EVT_SRRS_ELS_REQ_OK:
1012 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1013 	case OCS_EVT_SRRS_ELS_REQ_RJT:
1014 	case OCS_EVT_ELS_REQ_ABORTED:
1015 		ocs_assert(node->els_req_cnt, NULL);
1016 		node->els_req_cnt--;
1017 		check_quiesce = TRUE;
1018 		break;
1019 
1020 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1021 	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1022 		ocs_assert(node->els_cmpl_cnt, NULL);
1023 		node->els_cmpl_cnt--;
1024 		check_quiesce = TRUE;
1025 		break;
1026 
1027 	case OCS_EVT_ALL_CHILD_NODES_FREE:
1028 		/* all ELS IO's complete */
1029 		node_printf(node, "All ELS IOs complete\n");
1030 		ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
1031 		check_quiesce = TRUE;
1032 		break;
1033 
1034 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1035 		break;
1036 
1037 	case OCS_EVT_DOMAIN_ATTACH_OK:
1038 		/* don't care about domain_attach_ok */
1039 		break;
1040 
1041 	/* ignore shutdown events as we're already in shutdown path */
1042 	case OCS_EVT_SHUTDOWN:
1043 		/* have default shutdown event take precedence */
1044 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1045 		/* fall through */
1046 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1047 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1048 		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1049 		break;
1050 
1051 	default:
1052 		__ocs_node_common(__func__, ctx, evt, arg);
1053 		return NULL;
1054 	}
1055 
1056 	if (check_quiesce) {
1057 		ocs_node_check_els_quiesced(node);
1058 	}
1059 	return NULL;
1060 }
1061 
1062 /**
1063  * @ingroup node_common
1064  * @brief Node state machine: Wait for a HW node free event to
1065  * complete.
1066  *
1067  * <h3 class="desc">Description</h3>
1068  * State waits for the node free event to be received from the HW.
1069  *
1070  * @param ctx Remote node state machine context.
1071  * @param evt Event to process.
1072  * @param arg Per event optional argument.
1073  *
1074  * @return Returns NULL.
1075  */
1076 
1077 void *
1078 __ocs_node_wait_node_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1079 {
1080 	std_node_state_decl();
1081 
1082 	node_sm_trace();
1083 
1084 	switch(evt) {
1085 	case OCS_EVT_ENTER:
1086 		ocs_node_hold_frames(node);
1087 		break;
1088 
1089 	case OCS_EVT_EXIT:
1090 		ocs_node_accept_frames(node);
1091 		break;
1092 
1093 	case OCS_EVT_NODE_FREE_OK:
1094 		/* node is officially no longer attached */
1095 		node->attached = FALSE;
1096 		ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
1097 		break;
1098 
1099 	case OCS_EVT_ALL_CHILD_NODES_FREE:
1100 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1101 		/* As IOs and ELS IO's complete we expect to get these events */
1102 		break;
1103 
1104 	case OCS_EVT_DOMAIN_ATTACH_OK:
1105 		/* don't care about domain_attach_ok */
1106 		break;
1107 
1108 	/* ignore shutdown events as we're already in shutdown path */
1109 	case OCS_EVT_SHUTDOWN:
1110 		/* have default shutdown event take precedence */
1111 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1112 		/* Fall through */
1113 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1114 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1115 		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1116 		break;
1117 	default:
1118 		__ocs_node_common(__func__, ctx, evt, arg);
1119 		return NULL;
1120 	}
1121 
1122 	return NULL;
1123 }
1124 
1125 /**
1126  * @ingroup node_common
1127  * @brief state: initiate node shutdown
1128  *
1129  * This state is entered when a node receives a shutdown event; it waits
1130  * for all the active IOs and ELS IOs associated with the node to complete.
1131  *
1132  * @param ctx remote node sm context
1133  * @param evt event to process
1134  * @param arg per event optional argument
1135  *
1136  * @return returns NULL
1137  */
1138 
1139 void *
1140 __ocs_node_wait_ios_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1141 {
1142 	ocs_io_t *io;
1143 	ocs_io_t *next;
1144 	std_node_state_decl();
1145 
1146 	node_sm_trace();
1147 
1148 	switch(evt) {
1149 	case OCS_EVT_ENTER:
1150 		ocs_node_hold_frames(node);
1151 
1152 		/* first check to see if no ELS IOs are outstanding */
1153 		if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1154 			/* If there are any active IOs, free them. */
1155 			if (!ocs_node_active_ios_empty(node)) {
1156 				ocs_lock(&node->active_ios_lock);
1157 				ocs_list_foreach_safe(&node->active_ios, io, next) {
1158 					ocs_list_remove(&io->node->active_ios, io);
1159 					ocs_io_free(node->ocs, io);
1160 				}
1161 				ocs_unlock(&node->active_ios_lock);
1162 			}
1163 			ocs_node_transition(node, __ocs_node_shutdown, NULL);
1164 		}
1165 		break;
1166 
1167 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1168 	case OCS_EVT_ALL_CHILD_NODES_FREE: {
1169 		if (ocs_node_active_ios_empty(node) &&
1170 		    ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1171 			ocs_node_transition(node, __ocs_node_shutdown, NULL);
1172 		}
1173 		break;
1174 	}
1175 
1176 	case OCS_EVT_EXIT:
1177 		ocs_node_accept_frames(node);
1178 		break;
1179 
1180 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1181 		/* Can happen as ELS IOs complete */
1182 		ocs_assert(node->els_req_cnt, NULL);
1183 		node->els_req_cnt--;
1184 		break;
1185 
1186 	/* ignore shutdown events as we're already in shutdown path */
1187 	case OCS_EVT_SHUTDOWN:
1188 		/* have default shutdown event take precedence */
1189 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1190 		/* fall through */
1191 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1192 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1193 		ocs_log_debug(ocs, "[%s] %-20s\n", node->display_name, ocs_sm_event_name(evt));
1194 		break;
1195 	case OCS_EVT_DOMAIN_ATTACH_OK:
1196 		/* don't care about domain_attach_ok */
1197 		break;
1198 	default:
1199 		__ocs_node_common(__func__, ctx, evt, arg);
1200 		return NULL;
1201 	}
1202 
1203 	return NULL;
1204 }
1205 
1206 /**
1207  * @ingroup node_common
1208  * @brief state: common node event handler
1209  *
1210  * Handle common/shared node events
1211  *
1212  * @param funcname calling function's name
1213  * @param ctx remote node sm context
1214  * @param evt event to process
1215  * @param arg per event optional argument
1216  *
1217  * @return returns NULL
1218  */
1219 
1220 void *
1221 __ocs_node_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1222 {
1223 	ocs_node_t *node = NULL;
1224 	ocs_t *ocs = NULL;
1225 	ocs_node_cb_t *cbdata = arg;
1226 	ocs_assert(ctx, NULL);
1227 	ocs_assert(ctx->app, NULL);
1228 	node = ctx->app;
1229 	ocs_assert(node->ocs, NULL);
1230 	ocs = node->ocs;
1231 
1232 	switch(evt) {
1233 	case OCS_EVT_ENTER:
1234 	case OCS_EVT_REENTER:
1235 	case OCS_EVT_EXIT:
1236 	case OCS_EVT_SPORT_TOPOLOGY_NOTIFY:
1237 	case OCS_EVT_NODE_MISSING:
1238 	case OCS_EVT_FCP_CMD_RCVD:
1239 		break;
1240 
1241 	case OCS_EVT_NODE_REFOUND:
1242 		node->refound = 1;
1243 		break;
1244 
1245 	/* node->attached must be set appropriately for all node attach/detach events */
1246 	case OCS_EVT_NODE_ATTACH_OK:
1247 		node->attached = TRUE;
1248 		break;
1249 
1250 	case OCS_EVT_NODE_FREE_OK:
1251 	case OCS_EVT_NODE_ATTACH_FAIL:
1252 		node->attached = FALSE;
1253 		break;
1254 
1255 	/* handle any ELS completions that other states either didn't care about
1256 	 * or forgot about
1257 	 */
1258 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1259 	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1260 		ocs_assert(node->els_cmpl_cnt, NULL);
1261 		node->els_cmpl_cnt--;
1262 		break;
1263 
1264 	/* handle any ELS request completions that other states either didn't care about
1265 	 * or forgot about
1266 	 */
1267 	case OCS_EVT_SRRS_ELS_REQ_OK:
1268 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1269 	case OCS_EVT_SRRS_ELS_REQ_RJT:
1270 	case OCS_EVT_ELS_REQ_ABORTED:
1271 		ocs_assert(node->els_req_cnt, NULL);
1272 		node->els_req_cnt--;
1273 		break;
1274 
1275 	case OCS_EVT_ELS_RCVD: {
1276 		fc_header_t *hdr = cbdata->header->dma.virt;
1277 
1278 		/* Unsupported ELS was received, send LS_RJT, command not supported */
1279 		ocs_log_debug(ocs, "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
1280 			      node->display_name, funcname, ((uint8_t*)cbdata->payload->dma.virt)[0]);
1281 		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1282 			FC_REASON_COMMAND_NOT_SUPPORTED, FC_EXPL_NO_ADDITIONAL, 0,
1283 			NULL, NULL);
1284 		break;
1285 	}
1286 
1287 	case OCS_EVT_PLOGI_RCVD:
1288 	case OCS_EVT_FLOGI_RCVD:
1289 	case OCS_EVT_LOGO_RCVD:
1290 	case OCS_EVT_PRLI_RCVD:
1291 	case OCS_EVT_PRLO_RCVD:
1292 	case OCS_EVT_PDISC_RCVD:
1293 	case OCS_EVT_FDISC_RCVD:
1294 	case OCS_EVT_ADISC_RCVD:
1295 	case OCS_EVT_RSCN_RCVD:
1296 	case OCS_EVT_SCR_RCVD: {
1297 		fc_header_t *hdr = cbdata->header->dma.virt;
1298 		/* sm: / send ELS_RJT */
1299 		ocs_log_debug(ocs, "[%s] (%s) %s sending ELS_RJT\n",
1300 			      node->display_name, funcname, ocs_sm_event_name(evt));
1301 		/* if we didn't catch this in a state, send generic LS_RJT */
1302 		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1303 			FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NO_ADDITIONAL, 0,
1304 			NULL, NULL);
1305 
1306 		break;
1307 	}
1308 	case OCS_EVT_GID_PT_RCVD:
1309 	case OCS_EVT_RFT_ID_RCVD:
1310 	case OCS_EVT_RFF_ID_RCVD: {
1311 		fc_header_t *hdr = cbdata->header->dma.virt;
1312 		ocs_log_debug(ocs, "[%s] (%s) %s sending CT_REJECT\n",
1313 			      node->display_name, funcname, ocs_sm_event_name(evt));
1314 		ocs_send_ct_rsp(cbdata->io, hdr->ox_id, cbdata->payload->dma.virt, FCCT_HDR_CMDRSP_REJECT, FCCT_COMMAND_NOT_SUPPORTED, 0);
1315 		break;
1316 	}
1317 
1318 	case OCS_EVT_ABTS_RCVD: {
1319 		fc_header_t *hdr = cbdata->header->dma.virt;
1320 		ocs_log_debug(ocs, "[%s] (%s) %s sending BA_ACC\n",
1321 			      node->display_name, funcname, ocs_sm_event_name(evt));
1322 
1323 		/* sm: send BA_ACC */
1324 		ocs_bls_send_acc_hdr(cbdata->io, hdr);
1325 		break;
1326 	}
1327 
1328 	default:
1329 		ocs_log_test(node->ocs, "[%s] %-20s %-20s not handled\n", node->display_name, funcname,
1330 			ocs_sm_event_name(evt));
1331 		break;
1332 	}
1333 	return NULL;
1334 }
1335 
1336 /**
1337  * @ingroup node_common
1338  * @brief save node service parameters
1339  *
1340  * Service parameters are copied into the node structure
1341  *
1342  * @param node pointer to node structure
1343  * @param payload pointer to service parameters to save
1344  *
1345  * @return none
1346  */
1347 
1348 void
1349 ocs_node_save_sparms(ocs_node_t *node, void *payload)
1350 {
1351 	ocs_memcpy(node->service_params, payload, sizeof(node->service_params));
1352 }
1353 
1354 /**
1355  * @ingroup node_common
1356  * @brief Post event to node state machine context
1357  *
1358  * This is used by the node state machine code to post events to the nodes.  Upon
1359  * completion of the event posting, if the nesting depth is zero and we're not holding
1360  * inbound frames, then the pending frames are processed.
1361  *
1362  * @param node pointer to node
1363  * @param evt event to post
1364  * @param arg event posting argument
1365  *
1366  * @return none
1367  */
1368 
1369 void
1370 ocs_node_post_event(ocs_node_t *node, ocs_sm_event_t evt, void *arg)
1371 {
1372 	int free_node = FALSE;
1373 	ocs_assert(node);
1374 
1375 	ocs_node_lock(node);
1376 		node->evtdepth ++;
1377 
1378 		ocs_sm_post_event(&node->sm, evt, arg);
1379 
1380 		/* If our event call depth is one and we're not holding frames
1381 		 * then we can dispatch any pending frames.   We don't want to allow
1382 		 * the ocs_process_node_pending() call to recurse.
1383 		 */
1384 		if (!node->hold_frames && (node->evtdepth == 1)) {
1385 			ocs_process_node_pending(node);
1386 		}
1387 		node->evtdepth --;
1388 
1389 		/* Free the node object if so requested, and we're at an event
1390 		 * call depth of zero
1391 		 */
1392 		if ((node->evtdepth == 0) && node->req_free) {
1393 			free_node = TRUE;
1394 		}
1395 	ocs_node_unlock(node);
1396 
1397 	if (free_node) {
1398 		ocs_node_free(node);
1399 	}
1400 
1401 	return;
1402 }
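/*
 * Event-posting sketch (event name taken from this file): a caller posts a
 * single event; the outermost call (event depth one, frames not held) also
 * dispatches any pending frames, e.g.
 *
 *	ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
 *
 * If a handler sets node->req_free, the node is returned to the free list
 * once the event depth drops back to zero.
 */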
1403 
1404 /**
1405  * @ingroup node_common
1406  * @brief transition state of a node
1407  *
1408  * The node's state is transitioned to the requested state.  Entry/Exit
1409  * events are posted as needed.
1410  *
1411  * @param node pointer to node
1412  * @param state state to transition to
1413  * @param data transition data
1414  *
1415  * @return none
1416  */
1417 
1418 void
1419 ocs_node_transition(ocs_node_t *node, ocs_sm_function_t state, void *data)
1420 {
1421 	ocs_sm_ctx_t *ctx = &node->sm;
1422 
1423 	ocs_node_lock(node);
1424 		if (ctx->current_state == state) {
1425 			ocs_node_post_event(node, OCS_EVT_REENTER, data);
1426 		} else {
1427 			ocs_node_post_event(node, OCS_EVT_EXIT, data);
1428 			ctx->current_state = state;
1429 			ocs_node_post_event(node, OCS_EVT_ENTER, data);
1430 		}
1431 	ocs_node_unlock(node);
1432 }
1433 
1434 /**
1435  * @ingroup node_common
1436  * @brief build EUI formatted WWN
1437  *
1438  * Build a WWN given the somewhat transport-agnostic iSCSI naming specification; for FC,
1439  * use the eui. format, an ASCII string such as: "eui.10000000C9A19501"
1440  *
1441  * @param buffer buffer to place formatted name into
1442  * @param buffer_len length in bytes of the buffer
1443  * @param eui_name cpu endian 64 bit WWN value
1444  *
1445  * @return none
1446  */
1447 
1448 void
1449 ocs_node_build_eui_name(char *buffer, uint32_t buffer_len, uint64_t eui_name)
1450 {
1451 	ocs_memset(buffer, 0, buffer_len);
1452 
1453 	ocs_snprintf(buffer, buffer_len, "eui.%016llx", (unsigned long long)eui_name);
1454 }
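/*
 * Example (hypothetical local buffer; the WWPN comes from the node's saved
 * service parameters):
 *
 *	char eui[64];
 *	ocs_node_build_eui_name(eui, sizeof(eui), ocs_node_get_wwpn(node));
 *
 * eui then holds a string of the form "eui.10000000c9a19501".
 */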
1455 
1456 /**
1457  * @ingroup node_common
1458  * @brief return node's WWPN as a uint64_t
1459  *
1460  * The WWPN is computed from service parameters and returned as a uint64_t
1461  *
1462  * @param node pointer to node structure
1463  *
1464  * @return WWPN
1465  *
1466  */
1467 
1468 uint64_t
1469 ocs_node_get_wwpn(ocs_node_t *node)
1470 {
1471 	fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params;
1472 
1473 	return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
1474 }
1475 
1476 /**
1477  * @ingroup node_common
1478  * @brief return node's WWNN as a uint64_t
1479  *
1480  * The WWNN is computed from service parameters and returned as a uint64_t
1481  *
1482  * @param node pointer to node structure
1483  *
1484  * @return WWNN
1485  *
1486  */
1487 
1488 uint64_t
1489 ocs_node_get_wwnn(ocs_node_t *node)
1490 {
1491 	fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params;
1492 
1493 	return (((uint64_t)ocs_be32toh(sp->node_name_hi) << 32ll) | (ocs_be32toh(sp->node_name_lo)));
1494 }
1495 
1496 /**
1497  * @brief Generate node ddump data
1498  *
1499  * Generates the node ddump data
1500  *
1501  * @param textbuf pointer to text buffer
1502  * @param node pointer to node context
1503  *
1504  * @return Returns 0 on success, or a negative value on failure.
1505  */
1506 
1507 int
1508 ocs_ddump_node(ocs_textbuf_t *textbuf, ocs_node_t *node)
1509 {
1510 	ocs_io_t *io;
1511 	ocs_io_t *els;
1512 	int retval = 0;
1513 
1514 	ocs_ddump_section(textbuf, "node", node->instance_index);
1515 	ocs_ddump_value(textbuf, "display_name", "%s", node->display_name);
1516 	ocs_ddump_value(textbuf, "current_state", "%s", node->current_state_name);
1517 	ocs_ddump_value(textbuf, "prev_state", "%s", node->prev_state_name);
1518 	ocs_ddump_value(textbuf, "current_evt", "%s", ocs_sm_event_name(node->current_evt));
1519 	ocs_ddump_value(textbuf, "prev_evt", "%s", ocs_sm_event_name(node->prev_evt));
1520 
1521 	ocs_ddump_value(textbuf, "indicator", "%#x", node->rnode.indicator);
1522 	ocs_ddump_value(textbuf, "fc_id", "%#06x", node->rnode.fc_id);
1523 	ocs_ddump_value(textbuf, "attached", "%d", node->rnode.attached);
1524 
1525 	ocs_ddump_value(textbuf, "hold_frames", "%d", node->hold_frames);
1526 	ocs_ddump_value(textbuf, "io_alloc_enabled", "%d", node->io_alloc_enabled);
1527 	ocs_ddump_value(textbuf, "shutdown_reason", "%d", node->shutdown_reason);
1528 	ocs_ddump_value(textbuf, "send_ls_acc", "%d", node->send_ls_acc);
1529 	ocs_ddump_value(textbuf, "ls_acc_did", "%d", node->ls_acc_did);
1530 	ocs_ddump_value(textbuf, "ls_acc_oxid", "%#04x", node->ls_acc_oxid);
1531 	ocs_ddump_value(textbuf, "req_free", "%d", node->req_free);
1532 	ocs_ddump_value(textbuf, "els_req_cnt", "%d", node->els_req_cnt);
1533 	ocs_ddump_value(textbuf, "els_cmpl_cnt", "%d", node->els_cmpl_cnt);
1534 
1535 	ocs_ddump_value(textbuf, "targ", "%d", node->targ);
1536 	ocs_ddump_value(textbuf, "init", "%d", node->init);
1537 	ocs_ddump_value(textbuf, "wwnn", "%s", node->wwnn);
1538 	ocs_ddump_value(textbuf, "wwpn", "%s", node->wwpn);
1539 	ocs_ddump_value(textbuf, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
1540 	ocs_ddump_value(textbuf, "chained_io_count", "%d", node->chained_io_count);
1541 	ocs_ddump_value(textbuf, "abort_cnt", "%d", node->abort_cnt);
1542 
1543 	ocs_display_sparams(NULL, "node_sparams", 1, textbuf, node->service_params+4);
1544 
1545 	ocs_lock(&node->pend_frames_lock);
1546 		if (!ocs_list_empty(&node->pend_frames)) {
1547 			ocs_hw_sequence_t *frame;
1548 			ocs_ddump_section(textbuf, "pending_frames", 0);
1549 			ocs_list_foreach(&node->pend_frames, frame) {
1550 				fc_header_t *hdr;
1551 				char buf[128];
1552 
1553 				hdr = frame->header->dma.virt;
1554 				ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu",
1555 				 hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
1556 				 frame->payload->dma.len);
1557 				ocs_ddump_value(textbuf, "frame", "%s", buf);
1558 			}
1559 			ocs_ddump_endsection(textbuf, "pending_frames", 0);
1560 		}
1561 	ocs_unlock(&node->pend_frames_lock);
1562 
1563 	ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
1564 	ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
1565 
1566 	ocs_lock(&node->active_ios_lock);
1567 		ocs_ddump_section(textbuf, "active_ios", 0);
1568 		ocs_list_foreach(&node->active_ios, io) {
1569 			ocs_ddump_io(textbuf, io);
1570 		}
1571 		ocs_ddump_endsection(textbuf, "active_ios", 0);
1572 
1573 		ocs_ddump_section(textbuf, "els_io_pend_list", 0);
1574 		ocs_list_foreach(&node->els_io_pend_list, els) {
1575 			ocs_ddump_els(textbuf, els);
1576 		}
1577 		ocs_ddump_endsection(textbuf, "els_io_pend_list", 0);
1578 
1579 		ocs_ddump_section(textbuf, "els_io_active_list", 0);
1580 		ocs_list_foreach(&node->els_io_active_list, els) {
1581 			ocs_ddump_els(textbuf, els);
1582 		}
1583 		ocs_ddump_endsection(textbuf, "els_io_active_list", 0);
1584 	ocs_unlock(&node->active_ios_lock);
1585 
1586 	ocs_ddump_endsection(textbuf, "node", node->instance_index);
1587 
1588 	return retval;
1589 }
1590 
1591 /**
1592  * @brief check ELS request completion
1593  *
1594  * Check ELS request completion event to make sure it's for the
1595  * ELS request we expect. If not, invoke given common event
1596  * handler and return an error.
1597  *
1598  * @param ctx state machine context
1599  * @param evt ELS request event
1600  * @param arg event argument
1601  * @param cmd ELS command expected
1602  * @param node_common_func common event handler to call if ELS
1603  *      		   doesn't match
1604  * @param funcname function name that called this
1605  *
1606  * @return zero if ELS command matches, -1 otherwise
1607  */
1608 int32_t
1609 node_check_els_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint8_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
1610 {
1611 	ocs_node_t *node = NULL;
1612 	ocs_t *ocs = NULL;
1613 	ocs_node_cb_t *cbdata = arg;
1614 	fc_els_gen_t *els_gen = NULL;
1615 	ocs_assert(ctx, -1);
1616 	node = ctx->app;
1617 	ocs_assert(node, -1);
1618 	ocs = node->ocs;
1619 	ocs_assert(ocs, -1);
1620 	cbdata = arg;
1621 	ocs_assert(cbdata, -1);
1622 	ocs_assert(cbdata->els, -1);
1623 	els_gen = (fc_els_gen_t *)cbdata->els->els_req.virt;
1624 	ocs_assert(els_gen, -1);
1625 
1626 	if ((cbdata->els->hio_type != OCS_HW_ELS_REQ) || (els_gen->command_code != cmd)) {
1627 		if (cbdata->els->hio_type != OCS_HW_ELS_REQ) {
1628 			ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received type=%d\n",
1629 				node->display_name, funcname, cmd, cbdata->els->hio_type);
1630 		} else {
1631 			ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received cmd=x%x\n",
1632 				node->display_name, funcname, cmd, els_gen->command_code);
1633 		}
1634 		/* send event to common handler */
1635 		node_common_func(funcname, ctx, evt, arg);
1636 		return -1;
1637 	}
1638 	return 0;
1639 }
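/*
 * Typical caller sketch (illustrative only; FC_ELS_CMD_PLOGI is assumed to
 * be the PLOGI command code from the driver's FC headers): a state handler
 * validates an ELS request completion before acting on it, e.g.
 *
 *	if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI,
 *			       __ocs_node_common, __func__) != 0) {
 *		return NULL;
 *	}
 */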
1640 
1641 /**
1642  * @brief check NS request completion
1643  *
1644  * Check ELS request completion event to make sure it's for the
1645  * nameserver request we expect. If not, invoke given common
1646  * event handler and return an error.
1647  *
1648  * @param ctx state machine context
1649  * @param evt ELS request event
1650  * @param arg event argument
1651  * @param cmd nameserver command expected
1652  * @param node_common_func common event handler to call if
1653  *      		   nameserver cmd doesn't match
1654  * @param funcname function name that called this
1655  *
1656  * @return zero if NS command matches, -1 otherwise
1657  */
1658 int32_t
1659 node_check_ns_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint32_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
1660 {
1661 	ocs_node_t *node = NULL;
1662 	ocs_t *ocs = NULL;
1663 	ocs_node_cb_t *cbdata = arg;
1664 	fcct_iu_header_t *fcct = NULL;
1665 	ocs_assert(ctx, -1);
1666 	node = ctx->app;
1667 	ocs_assert(node, -1);
1668 	ocs = node->ocs;
1669 	ocs_assert(ocs, -1);
1670 	cbdata = arg;
1671 	ocs_assert(cbdata, -1);
1672 	ocs_assert(cbdata->els, -1);
1673 	fcct = (fcct_iu_header_t *)cbdata->els->els_req.virt;
1674 	ocs_assert(fcct, -1);
1675 
1676 	if ((cbdata->els->hio_type != OCS_HW_FC_CT) || fcct->cmd_rsp_code != ocs_htobe16(cmd)) {
1677 		if (cbdata->els->hio_type != OCS_HW_FC_CT) {
1678 			ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received type=%d\n",
1679 				node->display_name, funcname, cmd, cbdata->els->hio_type);
1680 		} else {
1681 			ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received cmd=x%x\n",
1682 				node->display_name, funcname, cmd, fcct->cmd_rsp_code);
1683 		}
1684 		/* send event to common handler */
1685 		node_common_func(funcname, ctx, evt, arg);
1686 		return -1;
1687 	}
1688 	return 0;
1689 }
1690 
1691 void
1692 ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *object)
1693 {
1694 	ocs_io_t *io;
1695 	ocs_node_t *node = (ocs_node_t *)object;
1696 
1697 	ocs_mgmt_start_section(textbuf, "node", node->instance_index);
1698 
1699 	/* Readonly values */
1700 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
1701 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
1702 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
1703 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "attached");
1704 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "hold_frames");
1705 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "shutting_down");
1706 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "req_free");
1707 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id");
1708 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id_in_use");
1709 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "abort_cnt");
1710 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "targ");
1711 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init");
1712 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
1713 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
1714 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "pend_frames");
1715 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "chained_io_count");
1716 
1717 	/* Actions */
1718 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
1719 
1720 	ocs_lock(&node->active_ios_lock);
1721 	ocs_list_foreach(&node->active_ios, io) {
1722 		if ((io->mgmt_functions) && (io->mgmt_functions->get_list_handler)) {
1723 			io->mgmt_functions->get_list_handler(textbuf, io);
1724 		}
1725 	}
1726 	ocs_unlock(&node->active_ios_lock);
1727 
1728 	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
1729 }
1730 
1731 int
1732 ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
1733 {
1734 	ocs_io_t *io;
1735 	ocs_node_t *node = (ocs_node_t *)object;
1736 	char qualifier[80];
1737 	int retval = -1;
1738 
1739 	ocs_mgmt_start_section(textbuf, "node", node->instance_index);
1740 
1741 	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);
1742 
1743 	/* If it doesn't start with my qualifier I don't know what to do with it */
1744 	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1745 		char *unqualified_name = name + strlen(qualifier) +1;
1746 
1747 		/* See if it's a value I can supply */
1748 		if (ocs_strcmp(unqualified_name, "display_name") == 0) {
1749 			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
1750 			retval = 0;
1751 		} else if (ocs_strcmp(unqualified_name, "indicator") == 0) {
1752 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
1753 			retval = 0;
1754 		} else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
1755 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
1756 			retval = 0;
1757 		} else if (ocs_strcmp(unqualified_name, "attached") == 0) {
1758 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
1759 			retval = 0;
1760 		} else if (ocs_strcmp(unqualified_name, "hold_frames") == 0) {
1761 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
1762 			retval = 0;
1763 		} else if (ocs_strcmp(unqualified_name, "io_alloc_enabled") == 0) {
1764 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
1765 			retval = 0;
1766 		} else if (ocs_strcmp(unqualified_name, "req_free") == 0) {
1767 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
1768 			retval = 0;
1769 		} else if (ocs_strcmp(unqualified_name, "ls_acc_oxid") == 0) {
1770 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%04x", node->ls_acc_oxid);
1771 			retval = 0;
1772 		} else if (ocs_strcmp(unqualified_name, "ls_acc_did") == 0) {
1773 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%04x", node->ls_acc_did);
1774 			retval = 0;
1775 		} else if (ocs_strcmp(unqualified_name, "abort_cnt") == 0) {
1776 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
1777 			retval = 0;
1778 		} else if (ocs_strcmp(unqualified_name, "targ") == 0) {
1779 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ",  node->targ);
1780 			retval = 0;
1781 		} else if (ocs_strcmp(unqualified_name, "init") == 0) {
1782 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init",  node->init);
1783 			retval = 0;
1784 		} else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
1785 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn);
1786 			retval = 0;
1787 		} else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
1788 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn);
1789 			retval = 0;
1790 		} else if (ocs_strcmp(unqualified_name, "current_state") == 0) {
1791 			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
1792 			retval = 0;
1793 		} else if (ocs_strcmp(unqualified_name, "login_state") == 0) {
1794 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
1795 			retval = 0;
1796 		} else if (ocs_strcmp(unqualified_name, "pend_frames") == 0) {
1797 			ocs_hw_sequence_t *frame;
1798 			ocs_lock(&node->pend_frames_lock);
1799 				ocs_list_foreach(&node->pend_frames, frame) {
1800 					fc_header_t *hdr;
1801 					char buf[128];
1802 
1803 					hdr = frame->header->dma.virt;
1804 					ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
1805 						 ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
1806 						 frame->payload->dma.len);
1807 					ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
1808 				}
1809 			ocs_unlock(&node->pend_frames_lock);
1810 			retval = 0;
1811 		} else if (ocs_strcmp(unqualified_name, "chained_io_count") == 0) {
1812 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
1813 			retval = 0;
1814 		} else {
1815 			/* If I didn't know the value of this status, pass the request to each of my children */
1816 			ocs_lock(&node->active_ios_lock);
1817 				ocs_list_foreach(&node->active_ios, io) {
1818 					if ((io->mgmt_functions) && (io->mgmt_functions->get_handler)) {
1819 						retval = io->mgmt_functions->get_handler(textbuf, qualifier, name, io);
1820 					}
1821 
1822 					if (retval == 0) {
1823 						break;
1824 					}
1825 				}
1826 			ocs_unlock(&node->active_ios_lock);
1827 		}
1828 	}
1829 
1830 	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
1831 
1832 	return retval;
1833 }
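/*
 * Illustrative example (the parent path layout is hypothetical): with parent
 * "/ocs/domain[0]/sport[0]", the qualifier built above for node instance 2 is
 * "/ocs/domain[0]/sport[0]/node[2]", so a request such as
 *
 *     ocs_mgmt_node_get(textbuf, "/ocs/domain[0]/sport[0]",
 *                       "/ocs/domain[0]/sport[0]/node[2]/fc_id", node);
 *
 * strips the qualifier, matches "fc_id", and emits the node's FC address.
 * Names below the node level are forwarded to each active IO's get_handler.
 */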
1834 
1835 void
1836 ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *object)
1837 {
1838 	ocs_io_t *io;
1839 	ocs_node_t *node = (ocs_node_t *)object;
1840 	ocs_hw_sequence_t *frame;
1841 
1842 	ocs_mgmt_start_section(textbuf, "node", node->instance_index);
1843 
1844 	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
1845 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
1846 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
1847 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
1848 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
1849 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
1850 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
1851 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%04x", node->ls_acc_oxid);
1852 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%04x", node->ls_acc_did);
1853 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
1854 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ",  node->targ);
1855 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init",  node->init);
1856 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn);
1857 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn);
1858 
1859 	ocs_lock(&node->pend_frames_lock);
1860 	ocs_list_foreach(&node->pend_frames, frame) {
1861 		fc_header_t *hdr;
1862 		char buf[128];
1863 
1864 		hdr = frame->header->dma.virt;
1865 		ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
1866 			     ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
1867 			     frame->payload->dma.len);
1868 		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
1869 	}
1870 	ocs_unlock(&node->pend_frames_lock);
1871 
1872 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
1873 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
1874 	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
1875 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
1876 
1877 	ocs_lock(&node->active_ios_lock);
1878 	ocs_list_foreach(&node->active_ios, io) {
1879 		if ((io->mgmt_functions) && (io->mgmt_functions->get_all_handler)) {
1880 			io->mgmt_functions->get_all_handler(textbuf, io);
1881 		}
1882 	}
1883 	ocs_unlock(&node->active_ios_lock);
1884 
1885 	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
1886 }
1887 
1888 int
1889 ocs_mgmt_node_set(char *parent, char *name, char *value, void *object)
1890 {
1891 	ocs_io_t *io;
1892 	ocs_node_t *node = (ocs_node_t *)object;
1893 	char qualifier[80];
1894 	int retval = -1;
1895 
1896 	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);
1897 
1898 	/* If it doesn't start with my qualifier I don't know what to do with it */
1899 	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1900 		ocs_lock(&node->active_ios_lock);
1901 		ocs_list_foreach(&node->active_ios, io) {
1902 			if ((io->mgmt_functions) && (io->mgmt_functions->set_handler)) {
1903 				retval = io->mgmt_functions->set_handler(qualifier, name, value, io);
1904 			}
1905 
1906 			if (retval == 0) {
1907 				break;
1908 			}
1909 		}
1910 		ocs_unlock(&node->active_ios_lock);
1911 	}
1912 
1913 	return retval;
1914 }
1915 
1916 int
1917 ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
1918 		   void *arg_out, uint32_t arg_out_length, void *object)
1919 {
1920 	ocs_io_t *io;
1921 	ocs_node_t *node = (ocs_node_t *)object;
1922 	char qualifier[80];
1923 	int retval = -1;
1924 
1925 	ocs_snprintf(qualifier, sizeof(qualifier), "%s.node%d", parent, node->instance_index);
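	/*
	 * Note: exec qualifiers use the dotted form "parent.node<N>", while the
	 * get/set handlers above build "parent/node[<N>]"; callers must use the
	 * form expected by the handler they invoke.
	 */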
1926 
1927 	/* If it doesn't start with my qualifier I don't know what to do with it */
1928 	if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
1929 		char *unqualified_name = action + strlen(qualifier) +1;
1930 
1931 		if (ocs_strcmp(unqualified_name, "resume") == 0) {
1932 			ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
1933 		}
1934 
1935 		{
1936 			/* If I didn't know how to do this action, pass the request to each of my children */
1937 			ocs_lock(&node->active_ios_lock);
1938 				ocs_list_foreach(&node->active_ios, io) {
1939 					if ((io->mgmt_functions) && (io->mgmt_functions->exec_handler)) {
1940 						retval = io->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
1941 							arg_out, arg_out_length, io);
1942 					}
1943 
1944 					if (retval == 0) {
1945 						break;
1946 					}
1947 				}
1948 			ocs_unlock(&node->active_ios_lock);
1949 		}
1950 	}
1951 
1952 	return retval;
1953 }
1954 
1955 /**
1956  * @brief Return TRUE if active ios list is empty
1957  *
1958  * Test if node->active_ios list is empty while holding the node->active_ios_lock.
1959  *
1960  * @param node pointer to node object
1961  *
1962  * @return TRUE if node active ios list is empty
1963  */
1964 
1965 int
1966 ocs_node_active_ios_empty(ocs_node_t *node)
1967 {
1968 	int empty;
1969 
1970 	ocs_lock(&node->active_ios_lock);
1971 		empty = ocs_list_empty(&node->active_ios);
1972 	ocs_unlock(&node->active_ios_lock);
1973 	return empty;
1974 }
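/*
 * Illustrative usage (sketch only): teardown paths can use this helper to
 * decide whether a node can be cleaned up immediately, e.g.
 *
 *     if (ocs_node_active_ios_empty(node)) {
 *         // no outstanding IOs; safe to continue tearing the node down
 *     }
 */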
1975 
1976 /**
1977  * @brief Pause a node
1978  *
1979  * The node is placed in the __ocs_node_paused state after saving the state
1980  * to return to
1981  *
1982  * @param node Pointer to node object
1983  * @param state State to resume to
1984  *
1985  * @return none
1986  */
1987 
1988 void
1989 ocs_node_pause(ocs_node_t *node, ocs_sm_function_t state)
1990 {
1991 	node->nodedb_state = state;
1992 	ocs_node_transition(node, __ocs_node_paused, NULL);
1993 }
1994 
1995 /**
1996  * @brief Paused node state
1997  *
1998  * This state is entered when a node is paused. When resumed, the node
1999  * is transitioned to the previously saved state (node->nodedb_state).
2000  *
2001  * @param ctx Remote node state machine context.
2002  * @param evt Event to process.
2003  * @param arg Per event optional argument.
2004  *
2005  * @return returns NULL
2006  */
2007 
2008 void *
2009 __ocs_node_paused(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
2010 {
2011 	std_node_state_decl();
2012 
2013 	node_sm_trace();
2014 
2015 	switch(evt) {
2016 	case OCS_EVT_ENTER:
2017 		node_printf(node, "Paused\n");
2018 		break;
2019 
2020 	case OCS_EVT_RESUME: {
2021 		ocs_sm_function_t pf = node->nodedb_state;
2022 
2023 		node->nodedb_state = NULL;
2024 		ocs_node_transition(node, pf, NULL);
2025 		break;
2026 	}
2027 
2028 	case OCS_EVT_DOMAIN_ATTACH_OK:
2029 		break;
2030 
2031 	case OCS_EVT_SHUTDOWN:
2032 		node->req_free = 1;
2033 		break;
2034 
2035 	default:
2036 		__ocs_node_common(__func__, ctx, evt, arg);
2037 		break;
2038 	}
2039 	return NULL;
2040 }
2041 
2042 /**
2043  * @brief Resume a paused state
2044  *
2045  * Posts a resume event to the paused node.
2046  *
2047  * @param node Pointer to node object
2048  *
2049  * @return returns 0 for success, a negative error code value for failure.
2050  */
2051 
2052 int32_t
2053 ocs_node_resume(ocs_node_t *node)
2054 {
2055 	ocs_assert(node != NULL, -1);
2056 
2057 	ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
2058 
2059 	return 0;
2060 }
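/*
 * Illustrative pause/resume sequence (sketch; the saved state chosen here is
 * only an example of an ocs_sm_function_t visible in this file):
 *
 *     ocs_node_pause(node, __ocs_d_device_ready);  // save state, park in __ocs_node_paused
 *     ...
 *     ocs_node_resume(node);                       // posts OCS_EVT_RESUME; node returns to the saved state
 */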
2061 
2062 /**
2063  * @ingroup node_common
2064  * @brief Dispatch an ELS frame.
2065  *
2066  * <h3 class="desc">Description</h3>
2067  * An ELS frame is dispatched to the \c node state machine.
2068  * RQ Pair mode: this function is always called with a NULL hw
2069  * io.
2070  *
2071  * @param node Node that originated the frame.
2072  * @param seq header/payload sequence buffers
2073  *
2074  * @return Returns 0 if frame processed and RX buffers cleaned
2075  * up appropriately, -1 if frame not handled and RX buffers need
2076  * to be returned.
2077  */
2078 
2079 int32_t
2080 ocs_node_recv_els_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
2081 {
2082 	struct {
2083 		uint32_t cmd;
2084 		ocs_sm_event_t evt;
2085 		uint32_t payload_size;
2086 	} els_cmd_list[] = {
2087 		{FC_ELS_CMD_PLOGI,	OCS_EVT_PLOGI_RCVD, 	sizeof(fc_plogi_payload_t)},
2088 		{FC_ELS_CMD_FLOGI,	OCS_EVT_FLOGI_RCVD, 	sizeof(fc_plogi_payload_t)},
2089 		{FC_ELS_CMD_LOGO,	OCS_EVT_LOGO_RCVD, 	sizeof(fc_acc_payload_t)},
2090 		{FC_ELS_CMD_RRQ,	OCS_EVT_RRQ_RCVD, 	sizeof(fc_acc_payload_t)},
2091 		{FC_ELS_CMD_PRLI, 	OCS_EVT_PRLI_RCVD, 	sizeof(fc_prli_payload_t)},
2092 		{FC_ELS_CMD_PRLO, 	OCS_EVT_PRLO_RCVD, 	sizeof(fc_prlo_payload_t)},
2093 		{FC_ELS_CMD_PDISC, 	OCS_EVT_PDISC_RCVD, 	MAX_ACC_REJECT_PAYLOAD},
2094 		{FC_ELS_CMD_FDISC, 	OCS_EVT_FDISC_RCVD, 	MAX_ACC_REJECT_PAYLOAD},
2095 		{FC_ELS_CMD_ADISC, 	OCS_EVT_ADISC_RCVD, 	sizeof(fc_adisc_payload_t)},
2096 		{FC_ELS_CMD_RSCN, 	OCS_EVT_RSCN_RCVD, 	MAX_ACC_REJECT_PAYLOAD},
2097 		{FC_ELS_CMD_SCR, 	OCS_EVT_SCR_RCVD, 	MAX_ACC_REJECT_PAYLOAD},
2098 	};
2099 	ocs_t *ocs = node->ocs;
2100 	ocs_node_cb_t cbdata;
2101 	fc_header_t *hdr = seq->header->dma.virt;
2102 	uint8_t *buf = seq->payload->dma.virt;
2103 	ocs_sm_event_t evt = OCS_EVT_ELS_RCVD;
2104 	uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD;
2105 	uint32_t i;
2106 
2107 	ocs_memset(&cbdata, 0, sizeof(cbdata));
2108 	cbdata.header = seq->header;
2109 	cbdata.payload = seq->payload;
2110 
2111 	/* find a matching event for the ELS command */
2112 	for (i = 0; i < ARRAY_SIZE(els_cmd_list); i ++) {
2113 		if (els_cmd_list[i].cmd == buf[0]) {
2114 			evt = els_cmd_list[i].evt;
2115 			payload_size = els_cmd_list[i].payload_size;
2116 			break;
2117 		}
2118 	}
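	/*
	 * If no entry matched, evt and payload_size keep their defaults
	 * (OCS_EVT_ELS_RCVD / MAX_ACC_REJECT_PAYLOAD), so unrecognized ELS
	 * commands are still delivered to the node state machine.
	 */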
2119 
2120 	switch(evt) {
2121 	case OCS_EVT_FLOGI_RCVD:
2122 		ocs_display_sparams(node->display_name, "flogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
2123 		break;
2124 	case OCS_EVT_FDISC_RCVD:
2125 		ocs_display_sparams(node->display_name, "fdisc rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
2126 		break;
2127 	case OCS_EVT_PLOGI_RCVD:
2128 		ocs_display_sparams(node->display_name, "plogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
2129 		break;
2130 	default:
2131 		break;
2132 	}
2133 
2134 	cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER);
2135 
2136 	if (cbdata.io != NULL) {
2137 		cbdata.io->hw_priv = seq->hw_priv;
2138 		/* if we're here, sequence initiative has been transferred */
2139 		cbdata.io->seq_init = 1;
2140 
2141 		ocs_node_post_event(node, evt, &cbdata);
2142 	} else {
2143 		node_printf(node, "failure to allocate SCSI IO for ELS s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
2144 			    fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
2145 	}
2146 	ocs_hw_sequence_free(&ocs->hw, seq);
2147 	return 0;
2148 }
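/*
 * Illustrative caller sketch (assumed dispatch-path shape, not the driver's
 * actual unsolicited-frame handler): per the contract documented above, the
 * caller owns the RX buffers whenever a receive routine reports -1:
 *
 *     if (ocs_node_recv_els_frame(node, seq) != 0) {
 *         ocs_hw_sequence_free(&node->ocs->hw, seq);   // frame not consumed
 *     }
 */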
2149 
2150 /**
2151  * @ingroup node_common
2152  * @brief Dispatch an ABTS frame (RQ Pair/sequence coalescing).
2153  *
2154  * <h3 class="desc">Description</h3>
2155  * An ABTS frame is dispatched to the node state machine. This
2156  * function is used for both RQ Pair and sequence coalescing.
2157  *
2158  * @param node Node that originated the frame.
2159  * @param seq Header/payload sequence buffers
2160  *
2161  * @return Returns 0 if frame processed and RX buffers cleaned
2162  * up appropriately, -1 if frame not handled and RX buffers need
2163  * to be returned.
2164  */
2165 
2166 int32_t
2167 ocs_node_recv_abts_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
2168 {
2169 	ocs_t *ocs = node->ocs;
2170 	ocs_xport_t *xport = ocs->xport;
2171 	fc_header_t *hdr = seq->header->dma.virt;
2172 	uint16_t ox_id = ocs_be16toh(hdr->ox_id);
2173 	uint16_t rx_id = ocs_be16toh(hdr->rx_id);
2174 	ocs_node_cb_t cbdata;
2175 	int32_t rc = 0;
2176 
2177 	node->abort_cnt++;
2178 
2179 	/*
2180 	 * Check whether the IO we want to abort is still active; if it is not,
2181 	 * we can send the BA_ACC using the send-frame option.
2182 	 */
2183 	if (ocs_io_find_tgt_io(ocs, node, ox_id, rx_id) == NULL) {
2184 		uint32_t send_frame_capable;
2185 
2186 		ocs_log_debug(ocs, "IO not found (ox_id %04x)\n", ox_id);
2187 
2188 		/* If we have SEND_FRAME capability, then use it to send BA_ACC */
2189 		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
2190 		if ((rc == 0) && send_frame_capable) {
2191 			rc = ocs_sframe_send_bls_acc(node, seq);
2192 			if (rc) {
2193 				ocs_log_test(ocs, "ocs_sframe_send_bls_acc failed\n");
2194 			}
2195 			return rc;
2196 		}
2197 		/* SEND_FRAME not available; fall through and handle the ABTS via an allocated IO */
2198 	}
2199 
2200 	ocs_memset(&cbdata, 0, sizeof(cbdata));
2201 	cbdata.header = seq->header;
2202 	cbdata.payload = seq->payload;
2203 
2204 	cbdata.io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
2205 	if (cbdata.io != NULL) {
2206 		cbdata.io->hw_priv = seq->hw_priv;
2207 		/* If we got this far, SIT=1 */
2208 		cbdata.io->seq_init = 1;
2209 
2210 		/* fill out generic fields */
2211 		cbdata.io->ocs = ocs;
2212 		cbdata.io->node = node;
2213 		cbdata.io->cmd_tgt = TRUE;
2214 
2215 		ocs_node_post_event(node, OCS_EVT_ABTS_RCVD, &cbdata);
2216 	} else {
2217 		ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
2218 		node_printf(node, "SCSI IO allocation failed for ABTS received s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
2219 			    fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
2220 	}
2221 
2222 	/* ABTS processed, return RX buffer to the chip */
2223 	ocs_hw_sequence_free(&ocs->hw, seq);
2224 	return 0;
2225 }
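/*
 * Summary of the BA_ACC paths above: if the target IO is not found and the
 * hardware reports OCS_HW_SEND_FRAME_CAPABLE, the BA_ACC goes out directly
 * via ocs_sframe_send_bls_acc() without consuming an IO; otherwise a
 * responder SCSI IO is allocated and OCS_EVT_ABTS_RCVD is posted so the node
 * state machine completes the abort handling.
 */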
2226 
2227 /**
2228  * @ingroup node_common
2229  * @brief Dispatch a CT frame.
2230  *
2231  * <h3 class="desc">Description</h3>
2232  * A CT frame is dispatched to the \c node state machine.
2233  * RQ Pair mode: this function is always called with a NULL hw
2234  * io.
2235  *
2236  * @param node Node that originated the frame.
2237  * @param seq header/payload sequence buffers
2238  *
2239  * @return Returns 0 if frame processed and RX buffers cleaned
2240  * up appropriately, -1 if frame not handled and RX buffers need
2241  * to be returned.
2242  */
2243 
2244 int32_t
2245 ocs_node_recv_ct_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
2246 {
2247 	ocs_t *ocs = node->ocs;
2248 	fc_header_t *hdr = seq->header->dma.virt;
2249 	fcct_iu_header_t *iu = seq->payload->dma.virt;
2250 	ocs_sm_event_t evt = OCS_EVT_ELS_RCVD;
2251 	uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD;
2252 	uint16_t gscmd = ocs_be16toh(iu->cmd_rsp_code);
2253 	ocs_node_cb_t cbdata;
2254 	uint32_t i;
2255 	struct {
2256 		uint32_t cmd;
2257 		ocs_sm_event_t evt;
2258 		uint32_t payload_size;
2259 	} ct_cmd_list[] = {
2260 		{FC_GS_NAMESERVER_RFF_ID, OCS_EVT_RFF_ID_RCVD, 100},
2261 		{FC_GS_NAMESERVER_RFT_ID, OCS_EVT_RFT_ID_RCVD, 100},
2262 		{FC_GS_NAMESERVER_GNN_ID, OCS_EVT_GNN_ID_RCVD, 100},
2263 		{FC_GS_NAMESERVER_GPN_ID, OCS_EVT_GPN_ID_RCVD, 100},
2264 		{FC_GS_NAMESERVER_GFPN_ID, OCS_EVT_GFPN_ID_RCVD, 100},
2265 		{FC_GS_NAMESERVER_GFF_ID, OCS_EVT_GFF_ID_RCVD, 100},
2266 		{FC_GS_NAMESERVER_GID_FT, OCS_EVT_GID_FT_RCVD, 256},
2267 		{FC_GS_NAMESERVER_GID_PT, OCS_EVT_GID_PT_RCVD, 256},
2268 		{FC_GS_NAMESERVER_RPN_ID, OCS_EVT_RPN_ID_RCVD, 100},
2269 		{FC_GS_NAMESERVER_RNN_ID, OCS_EVT_RNN_ID_RCVD, 100},
2270 		{FC_GS_NAMESERVER_RCS_ID, OCS_EVT_RCS_ID_RCVD, 100},
2271 		{FC_GS_NAMESERVER_RSNN_NN, OCS_EVT_RSNN_NN_RCVD, 100},
2272 		{FC_GS_NAMESERVER_RSPN_ID, OCS_EVT_RSPN_ID_RCVD, 100},
2273 		{FC_GS_NAMESERVER_RHBA, OCS_EVT_RHBA_RCVD, 100},
2274 		{FC_GS_NAMESERVER_RPA, OCS_EVT_RPA_RCVD, 100},
2275 	};
2276 
2277 	ocs_memset(&cbdata, 0, sizeof(cbdata));
2278 	cbdata.header = seq->header;
2279 	cbdata.payload = seq->payload;
2280 
2281 	/* find a matching event for the GS command (default: OCS_EVT_ELS_RCVD with a reject-sized payload) */
2282 	for (i = 0; i < ARRAY_SIZE(ct_cmd_list); i ++) {
2283 		if (ct_cmd_list[i].cmd == gscmd) {
2284 			evt = ct_cmd_list[i].evt;
2285 			payload_size = ct_cmd_list[i].payload_size;
2286 			break;
2287 		}
2288 	}
2289 
2290 	/* Allocate an IO for the response and post the event to the node state machine */
2291 	cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER);
2292 	if (cbdata.io == NULL) {
2293 		node_printf(node, "GS IO allocation failed for s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
2294 			fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id),
2295 			ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
2296 		return -1;
2297 	}
2298 	cbdata.io->hw_priv = seq->hw_priv;
2299 	ocs_node_post_event(node, evt, &cbdata);
2300 
2301 	ocs_hw_sequence_free(&ocs->hw, seq);
2302 	return 0;
2303 }
2304 
2305 /**
2306  * @ingroup node_common
2307  * @brief Dispatch a FCP command frame when the node is not ready.
2308  *
2309  * <h3 class="desc">Description</h3>
2310  * A frame is dispatched to the \c node state machine.
2311  *
2312  * @param node Node that originated the frame.
2313  * @param seq header/payload sequence buffers
2314  *
2315  * @return Returns 0 if frame processed and RX buffers cleaned
2316  * up appropriately, -1 if frame not handled.
2317  */
2318 
2319 int32_t
2320 ocs_node_recv_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
2321 {
2322 	ocs_node_cb_t cbdata;
2323 	ocs_t *ocs = node->ocs;
2324 
2325 	ocs_memset(&cbdata, 0, sizeof(cbdata));
2326 	cbdata.header = seq->header;
2327 	cbdata.payload = seq->payload;
2328 	ocs_node_post_event(node, OCS_EVT_FCP_CMD_RCVD, &cbdata);
2329 	ocs_hw_sequence_free(&ocs->hw, seq);
2330 	return 0;
2331 }
2332 
2333 /**
2334  * @ingroup node_common
2335  * @brief Stub handler for non-ABTS BLS frames
2336  *
2337  * <h3 class="desc">Description</h3>
2338  * Log a message and drop the frame. Consumers can plumb this into their back-end as needed.
2339  *
2340  * @param node Node that originated the frame.
2341  * @param seq header/payload sequence buffers
2342  *
2343  * @return Returns -1 (frame not handled; the caller must return the RX buffers)
2344  */
2345 
2346 int32_t
2347 ocs_node_recv_bls_no_sit(ocs_node_t *node, ocs_hw_sequence_t *seq)
2348 {
2349 	fc_header_t *hdr = seq->header->dma.virt;
2350 
2351 	node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n",
2352 		    ocs_htobe32(((uint32_t *)hdr)[0]),
2353 		    ocs_htobe32(((uint32_t *)hdr)[1]),
2354 		    ocs_htobe32(((uint32_t *)hdr)[2]),
2355 		    ocs_htobe32(((uint32_t *)hdr)[3]),
2356 		    ocs_htobe32(((uint32_t *)hdr)[4]),
2357 		    ocs_htobe32(((uint32_t *)hdr)[5]));
2358 
2359 	return -1;
2360 }
2361