xref: /freebsd/sys/dev/ocs_fc/ocs_node.c (revision b9f654b163bce26de79705e77b872427c9f2afa1)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 /**
35  * @file
36  * OCS driver remote node handler.  This file contains code that is shared
37  * between fabric (ocs_fabric.c) and device (ocs_device.c) nodes.
38  */
39 
40 /*!
41  * @defgroup node_common Node common support
42  * @defgroup node_alloc Node allocation
43  */
44 
45 #include "ocs.h"
46 #include "ocs_els.h"
47 #include "ocs_device.h"
48 
49 #define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]"
50 #define SCSI_ITT_SIZE(ocs)	((ocs->ocs_xport == OCS_XPORT_FC) ? 4 : 8)
51 
52 #define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag
53 
54 #define scsi_io_printf(io, fmt, ...) ocs_log_debug(io->ocs, "[%s]" SCSI_IOFMT fmt, \
55 	io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__)
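
/*
 * Usage sketch (illustrative only): scsi_io_printf() prefixes a debug message
 * with the owning node's display name plus the IO's instance index, initiator
 * and target task tags, and HW tag.  Assuming an ocs_io_t pointer "io" and an
 * integer "status" are in scope, a call such as
 *
 *     scsi_io_printf(io, " SCSI status=%d\n", status);
 *
 * would emit a line of the form
 * "[fabric.nserve][0001][i:0012 t:0034 h:0002] SCSI status=0".
 */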
56 
57 void ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *node);
58 void ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *node);
59 int ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *node);
60 int ocs_mgmt_node_set(char *parent, char *name, char *value, void *node);
61 int ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
62 		void *arg_out, uint32_t arg_out_length, void *node);
63 static ocs_mgmt_functions_t node_mgmt_functions = {
64 	.get_list_handler	=	ocs_mgmt_node_list,
65 	.get_handler		=	ocs_mgmt_node_get,
66 	.get_all_handler	=	ocs_mgmt_node_get_all,
67 	.set_handler		=	ocs_mgmt_node_set,
68 	.exec_handler		=	ocs_mgmt_node_exec,
69 };
70 
71 
72 /**
73  * @ingroup node_common
74  * @brief Abort all outstanding ELS IOs for a node
75  *
76  * All active ELS IOs for the given node are aborted by posting an
77  * OCS_EVT_ABORT_ELS event to each ELS IO on the node's active list.
78  *
79  * @param node node for which ELS IOs will be aborted
80  */
81 
82 void
83 ocs_node_abort_all_els(ocs_node_t *node)
84 {
85 	ocs_io_t *els;
86 	ocs_io_t *els_next;
87 	ocs_node_cb_t cbdata = {0};
88 
89 	ocs_node_hold_frames(node);
90 	ocs_lock(&node->active_ios_lock);
91 		ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) {
92 			ocs_log_debug(node->ocs, "[%s] initiate ELS abort %s\n", node->display_name, els->display_name);
93 			ocs_unlock(&node->active_ios_lock);
94 			cbdata.els = els;
95 			ocs_els_post_event(els, OCS_EVT_ABORT_ELS, &cbdata);
96 			ocs_lock(&node->active_ios_lock);
97 		}
98 	ocs_unlock(&node->active_ios_lock);
99 }
100 
101 /**
102  * @ingroup node_common
103  * @brief Handle remote node events from HW
104  *
105  * Handle remote node events from the HW. Essentially, the HW event is translated into
106  * a node state machine event that is posted to the affected node.
107  *
108  * @param arg pointer to ocs
109  * @param event HW event to process
110  * @param data application specific data (pointer to the affected node)
111  *
112  * @return returns 0 for success, a negative error code value for failure.
113  */
114 int32_t
115 ocs_remote_node_cb(void *arg, ocs_hw_remote_node_event_e event, void *data)
116 {
117 	ocs_t *ocs = arg;
118 	ocs_sm_event_t	sm_event = OCS_EVT_LAST;
119 	ocs_remote_node_t *rnode = data;
120 	ocs_node_t *node = rnode->node;
121 
122 	switch (event) {
123 	case OCS_HW_NODE_ATTACH_OK:
124 		sm_event = OCS_EVT_NODE_ATTACH_OK;
125 		break;
126 
127 	case OCS_HW_NODE_ATTACH_FAIL:
128 		sm_event = OCS_EVT_NODE_ATTACH_FAIL;
129 		break;
130 
131 	case OCS_HW_NODE_FREE_OK:
132 		sm_event = OCS_EVT_NODE_FREE_OK;
133 		break;
134 
135 	case OCS_HW_NODE_FREE_FAIL:
136 		sm_event = OCS_EVT_NODE_FREE_FAIL;
137 		break;
138 
139 	default:
140 		ocs_log_test(ocs, "unhandled event %#x\n", event);
141 		return -1;
142 	}
143 
144 	/* If we're using HLM, forward the NODE_ATTACH_OK/FAIL event to all nodes in the node group */
145 	if ((node->node_group != NULL) &&
146 			((sm_event == OCS_EVT_NODE_ATTACH_OK) || (sm_event == OCS_EVT_NODE_ATTACH_FAIL))) {
147 		ocs_node_t *n = NULL;
148 		uint8_t		attach_ok = sm_event == OCS_EVT_NODE_ATTACH_OK;
149 
150 		ocs_sport_lock(node->sport);
151 		{
152 			ocs_list_foreach(&node->sport->node_list, n) {
153 				if (node == n) {
154 					continue;
155 				}
156 				ocs_node_lock(n);
157 					if ((!n->rnode.attached) && (node->node_group == n->node_group)) {
158 						n->rnode.attached = attach_ok;
159 						node_printf(n, "rpi[%d] deferred HLM node attach %s posted\n",
160 								n->rnode.index, attach_ok ? "ok" : "fail");
161 						ocs_node_post_event(n, sm_event, NULL);
162 					}
163 				ocs_node_unlock(n);
164 			}
165 		}
166 
167 		ocs_sport_unlock(node->sport);
168 	}
169 
170 	ocs_node_post_event(node, sm_event, NULL);
171 
172 	return 0;
173 }
174 
175 /**
176  * @ingroup node_alloc
177  * @brief Find an FC node structure given the FC port ID
178  *
179  * @param sport the SPORT to search
180  * @param port_id FC port ID
181  *
182  * @return pointer to the object or NULL if not found
183  */
184 ocs_node_t *
185 ocs_node_find(ocs_sport_t *sport, uint32_t port_id)
186 {
187 	ocs_node_t *node;
188 
189 	ocs_assert(sport->lookup, NULL);
190 	ocs_sport_lock(sport);
191 		node = spv_get(sport->lookup, port_id);
192 	ocs_sport_unlock(sport);
193 	return node;
194 }
195 
196 /**
197  * @ingroup node_alloc
198  * @brief Find an FC node structure given the WWPN
199  *
200  * @param sport the SPORT to search
201  * @param wwpn the WWPN to search for (host endian)
202  *
203  * @return pointer to the object or NULL if not found
204  */
205 ocs_node_t *
206 ocs_node_find_wwpn(ocs_sport_t *sport, uint64_t wwpn)
207 {
208 	ocs_node_t *node = NULL;
209 
210 	ocs_assert(sport, NULL);
211 
212 	ocs_sport_lock(sport);
213 		ocs_list_foreach(&sport->node_list, node) {
214 			if (ocs_node_get_wwpn(node) == wwpn) {
215 				ocs_sport_unlock(sport);
216 				return node;
217 			}
218 		}
219 	ocs_sport_unlock(sport);
220 	return NULL;
221 }
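
/*
 * Lookup sketch (illustrative only; "sport", the port ID, and "wwpn" are
 * assumed example values): a caller holding a sport can resolve a remote port
 * either by its FC address or by its host-endian WWPN.
 *
 *     ocs_node_t *by_id   = ocs_node_find(sport, 0x010203);
 *     ocs_node_t *by_wwpn = ocs_node_find_wwpn(sport, wwpn);
 *
 * Both return NULL when no matching node exists; ocs_node_find() additionally
 * requires the sport's lookup sparse vector to have been initialized.
 */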
222 
223 /**
224  * @ingroup node_alloc
225  * @brief allocate node object pool
226  *
227  * A pool of ocs_node_t objects is allocated.
228  *
229  * @param ocs pointer to driver instance context
230  * @param node_count count of nodes to allocate
231  *
232  * @return returns 0 for success, a negative error code value for failure.
233  */
234 
235 int32_t
236 ocs_node_create_pool(ocs_t *ocs, uint32_t node_count)
237 {
238 	ocs_xport_t *xport = ocs->xport;
239 	uint32_t i;
240 	ocs_node_t *node;
241 	uint32_t max_sge;
242 	uint32_t num_sgl;
243 	uint64_t max_xfer_size;
244 	int32_t rc;
245 
246 	xport->nodes_count = node_count;
247 
248 	xport->nodes = ocs_malloc(ocs, node_count * sizeof(ocs_node_t *), OCS_M_ZERO | OCS_M_NOWAIT);
249 	if (xport->nodes == NULL) {
250 		ocs_log_err(ocs, "node ptrs allocation failed");
251 		return -1;
252 	}
253 
254 	if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge) &&
255 	    0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &num_sgl)) {
256 		max_xfer_size = (max_sge * (uint64_t)num_sgl);
257 	} else {
258 		max_xfer_size = 65536;
259 	}
260 
261 	if (max_xfer_size > 65536)
262 		max_xfer_size = 65536;
263 
264 	ocs_list_init(&xport->nodes_free_list, ocs_node_t, link);
265 
266 
267 	for (i = 0; i < node_count; i ++) {
268 		node = ocs_malloc(ocs, sizeof(ocs_node_t), OCS_M_ZERO | OCS_M_NOWAIT);
269 		if (node == NULL) {
270 			ocs_log_err(ocs, "node allocation failed");
271 			goto error;
272 		}
273 
274 		/* Assign any persistent field values */
275 		node->instance_index = i;
276 		node->max_wr_xfer_size = max_xfer_size;
277 		node->rnode.indicator = UINT32_MAX;
278 
279 		rc = ocs_dma_alloc(ocs, &node->sparm_dma_buf, 256, 16);
280 		if (rc) {
281 			ocs_free(ocs, node, sizeof(ocs_node_t));
282 			ocs_log_err(ocs, "ocs_dma_alloc failed: %d\n", rc);
283 			goto error;
284 		}
285 
286 		xport->nodes[i] = node;
287 		ocs_list_add_tail(&xport->nodes_free_list, node);
288 	}
289 	return 0;
290 
291 error:
292 	ocs_node_free_pool(ocs);
293 	return -1;
294 }
295 
296 /**
297  * @ingroup node_alloc
298  * @brief free node object pool
299  *
300  * The pool of previously allocated node objects is freed
301  *
302  * @param ocs pointer to driver instance context
303  *
304  * @return none
305  */
306 
307 void
308 ocs_node_free_pool(ocs_t *ocs)
309 {
310 	ocs_xport_t *xport = ocs->xport;
311 	ocs_node_t *node;
312 	uint32_t i;
313 
314 	if (!xport->nodes)
315 		return;
316 
317 	ocs_device_lock(ocs);
318 
319 	for (i = 0; i < xport->nodes_count; i ++) {
320 		node = xport->nodes[i];
321 		if (node) {
322 			/* free sparam_dma_buf */
323 			ocs_dma_free(ocs, &node->sparm_dma_buf);
324 			ocs_free(ocs, node, sizeof(ocs_node_t));
325 		}
326 		xport->nodes[i] = NULL;
327 	}
328 
329 	ocs_free(ocs, xport->nodes, (xport->nodes_count * sizeof(ocs_node_t *)));
330 
331 	ocs_device_unlock(ocs);
332 }
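
/*
 * Pool lifecycle sketch (illustrative only; the node count is an assumed
 * example value): the node pool is typically created once per ocs_t instance
 * before any nodes are allocated and is destroyed at teardown.
 *
 *     if (ocs_node_create_pool(ocs, 128) != 0) {
 *         ocs_log_err(ocs, "node pool allocation failed\n");
 *         return -1;
 *     }
 *     ...
 *     ocs_node_free_pool(ocs);
 */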
333 
334 /**
335  * @ingroup node_alloc
336  * @brief return pointer to node object given instance index
337  *
338  * A pointer to the node object given by an instance index is returned.
339  *
340  * @param ocs pointer to driver instance context
341  * @param index instance index
342  *
343  * @return returns pointer to node object, or NULL
344  */
345 
346 ocs_node_t *
347 ocs_node_get_instance(ocs_t *ocs, uint32_t index)
348 {
349 	ocs_xport_t *xport = ocs->xport;
350 	ocs_node_t *node = NULL;
351 
352 	if (index >= (xport->nodes_count)) {
353 		ocs_log_test(ocs, "invalid index: %d\n", index);
354 		return NULL;
355 	}
356 	node = xport->nodes[index];
357 	return node->attached ? node : NULL;
358 }
359 
360 /**
361  * @ingroup node_alloc
362  * @brief Allocate an fc node structure and add to node list
363  *
364  * @param sport pointer to the SPORT from which this node is allocated
365  * @param port_id FC port ID of new node
366  * @param init Port is an initiator (sent a PLOGI)
367  * @param targ Port is potentially a target
368  *
369  * @return pointer to the object or NULL if none available
370  */
371 
372 ocs_node_t *
373 ocs_node_alloc(ocs_sport_t *sport, uint32_t port_id, uint8_t init, uint8_t targ)
374 {
375 	int32_t rc;
376 	ocs_node_t *node = NULL;
377 	uint32_t instance_index;
378 	uint32_t max_wr_xfer_size;
379 	ocs_t *ocs = sport->ocs;
380 	ocs_xport_t *xport = ocs->xport;
381 	ocs_dma_t sparm_dma_buf;
382 
383 	ocs_assert(sport, NULL);
384 
385 	if (sport->shutting_down) {
386 		ocs_log_debug(ocs, "node allocation when shutting down %06x", port_id);
387 		return NULL;
388 	}
389 
390 	ocs_device_lock(ocs);
391 		node = ocs_list_remove_head(&xport->nodes_free_list);
392 	ocs_device_unlock(ocs);
393 	if (node == NULL) {
394 		ocs_log_err(ocs, "node allocation failed %06x", port_id);
395 		return NULL;
396 	}
397 
398 	/* Save persistent values across memset zero */
399 	instance_index = node->instance_index;
400 	max_wr_xfer_size = node->max_wr_xfer_size;
401 	sparm_dma_buf = node->sparm_dma_buf;
402 
403 	ocs_memset(node, 0, sizeof(*node));
404 	node->instance_index = instance_index;
405 	node->max_wr_xfer_size = max_wr_xfer_size;
406 	node->sparm_dma_buf = sparm_dma_buf;
407 	node->rnode.indicator = UINT32_MAX;
408 
409 	node->sport = sport;
410 	ocs_sport_lock(sport);
411 
412 		node->ocs = ocs;
413 		node->init = init;
414 		node->targ = targ;
415 
416 		rc = ocs_hw_node_alloc(&ocs->hw, &node->rnode, port_id, sport);
417 		if (rc) {
418 			ocs_log_err(ocs, "ocs_hw_node_alloc failed: %d\n", rc);
419 			ocs_sport_unlock(sport);
420 
421 			/* Return back to pool. */
422 			ocs_device_lock(ocs);
423 			ocs_list_add_tail(&xport->nodes_free_list, node);
424 			ocs_device_unlock(ocs);
425 
426 			return NULL;
427 		}
428 		ocs_list_add_tail(&sport->node_list, node);
429 
430 		ocs_node_lock_init(node);
431 		ocs_lock_init(ocs, &node->pend_frames_lock, "pend_frames_lock[%d]", node->instance_index);
432 		ocs_list_init(&node->pend_frames, ocs_hw_sequence_t, link);
433 		ocs_lock_init(ocs, &node->active_ios_lock, "active_ios[%d]", node->instance_index);
434 		ocs_list_init(&node->active_ios, ocs_io_t, link);
435 		ocs_list_init(&node->els_io_pend_list, ocs_io_t, link);
436 		ocs_list_init(&node->els_io_active_list, ocs_io_t, link);
437 		ocs_scsi_io_alloc_enable(node);
438 
439 		/* zero the service parameters */
440 		ocs_memset(node->sparm_dma_buf.virt, 0, node->sparm_dma_buf.size);
441 
442 		node->rnode.node = node;
443 		node->sm.app = node;
444 		node->evtdepth = 0;
445 
446 		ocs_node_update_display_name(node);
447 
448 		spv_set(sport->lookup, port_id, node);
449 	ocs_sport_unlock(sport);
450 	node->mgmt_functions = &node_mgmt_functions;
451 
452 	return node;
453 }
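
/*
 * Allocation sketch (illustrative only; the port ID is an assumed example
 * value): discovery code allocates a node for a newly discovered remote target
 * port and later releases it through the shutdown path, which ends in
 * ocs_node_free().
 *
 *     ocs_node_t *node = ocs_node_alloc(sport, 0x010203, FALSE, TRUE);
 *     if (node == NULL) {
 *         return -1;
 *     }
 */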
454 
455 /**
456  * @ingroup node_alloc
457  * @brief free a node structure
458  *
459  * The node structure given by 'node' is freed.
460  *
461  * @param node the node to free
462  *
463  * @return returns 0 for success, a negative error code value for failure.
464  */
465 
466 int32_t
467 ocs_node_free(ocs_node_t *node)
468 {
469 	ocs_sport_t *sport;
470 	ocs_t *ocs;
471 	ocs_xport_t *xport;
472 	ocs_hw_rtn_e rc = 0;
473 	ocs_node_t *ns = NULL;
474 	int post_all_free = FALSE;
475 
476 	ocs_assert(node, -1);
477 	ocs_assert(node->sport, -1);
478 	ocs_assert(node->ocs, -1);
479 	sport = node->sport;
480 	ocs_assert(sport, -1);
481 	ocs = node->ocs;
482 	ocs_assert(ocs->xport, -1);
483 	xport = ocs->xport;
484 
485 	node_printf(node, "Free'd\n");
486 
487 	if(node->refound) {
488 		/*
489 		 * Save the name server node. We will send a fake RSCN event at
490 		 * the end to handle any RSCN events ignored during node deletion.
491 		 */
492 		ns = ocs_node_find(node->sport, FC_ADDR_NAMESERVER);
493 	}
494 
495 	/* Remove from node list */
496 	ocs_sport_lock(sport);
497 		ocs_list_remove(&sport->node_list, node);
498 
499 		/* Free HW resources */
500 		if (OCS_HW_RTN_IS_ERROR((rc = ocs_hw_node_free_resources(&ocs->hw, &node->rnode)))) {
501 			ocs_log_test(ocs, "ocs_hw_node_free_resources failed: %d\n", rc);
502 			rc = -1;
503 		}
504 
505 		/* if the gidpt_delay_timer is still running, then delete it */
506 		if (ocs_timer_pending(&node->gidpt_delay_timer)) {
507 			ocs_del_timer(&node->gidpt_delay_timer);
508 		}
509 
510 		if (node->fcp2device) {
511 			ocs_del_crn(node);
512 		}
513 
514 		/* remove entry from sparse vector list */
515 		if (sport->lookup == NULL) {
516 			ocs_log_test(node->ocs, "assertion failed: sport lookup is NULL\n");
517 			ocs_sport_unlock(sport);
518 			return -1;
519 		}
520 
521 		spv_set(sport->lookup, node->rnode.fc_id, NULL);
522 
523 		/*
524 		 * If the node_list is empty, then post an ALL_CHILD_NODES_FREE event to the sport,
525 		 * after the lock is released.  The sport may be freed as a result of the event.
526 		 */
527 		if (ocs_list_empty(&sport->node_list)) {
528 			post_all_free = TRUE;
529 		}
530 
531 	ocs_sport_unlock(sport);
532 
533 	if (post_all_free) {
534 		ocs_sm_post_event(&sport->sm, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
535 	}
536 
537 	node->sport = NULL;
538 	node->sm.current_state = NULL;
539 
540 	ocs_node_lock_free(node);
541 	ocs_lock_free(&node->pend_frames_lock);
542 	ocs_lock_free(&node->active_ios_lock);
543 
544 	/* return to free list */
545 	ocs_device_lock(ocs);
546 		ocs_list_add_tail(&xport->nodes_free_list, node);
547 	ocs_device_unlock(ocs);
548 
549 	if(ns != NULL) {
550 		/* sending fake RSCN event to name server node */
551 		ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, NULL);
552 	}
553 
554 	return rc;
555 }
556 
557 /**
558  * @brief free memory resources of a node object
559  *
560  * The node object's child objects are freed, after which the
561  * node object is freed.
562  *
563  * @param node pointer to a node object
564  *
565  * @return none
566  */
567 
568 void
569 ocs_node_force_free(ocs_node_t *node)
570 {
571 	ocs_io_t *io;
572 	ocs_io_t *next;
573 	ocs_io_t *els;
574 	ocs_io_t *els_next;
575 
576 	/* shutdown sm processing */
577 	ocs_sm_disable(&node->sm);
578 	ocs_strncpy(node->prev_state_name, node->current_state_name, sizeof(node->prev_state_name));
579 	ocs_strncpy(node->current_state_name, "disabled", sizeof(node->current_state_name));
580 
581 	/* Let the backend cleanup if needed */
582 	ocs_scsi_notify_node_force_free(node);
583 
584 	ocs_lock(&node->active_ios_lock);
585 		ocs_list_foreach_safe(&node->active_ios, io, next) {
586 			ocs_list_remove(&io->node->active_ios, io);
587 			ocs_io_free(node->ocs, io);
588 		}
589 	ocs_unlock(&node->active_ios_lock);
590 
591 	/* free all pending ELS IOs */
592 	ocs_lock(&node->active_ios_lock);
593 		ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
594 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
595 			ocs_list_remove(&node->els_io_pend_list, els);
596 
597 			ocs_io_free(node->ocs, els);
598 		}
599 	ocs_unlock(&node->active_ios_lock);
600 
601 	/* free all active ELS IOs */
602 	ocs_lock(&node->active_ios_lock);
603 		ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) {
604 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
605 			ocs_list_remove(&node->els_io_active_list, els);
606 
607 			ocs_io_free(node->ocs, els);
608 		}
609 	ocs_unlock(&node->active_ios_lock);
610 
611 	/* manually purge pending frames (if any) */
612 	ocs_node_purge_pending(node);
613 
614 	ocs_node_free(node);
615 }
616 
617 /**
618  * @ingroup node_common
619  * @brief Perform HW call to attach a remote node
620  *
621  * @param node pointer to node object
622  *
623  * @return 0 on success, non-zero otherwise
624  */
625 int32_t
626 ocs_node_attach(ocs_node_t *node)
627 {
628 	int32_t rc = 0;
629 	ocs_sport_t *sport = node->sport;
630 	ocs_domain_t *domain = sport->domain;
631 	ocs_t *ocs = node->ocs;
632 
633 	if (!domain->attached) {
634 		ocs_log_test(ocs, "Warning: ocs_node_attach with unattached domain\n");
635 		return -1;
636 	}
637 	/* Update node->wwpn/wwnn */
638 
639 	ocs_node_build_eui_name(node->wwpn, sizeof(node->wwpn), ocs_node_get_wwpn(node));
640 	ocs_node_build_eui_name(node->wwnn, sizeof(node->wwnn), ocs_node_get_wwnn(node));
641 
642 	if (ocs->enable_hlm) {
643 		ocs_node_group_init(node);
644 	}
645 
646 	ocs_dma_copy_in(&node->sparm_dma_buf, node->service_params+4, sizeof(node->service_params)-4);
647 
648 	/* take lock to protect node->rnode.attached */
649 	ocs_node_lock(node);
650 		rc = ocs_hw_node_attach(&ocs->hw, &node->rnode, &node->sparm_dma_buf);
651 		if (OCS_HW_RTN_IS_ERROR(rc)) {
652 			ocs_log_test(ocs, "ocs_hw_node_attach failed: %d\n", rc);
653 		}
654 	ocs_node_unlock(node);
655 
656 	return rc;
657 }
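
/*
 * Attach sketch (illustrative only), mirroring the pattern used in
 * __ocs_node_shutdown() below: ocs_hw_node_attach() may complete synchronously,
 * in which case the caller posts OCS_EVT_NODE_ATTACH_OK itself rather than
 * waiting for the HW completion callback.
 *
 *     rc = ocs_node_attach(node);
 *     ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
 *     if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
 *         ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
 *     }
 */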
658 
659 /**
660  * @ingroup node_common
661  * @brief Generate text for a node's fc_id
662  *
663  * The text for a node's fc_id is generated, either as a well-known name or as a
664  * 6-digit hex value.
665  *
666  * @param fc_id fc_id
667  * @param buffer text buffer
668  * @param buffer_length text buffer length in bytes
669  *
670  * @return none
671  */
672 
673 void
674 ocs_node_fcid_display(uint32_t fc_id, char *buffer, uint32_t buffer_length)
675 {
676 	switch (fc_id) {
677 	case FC_ADDR_FABRIC:
678 		ocs_snprintf(buffer, buffer_length, "fabric");
679 		break;
680 	case FC_ADDR_CONTROLLER:
681 		ocs_snprintf(buffer, buffer_length, "fabctl");
682 		break;
683 	case FC_ADDR_NAMESERVER:
684 		ocs_snprintf(buffer, buffer_length, "nserve");
685 		break;
686 	default:
687 		if (FC_ADDR_IS_DOMAIN_CTRL(fc_id)) {
688 			ocs_snprintf(buffer, buffer_length, "dctl%02x",
689 				FC_ADDR_GET_DOMAIN_CTRL(fc_id));
690 		} else {
691 			ocs_snprintf(buffer, buffer_length, "%06x", fc_id);
692 		}
693 		break;
694 	}
695 
696 }
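
/*
 * Formatting sketch (illustrative only): well-known addresses map to short
 * names while any other address is rendered as a 6-digit hex value, so
 * FC_ADDR_NAMESERVER yields "nserve" and an N_Port ID such as 0x010203 yields
 * "010203".
 *
 *     char buf[16];
 *     ocs_node_fcid_display(FC_ADDR_NAMESERVER, buf, sizeof(buf));
 */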
697 
698 /**
699  * @brief update the node's display name
700  *
701  * The node's display name is updated. This is sometimes needed because the sport
702  * portion of the name is updated after the node is allocated.
703  *
704  * @param node pointer to the node object
705  *
706  * @return none
707  */
708 
709 void
710 ocs_node_update_display_name(ocs_node_t *node)
711 {
712 	uint32_t port_id = node->rnode.fc_id;
713 	ocs_sport_t *sport = node->sport;
714 	char portid_display[16];
715 
716 	ocs_assert(sport);
717 
718 	ocs_node_fcid_display(port_id, portid_display, sizeof(portid_display));
719 
720 	ocs_snprintf(node->display_name, sizeof(node->display_name), "%s.%s", sport->display_name, portid_display);
721 }
722 
723 /**
724  * @brief cleans up an XRI for the pending link services accept by aborting the
725  *         XRI if required.
726  *
727  * <h3 class="desc">Description</h3>
728  * This function is called when the LS accept is not sent.
729  *
730  * @param node Node whose pending LS accept IO should be cleaned up
731  */
732 
733 void
734 ocs_node_send_ls_io_cleanup(ocs_node_t *node)
735 {
736 	ocs_t *ocs = node->ocs;
737 
738 	if (node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) {
739 		ocs_assert(node->ls_acc_io);
740 		ocs_log_debug(ocs, "[%s] cleaning up LS_ACC oxid=0x%x\n",
741 			node->display_name, node->ls_acc_oxid);
742 
743 		node->ls_acc_io->hio = NULL;
744 		ocs_els_io_free(node->ls_acc_io);
745 		node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
746 		node->ls_acc_io = NULL;
747 	}
748 }
749 
750 /**
751  * @ingroup node_common
752  * @brief state: shutdown a node
753  *
754  * A node is shut down.
755  *
756  * @param ctx remote node sm context
757  * @param evt event to process
758  * @param arg per event optional argument
759  *
760  * @return returns NULL
761  *
762  * @note
763  */
764 
765 void *
766 __ocs_node_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
767 {
768 	int32_t rc;
769 	std_node_state_decl();
770 
771 	node_sm_trace();
772 
773 	switch(evt) {
774 	case OCS_EVT_ENTER: {
775 		ocs_node_hold_frames(node);
776 		ocs_assert(ocs_node_active_ios_empty(node), NULL);
777 		ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
778 
779 		/* by default, we will be freeing node after we unwind */
780 		node->req_free = 1;
781 
782 		switch (node->shutdown_reason) {
783 		case OCS_NODE_SHUTDOWN_IMPLICIT_LOGO:
784 			/* sm: if shutdown reason is implicit logout / ocs_node_attach
785 			 * Node shutdown b/c of PLOGI received when node already
786 			 * logged in. We have PLOGI service parameters, so submit
787 			 * node attach; we won't be freeing this node
788 			 */
789 
790 			/* currently, only case for implicit logo is PLOGI recvd. Thus,
791 			 * node's ELS IO pending list won't be empty (PLOGI will be on it)
792 			 */
793 			ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
794 			node_printf(node, "Shutdown reason: implicit logout, re-authenticate\n");
795 
796 			ocs_scsi_io_alloc_enable(node);
797 
798 			/* Re-attach node with the same HW node resources */
799 			node->req_free = 0;
800 			rc = ocs_node_attach(node);
801 			ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
802 			if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
803 				ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
804 			}
805 			break;
806 		case OCS_NODE_SHUTDOWN_EXPLICIT_LOGO: {
807 			int8_t pend_frames_empty;
808 
809 			/* cleanup any pending LS_ACC ELSs */
810 			ocs_node_send_ls_io_cleanup(node);
811 			ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL);
812 
813 			ocs_lock(&node->pend_frames_lock);
814 				pend_frames_empty = ocs_list_empty(&node->pend_frames);
815 			ocs_unlock(&node->pend_frames_lock);
816 
817 			/* there are two scenarios where we want to keep this node alive:
818 			 * 1. there are pending frames that need to be processed or
819 			 * 2. we're an initiator and the remote node is a target and we
820 			 *    need to re-authenticate
821 			 */
822 			node_printf(node, "Shutdown: explicit logo pend=%d sport.ini=%d node.tgt=%d\n",
823 				    !pend_frames_empty, node->sport->enable_ini, node->targ);
824 
825 			if((!pend_frames_empty) || (node->sport->enable_ini && node->targ)) {
826 				uint8_t send_plogi = FALSE;
827 				if (node->sport->enable_ini && node->targ) {
828 					/* we're an initiator and node shutting down is a target; we'll
829 					 * need to re-authenticate in initial state
830 					 */
831 					send_plogi = TRUE;
832 				}
833 
834 				/* transition to __ocs_d_init (will retain HW node resources) */
835 				ocs_scsi_io_alloc_enable(node);
836 				node->req_free = 0;
837 
838 				/* either pending frames exist, or we're re-authenticating with PLOGI
839 				 * (or both); in either case, return to initial state
840 				 */
841 				ocs_node_init_device(node, send_plogi);
842 
843 			}
844 			/* else: let node shutdown occur */
845 			break;
846 		}
847 		case OCS_NODE_SHUTDOWN_DEFAULT:
848 		default:
849 			/* shutdown due to link down, node going away (xport event) or
850 			 * sport shutdown, purge pending and proceed to cleanup node
851 			 */
852 
853 			/* cleanup any pending LS_ACC ELSs */
854 			ocs_node_send_ls_io_cleanup(node);
855 			ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL);
856 
857 			node_printf(node, "Shutdown reason: default, purge pending\n");
858 			ocs_node_purge_pending(node);
859 			break;
860 		}
861 
862 		break;
863 	}
864 	case OCS_EVT_EXIT:
865 		ocs_node_accept_frames(node);
866 		break;
867 
868 	default:
869 		__ocs_node_common(__func__, ctx, evt, arg);
870 		return NULL;
871 	}
872 
873 	return NULL;
874 }
875 
876 /**
877  * @ingroup node_common
878  * @brief Checks to see if ELS's have been quiesced
879  *
880  * Check if ELS's have been quiesced. If so, transition to the
881  * next state in the shutdown process.
882  *
883  * @param node Node for which ELS's are checked
884  *
885  * @return Returns 1 if ELS's have been quiesced, 0 otherwise.
886  */
887 static int
888 ocs_node_check_els_quiesced(ocs_node_t *node)
889 {
890 	ocs_assert(node, -1);
891 
892 	/* check to see if ELS requests, completions are quiesced */
893 	if ((node->els_req_cnt == 0) && (node->els_cmpl_cnt == 0) &&
894 	    ocs_els_io_list_empty(node, &node->els_io_active_list)) {
895 		if (!node->attached) {
896 			/* hw node detach already completed, proceed */
897 			node_printf(node, "HW node not attached\n");
898 			ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
899 		} else {
900 			/* hw node detach hasn't completed, transition and wait */
901 			node_printf(node, "HW node still attached\n");
902 			ocs_node_transition(node, __ocs_node_wait_node_free, NULL);
903 		}
904 		return 1;
905 	}
906 	return 0;
907 }
908 
909 /**
910  * @ingroup node_common
911  * @brief Initiate node IO cleanup.
912  *
913  * Note: this function must be called with a non-attached node
914  * or a node for which the node detach (ocs_hw_node_detach())
915  * has already been initiated.
916  *
917  * @param node Node for which shutdown is initiated
918  *
919  * @return Returns None.
920  */
921 
922 void
923 ocs_node_initiate_cleanup(ocs_node_t *node)
924 {
925 	ocs_io_t *els;
926 	ocs_io_t *els_next;
927 	ocs_t *ocs;
928 	ocs_assert(node);
929 	ocs = node->ocs;
930 
931 	/* first cleanup ELS's that are pending (not yet active) */
932 	ocs_lock(&node->active_ios_lock);
933 		ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
934 
935 			/* skip the ELS IO for which a response will be sent after shutdown */
936 			if ((node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) &&
937 			    (els == node->ls_acc_io)) {
938 				continue;
939 			}
940 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
941 			node_printf(node, "Freeing pending els %s\n", els->display_name);
942 			ocs_list_remove(&node->els_io_pend_list, els);
943 
944 			ocs_io_free(node->ocs, els);
945 		}
946 	ocs_unlock(&node->active_ios_lock);
947 
948 	if (node->ls_acc_io && node->ls_acc_io->hio != NULL) {
949 		/*
950 		 * if there's an IO that will result in an LS_ACC after
951 		 * shutdown and its HW IO is non-NULL, it better be an
952 		 * implicit logout in vanilla sequence coalescing. In this
953 		 * case, force the LS_ACC to go out on another XRI (hio)
954 		 * since the previous will have been aborted by the UNREG_RPI
955 		 */
956 		ocs_assert(node->shutdown_reason == OCS_NODE_SHUTDOWN_IMPLICIT_LOGO);
957 		ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI);
958 		node_printf(node, "invalidating ls_acc_io due to implicit logo\n");
959 
960 		/* No need to abort because the unreg_rpi takes care of it, just free */
961 		ocs_hw_io_free(&ocs->hw, node->ls_acc_io->hio);
962 
963 		/* NULL out hio to force the LS_ACC to grab a new XRI */
964 		node->ls_acc_io->hio = NULL;
965 	}
966 
967 	/*
968 	 * if ELS's have already been quiesced, will move to next state
969 	 * if ELS's have not been quiesced, abort them
970 	 */
971 	if (ocs_node_check_els_quiesced(node) == 0) {
972 		/*
973 		 * Abort all ELS's since ELS's won't be aborted by HW
974 		 * node free.
975 		 */
976 		ocs_node_abort_all_els(node);
977 		ocs_node_transition(node, __ocs_node_wait_els_shutdown, NULL);
978 	}
979 }
980 
981 /**
982  * @ingroup node_common
983  * @brief Node state machine: Wait for all ELSs to complete.
984  *
985  * <h3 class="desc">Description</h3>
986  * State waits for all ELSs to complete after aborting all
987  * outstanding ELSs.
988  *
989  * @param ctx Remote node state machine context.
990  * @param evt Event to process.
991  * @param arg Per event optional argument.
992  *
993  * @return Returns NULL.
994  */
995 
996 void *
997 __ocs_node_wait_els_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
998 {
999 	uint8_t check_quiesce = FALSE;
1000 	std_node_state_decl();
1001 
1002 	node_sm_trace();
1003 
1004 	switch(evt) {
1005 
1006 	case OCS_EVT_ENTER: {
1007 		ocs_node_hold_frames(node);
1008 		if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1009 			node_printf(node, "All ELS IOs complete\n");
1010 			check_quiesce = TRUE;
1011 		}
1012 		break;
1013 	}
1014 	case OCS_EVT_EXIT:
1015 		ocs_node_accept_frames(node);
1016 		break;
1017 
1018 	case OCS_EVT_SRRS_ELS_REQ_OK:
1019 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1020 	case OCS_EVT_SRRS_ELS_REQ_RJT:
1021 	case OCS_EVT_ELS_REQ_ABORTED:
1022 		ocs_assert(node->els_req_cnt, NULL);
1023 		node->els_req_cnt--;
1024 		check_quiesce = TRUE;
1025 		break;
1026 
1027 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1028 	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1029 		ocs_assert(node->els_cmpl_cnt, NULL);
1030 		node->els_cmpl_cnt--;
1031 		check_quiesce = TRUE;
1032 		break;
1033 
1034 	case OCS_EVT_ALL_CHILD_NODES_FREE:
1035 		/* all ELS IO's complete */
1036 		node_printf(node, "All ELS IOs complete\n");
1037 		ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
1038 		check_quiesce = TRUE;
1039 		break;
1040 
1041 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1042 		break;
1043 
1044 	case OCS_EVT_DOMAIN_ATTACH_OK:
1045 		/* don't care about domain_attach_ok */
1046 		break;
1047 
1048 	/* ignore shutdown events as we're already in shutdown path */
1049 	case OCS_EVT_SHUTDOWN:
1050 		/* have default shutdown event take precedence */
1051 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1052 		/* fall through */
1053 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1054 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1055 		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1056 		break;
1057 
1058 	default:
1059 		__ocs_node_common(__func__, ctx, evt, arg);
1060 		return NULL;
1061 	}
1062 
1063 	if (check_quiesce) {
1064 		ocs_node_check_els_quiesced(node);
1065 	}
1066 	return NULL;
1067 }
1068 
1069 /**
1070  * @ingroup node_common
1071  * @brief Node state machine: Wait for a HW node free event to
1072  * complete.
1073  *
1074  * <h3 class="desc">Description</h3>
1075  * State waits for the node free event to be received from the HW.
1076  *
1077  * @param ctx Remote node state machine context.
1078  * @param evt Event to process.
1079  * @param arg Per event optional argument.
1080  *
1081  * @return Returns NULL.
1082  */
1083 
1084 void *
1085 __ocs_node_wait_node_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1086 {
1087 	std_node_state_decl();
1088 
1089 	node_sm_trace();
1090 
1091 	switch(evt) {
1092 
1093 	case OCS_EVT_ENTER:
1094 		ocs_node_hold_frames(node);
1095 		break;
1096 
1097 	case OCS_EVT_EXIT:
1098 		ocs_node_accept_frames(node);
1099 		break;
1100 
1101 	case OCS_EVT_NODE_FREE_OK:
1102 		/* node is officially no longer attached */
1103 		node->attached = FALSE;
1104 		ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
1105 		break;
1106 
1107 	case OCS_EVT_ALL_CHILD_NODES_FREE:
1108 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1109 		/* As IOs and ELS IOs complete, we expect to get these events */
1110 		break;
1111 
1112 	case OCS_EVT_DOMAIN_ATTACH_OK:
1113 		/* don't care about domain_attach_ok */
1114 		break;
1115 
1116 	/* ignore shutdown events as we're already in shutdown path */
1117 	case OCS_EVT_SHUTDOWN:
1118 		/* have default shutdown event take precedence */
1119 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1120 		/* Fall through */
1121 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1122 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1123 		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1124 		break;
1125 	default:
1126 		__ocs_node_common(__func__, ctx, evt, arg);
1127 		return NULL;
1128 	}
1129 
1130 	return NULL;
1131 }
1132 
1133 /**
1134  * @ingroup node_common
1135  * @brief state: initiate node shutdown
1136  *
1137  * State is entered when a node receives a shutdown event, and it's waiting
1138  * for all the active IOs and ELS IOs associated with the node to complete.
1139  *
1140  * @param ctx remote node sm context
1141  * @param evt event to process
1142  * @param arg per event optional argument
1143  *
1144  * @return returns NULL
1145  */
1146 
1147 void *
1148 __ocs_node_wait_ios_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1149 {
1150 	ocs_io_t *io;
1151 	ocs_io_t *next;
1152 	std_node_state_decl();
1153 
1154 	node_sm_trace();
1155 
1156 	switch(evt) {
1157 	case OCS_EVT_ENTER:
1158 		ocs_node_hold_frames(node);
1159 
1160 		/* first check to see if no ELS IOs are outstanding */
1161 		if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1162 			/* If there are any active IOs, free them. */
1163 			if (!ocs_node_active_ios_empty(node)) {
1164 				ocs_lock(&node->active_ios_lock);
1165 				ocs_list_foreach_safe(&node->active_ios, io, next) {
1166 					ocs_list_remove(&io->node->active_ios, io);
1167 					ocs_io_free(node->ocs, io);
1168 				}
1169 				ocs_unlock(&node->active_ios_lock);
1170 			}
1171 			ocs_node_transition(node, __ocs_node_shutdown, NULL);
1172 		}
1173 		break;
1174 
1175 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1176 	case OCS_EVT_ALL_CHILD_NODES_FREE: {
1177 		if (ocs_node_active_ios_empty(node) &&
1178 		    ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1179 			ocs_node_transition(node, __ocs_node_shutdown, NULL);
1180 		}
1181 		break;
1182 	}
1183 
1184 	case OCS_EVT_EXIT:
1185 		ocs_node_accept_frames(node);
1186 		break;
1187 
1188 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1189 		/* Can happen as ELS IOs complete */
1190 		ocs_assert(node->els_req_cnt, NULL);
1191 		node->els_req_cnt--;
1192 		break;
1193 
1194 	/* ignore shutdown events as we're already in shutdown path */
1195 	case OCS_EVT_SHUTDOWN:
1196 		/* have default shutdown event take precedence */
1197 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1198 		/* fall through */
1199 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1200 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1201 		ocs_log_debug(ocs, "[%s] %-20s\n", node->display_name, ocs_sm_event_name(evt));
1202 		break;
1203 	case OCS_EVT_DOMAIN_ATTACH_OK:
1204 		/* don't care about domain_attach_ok */
1205 		break;
1206 	default:
1207 		__ocs_node_common(__func__, ctx, evt, arg);
1208 		return NULL;
1209 	}
1210 
1211 	return NULL;
1212 }
1213 
1214 /**
1215  * @ingroup node_common
1216  * @brief state: common node event handler
1217  *
1218  * Handle common/shared node events
1219  *
1220  * @param funcname calling function's name
1221  * @param ctx remote node sm context
1222  * @param evt event to process
1223  * @param arg per event optional argument
1224  *
1225  * @return returns NULL
1226  */
1227 
1228 void *
1229 __ocs_node_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1230 {
1231 	ocs_node_t *node = NULL;
1232 	ocs_t *ocs = NULL;
1233 	ocs_node_cb_t *cbdata = arg;
1234 	ocs_assert(ctx, NULL);
1235 	ocs_assert(ctx->app, NULL);
1236 	node = ctx->app;
1237 	ocs_assert(node->ocs, NULL);
1238 	ocs = node->ocs;
1239 
1240 	switch(evt) {
1241 	case OCS_EVT_ENTER:
1242 	case OCS_EVT_REENTER:
1243 	case OCS_EVT_EXIT:
1244 	case OCS_EVT_SPORT_TOPOLOGY_NOTIFY:
1245 	case OCS_EVT_NODE_MISSING:
1246 	case OCS_EVT_FCP_CMD_RCVD:
1247 		break;
1248 
1249 	case OCS_EVT_NODE_REFOUND:
1250 		node->refound = 1;
1251 		break;
1252 
1253 	/* node->attached must be set appropriately for all node attach/detach events */
1254 	case OCS_EVT_NODE_ATTACH_OK:
1255 		node->attached = TRUE;
1256 		break;
1257 
1258 	case OCS_EVT_NODE_FREE_OK:
1259 	case OCS_EVT_NODE_ATTACH_FAIL:
1260 		node->attached = FALSE;
1261 		break;
1262 
1263 	/* handle any ELS completions that other states either didn't care about
1264 	 * or forgot about
1265 	 */
1266 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1267 	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1268 		ocs_assert(node->els_cmpl_cnt, NULL);
1269 		node->els_cmpl_cnt--;
1270 		break;
1271 
1272 	/* handle any ELS request completions that other states either didn't care about
1273 	 * or forgot about
1274 	 */
1275 	case OCS_EVT_SRRS_ELS_REQ_OK:
1276 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1277 	case OCS_EVT_SRRS_ELS_REQ_RJT:
1278 	case OCS_EVT_ELS_REQ_ABORTED:
1279 		ocs_assert(node->els_req_cnt, NULL);
1280 		node->els_req_cnt--;
1281 		break;
1282 
1283 	case OCS_EVT_ELS_RCVD: {
1284 		fc_header_t *hdr = cbdata->header->dma.virt;
1285 
1286 		/* Unsupported ELS was received, send LS_RJT, command not supported */
1287 		ocs_log_debug(ocs, "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
1288 			      node->display_name, funcname, ((uint8_t*)cbdata->payload->dma.virt)[0]);
1289 		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1290 			FC_REASON_COMMAND_NOT_SUPPORTED, FC_EXPL_NO_ADDITIONAL, 0,
1291 			NULL, NULL);
1292 		break;
1293 	}
1294 
1295 	case OCS_EVT_PLOGI_RCVD:
1296 	case OCS_EVT_FLOGI_RCVD:
1297 	case OCS_EVT_LOGO_RCVD:
1298 	case OCS_EVT_PRLI_RCVD:
1299 	case OCS_EVT_PRLO_RCVD:
1300 	case OCS_EVT_PDISC_RCVD:
1301 	case OCS_EVT_FDISC_RCVD:
1302 	case OCS_EVT_ADISC_RCVD:
1303 	case OCS_EVT_RSCN_RCVD:
1304 	case OCS_EVT_SCR_RCVD: {
1305 		fc_header_t *hdr = cbdata->header->dma.virt;
1306 		/* sm: / send ELS_RJT */
1307 		ocs_log_debug(ocs, "[%s] (%s) %s sending ELS_RJT\n",
1308 			      node->display_name, funcname, ocs_sm_event_name(evt));
1309 		/* if we didn't catch this in a state, send generic LS_RJT */
1310 		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1311 			FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NO_ADDITIONAL, 0,
1312 			NULL, NULL);
1313 
1314 		break;
1315 	}
1316 	case OCS_EVT_GID_PT_RCVD:
1317 	case OCS_EVT_RFT_ID_RCVD:
1318 	case OCS_EVT_RFF_ID_RCVD: {
1319 		fc_header_t *hdr = cbdata->header->dma.virt;
1320 		ocs_log_debug(ocs, "[%s] (%s) %s sending CT_REJECT\n",
1321 			      node->display_name, funcname, ocs_sm_event_name(evt));
1322 		ocs_send_ct_rsp(cbdata->io, hdr->ox_id, cbdata->payload->dma.virt, FCCT_HDR_CMDRSP_REJECT, FCCT_COMMAND_NOT_SUPPORTED, 0);
1323 		break;
1324 	}
1325 
1326 	case OCS_EVT_ABTS_RCVD: {
1327 		fc_header_t *hdr = cbdata->header->dma.virt;
1328 		ocs_log_debug(ocs, "[%s] (%s) %s sending BA_ACC\n",
1329 			      node->display_name, funcname, ocs_sm_event_name(evt));
1330 
1331 		/* sm: send BA_ACC */
1332 		ocs_bls_send_acc_hdr(cbdata->io, hdr);
1333 		break;
1334 	}
1335 
1336 	default:
1337 		ocs_log_test(node->ocs, "[%s] %-20s %-20s not handled\n", node->display_name, funcname,
1338 			ocs_sm_event_name(evt));
1339 		break;
1340 	}
1341 	return NULL;
1342 }
1343 
1344 
1345 /**
1346  * @ingroup node_common
1347  * @brief save node service parameters
1348  *
1349  * Service parameters are copied into the node structure
1350  *
1351  * @param node pointer to node structure
1352  * @param payload pointer to service parameters to save
1353  *
1354  * @return none
1355  */
1356 
1357 void
1358 ocs_node_save_sparms(ocs_node_t *node, void *payload)
1359 {
1360 	ocs_memcpy(node->service_params, payload, sizeof(node->service_params));
1361 }
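
/*
 * Usage sketch (illustrative only): a PLOGI receive handler typically saves the
 * received service parameters before attaching the node; "cbdata" is assumed to
 * be the ocs_node_cb_t event argument carrying the PLOGI payload.
 *
 *     ocs_node_save_sparms(node, cbdata->payload->dma.virt);
 *     rc = ocs_node_attach(node);
 */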
1362 
1363 /**
1364  * @ingroup node_common
1365  * @brief Post event to node state machine context
1366  *
1367  * This is used by the node state machine code to post events to the nodes.  Upon
1368  * completion of the event posting, if the nesting depth is zero and we're not holding
1369  * inbound frames, then the pending frames are processed.
1370  *
1371  * @param node pointer to node
1372  * @param evt event to post
1373  * @param arg event posting argument
1374  *
1375  * @return none
1376  */
1377 
1378 void
1379 ocs_node_post_event(ocs_node_t *node, ocs_sm_event_t evt, void *arg)
1380 {
1381 	int free_node = FALSE;
1382 	ocs_assert(node);
1383 
1384 	ocs_node_lock(node);
1385 		node->evtdepth ++;
1386 
1387 		ocs_sm_post_event(&node->sm, evt, arg);
1388 
1389 		/* If our event call depth is one and we're not holding frames
1390 		 * then we can dispatch any pending frames.   We don't want to allow
1391 		 * the ocs_process_node_pending() call to recurse.
1392 		 */
1393 		if (!node->hold_frames && (node->evtdepth == 1)) {
1394 			ocs_process_node_pending(node);
1395 		}
1396 		node->evtdepth --;
1397 
1398 		/* Free the node object if so requested, and we're at an event
1399 		 * call depth of zero
1400 		 */
1401 		if ((node->evtdepth == 0) && node->req_free) {
1402 			free_node = TRUE;
1403 		}
1404 	ocs_node_unlock(node);
1405 
1406 	if (free_node) {
1407 		ocs_node_free(node);
1408 	}
1409 
1410 	return;
1411 }
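
/*
 * Posting sketch (illustrative only), mirroring the fake-RSCN notification in
 * ocs_node_free() above: any code holding a node pointer may inject a state
 * machine event; pending frames are dispatched once the outermost post
 * completes, provided the node is not holding frames.
 *
 *     ocs_node_post_event(ns_node, OCS_EVT_RSCN_RCVD, NULL);
 */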
1412 
1413 /**
1414  * @ingroup node_common
1415  * @brief transition state of a node
1416  *
1417  * The node's state is transitioned to the requested state.  Entry/Exit
1418  * events are posted as needed.
1419  *
1420  * @param node pointer to node
1421  * @param state state to transition to
1422  * @param data transition data
1423  *
1424  * @return none
1425  */
1426 
1427 void
1428 ocs_node_transition(ocs_node_t *node, ocs_sm_function_t state, void *data)
1429 {
1430 	ocs_sm_ctx_t *ctx = &node->sm;
1431 
1432 	ocs_node_lock(node);
1433 		if (ctx->current_state == state) {
1434 			ocs_node_post_event(node, OCS_EVT_REENTER, data);
1435 		} else {
1436 			ocs_node_post_event(node, OCS_EVT_EXIT, data);
1437 			ctx->current_state = state;
1438 			ocs_node_post_event(node, OCS_EVT_ENTER, data);
1439 		}
1440 	ocs_node_unlock(node);
1441 }
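
/*
 * Transition sketch (illustrative only), following the shutdown path above:
 * entry/exit events are posted automatically, so a caller only names the
 * target state.
 *
 *     ocs_node_abort_all_els(node);
 *     ocs_node_transition(node, __ocs_node_wait_els_shutdown, NULL);
 */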
1442 
1443 /**
1444  * @ingroup node_common
1445  * @brief build EUI formatted WWN
1446  *
1447  * Build a WWN using the somewhat transport-agnostic iSCSI naming specification. For FC,
1448  * the eui. format is used: an ASCII string such as "eui.10000000C9A19501".
1449  *
1450  * @param buffer buffer to place formatted name into
1451  * @param buffer_len length in bytes of the buffer
1452  * @param eui_name cpu endian 64 bit WWN value
1453  *
1454  * @return none
1455  */
1456 
1457 void
1458 ocs_node_build_eui_name(char *buffer, uint32_t buffer_len, uint64_t eui_name)
1459 {
1460 	ocs_memset(buffer, 0, buffer_len);
1461 
1462 	ocs_snprintf(buffer, buffer_len, "eui.%016llx", (unsigned long long)eui_name);
1463 }
1464 
1465 /**
1466  * @ingroup node_common
1467  * @brief return node's WWPN as a uint64_t
1468  *
1469  * The WWPN is computed from service parameters and returned as a uint64_t
1470  *
1471  * @param node pointer to node structure
1472  *
1473  * @return WWPN
1474  *
1475  */
1476 
1477 uint64_t
1478 ocs_node_get_wwpn(ocs_node_t *node)
1479 {
1480 	fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params;
1481 
1482 	return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
1483 }
1484 
1485 /**
1486  * @ingroup node_common
1487  * @brief return node's WWNN as a uint64_t
1488  *
1489  * The WWNN is computed from service parameters and returned as a uint64_t
1490  *
1491  * @param node pointer to node structure
1492  *
1493  * @return WWNN
1494  *
1495  */
1496 
1497 uint64_t
1498 ocs_node_get_wwnn(ocs_node_t *node)
1499 {
1500 	fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params;
1501 
1502 	return (((uint64_t)ocs_be32toh(sp->node_name_hi) << 32ll) | (ocs_be32toh(sp->node_name_lo)));
1503 }
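
/*
 * Naming sketch (illustrative only; the buffer size is an arbitrary example),
 * following the same steps as ocs_node_attach() above: the WWPN extracted from
 * the saved service parameters can be rendered in eui. form for display.
 *
 *     char wwpn_name[64];
 *     ocs_node_build_eui_name(wwpn_name, sizeof(wwpn_name),
 *                             ocs_node_get_wwpn(node));
 */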
1504 
1505 /**
1506  * @brief Generate node ddump data
1507  *
1508  * Generates the node ddump data.
1509  *
1510  * @param textbuf pointer to text buffer
1511  * @param node pointer to node context
1512  *
1513  * @return Returns 0 on success, or a negative value on failure.
1514  */
1515 
1516 int
1517 ocs_ddump_node(ocs_textbuf_t *textbuf, ocs_node_t *node)
1518 {
1519 	ocs_io_t *io;
1520 	ocs_io_t *els;
1521 	int retval = 0;
1522 
1523 	ocs_ddump_section(textbuf, "node", node->instance_index);
1524 	ocs_ddump_value(textbuf, "display_name", "%s", node->display_name);
1525 	ocs_ddump_value(textbuf, "current_state", "%s", node->current_state_name);
1526 	ocs_ddump_value(textbuf, "prev_state", "%s", node->prev_state_name);
1527 	ocs_ddump_value(textbuf, "current_evt", "%s", ocs_sm_event_name(node->current_evt));
1528 	ocs_ddump_value(textbuf, "prev_evt", "%s", ocs_sm_event_name(node->prev_evt));
1529 
1530 	ocs_ddump_value(textbuf, "indicator", "%#x", node->rnode.indicator);
1531 	ocs_ddump_value(textbuf, "fc_id", "%#06x", node->rnode.fc_id);
1532 	ocs_ddump_value(textbuf, "attached", "%d", node->rnode.attached);
1533 
1534 	ocs_ddump_value(textbuf, "hold_frames", "%d", node->hold_frames);
1535 	ocs_ddump_value(textbuf, "io_alloc_enabled", "%d", node->io_alloc_enabled);
1536 	ocs_ddump_value(textbuf, "shutdown_reason", "%d", node->shutdown_reason);
1537 	ocs_ddump_value(textbuf, "send_ls_acc", "%d", node->send_ls_acc);
1538 	ocs_ddump_value(textbuf, "ls_acc_did", "%d", node->ls_acc_did);
1539 	ocs_ddump_value(textbuf, "ls_acc_oxid", "%#04x", node->ls_acc_oxid);
1540 	ocs_ddump_value(textbuf, "req_free", "%d", node->req_free);
1541 	ocs_ddump_value(textbuf, "els_req_cnt", "%d", node->els_req_cnt);
1542 	ocs_ddump_value(textbuf, "els_cmpl_cnt", "%d", node->els_cmpl_cnt);
1543 
1544 	ocs_ddump_value(textbuf, "targ", "%d", node->targ);
1545 	ocs_ddump_value(textbuf, "init", "%d", node->init);
1546 	ocs_ddump_value(textbuf, "wwnn", "%s", node->wwnn);
1547 	ocs_ddump_value(textbuf, "wwpn", "%s", node->wwpn);
1548 	ocs_ddump_value(textbuf, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
1549 	ocs_ddump_value(textbuf, "chained_io_count", "%d", node->chained_io_count);
1550 	ocs_ddump_value(textbuf, "abort_cnt", "%d", node->abort_cnt);
1551 
1552 	ocs_display_sparams(NULL, "node_sparams", 1, textbuf, node->service_params+4);
1553 
1554 	ocs_lock(&node->pend_frames_lock);
1555 		if (!ocs_list_empty(&node->pend_frames)) {
1556 			ocs_hw_sequence_t *frame;
1557 			ocs_ddump_section(textbuf, "pending_frames", 0);
1558 			ocs_list_foreach(&node->pend_frames, frame) {
1559 				fc_header_t *hdr;
1560 				char buf[128];
1561 
1562 				hdr = frame->header->dma.virt;
1563 				ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu",
1564 				 hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
1565 				 frame->payload->dma.len);
1566 				ocs_ddump_value(textbuf, "frame", "%s", buf);
1567 			}
1568 			ocs_ddump_endsection(textbuf, "pending_frames", 0);
1569 		}
1570 	ocs_unlock(&node->pend_frames_lock);
1571 
1572 	ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
1573 	ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
1574 
1575 	ocs_lock(&node->active_ios_lock);
1576 		ocs_ddump_section(textbuf, "active_ios", 0);
1577 		ocs_list_foreach(&node->active_ios, io) {
1578 			ocs_ddump_io(textbuf, io);
1579 		}
1580 		ocs_ddump_endsection(textbuf, "active_ios", 0);
1581 
1582 		ocs_ddump_section(textbuf, "els_io_pend_list", 0);
1583 		ocs_list_foreach(&node->els_io_pend_list, els) {
1584 			ocs_ddump_els(textbuf, els);
1585 		}
1586 		ocs_ddump_endsection(textbuf, "els_io_pend_list", 0);
1587 
1588 		ocs_ddump_section(textbuf, "els_io_active_list", 0);
1589 		ocs_list_foreach(&node->els_io_active_list, els) {
1590 			ocs_ddump_els(textbuf, els);
1591 		}
1592 		ocs_ddump_endsection(textbuf, "els_io_active_list", 0);
1593 	ocs_unlock(&node->active_ios_lock);
1594 
1595 	ocs_ddump_endsection(textbuf, "node", node->instance_index);
1596 
1597 	return retval;
1598 }
1599 
1600 /**
1601  * @brief check ELS request completion
1602  *
1603  * Check ELS request completion event to make sure it's for the
1604  * ELS request we expect. If not, invoke given common event
1605  * handler and return an error.
1606  *
1607  * @param ctx state machine context
1608  * @param evt ELS request event
1609  * @param arg event argument
1610  * @param cmd ELS command expected
1611  * @param node_common_func common event handler to call if ELS
1612  *      		   doesn't match
1613  * @param funcname function name that called this
1614  *
1615  * @return zero if ELS command matches, -1 otherwise
1616  */
1617 int32_t
1618 node_check_els_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint8_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
1619 {
1620 	ocs_node_t *node = NULL;
1621 	ocs_t *ocs = NULL;
1622 	ocs_node_cb_t *cbdata = arg;
1623 	fc_els_gen_t *els_gen = NULL;
1624 	ocs_assert(ctx, -1);
1625 	node = ctx->app;
1626 	ocs_assert(node, -1);
1627 	ocs = node->ocs;
1628 	ocs_assert(ocs, -1);
1629 	cbdata = arg;
1630 	ocs_assert(cbdata, -1);
1631 	ocs_assert(cbdata->els, -1);
1632 	els_gen = (fc_els_gen_t *)cbdata->els->els_req.virt;
1633 	ocs_assert(els_gen, -1);
1634 
1635 	if ((cbdata->els->hio_type != OCS_HW_ELS_REQ) || (els_gen->command_code != cmd)) {
1636 		if (cbdata->els->hio_type != OCS_HW_ELS_REQ) {
1637 			ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received type=%d\n",
1638 				node->display_name, funcname, cmd, cbdata->els->hio_type);
1639 		} else {
1640 			ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received cmd=x%x\n",
1641 				node->display_name, funcname, cmd, els_gen->command_code);
1642 		}
1643 		/* send event to common handler */
1644 		node_common_func(funcname, ctx, evt, arg);
1645 		return -1;
1646 	}
1647 	return 0;
1648 }
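
/*
 * Validation sketch (illustrative only): an ELS response handler typically
 * verifies that the completion matches the request it issued before acting on
 * it.  FC_ELS_CMD_PLOGI is assumed here to be the driver's PLOGI command code.
 *
 *     if (node_check_els_req(ctx, evt, arg, FC_ELS_CMD_PLOGI,
 *                            __ocs_node_common, __func__) != 0) {
 *         return NULL;
 *     }
 */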
1649 
1650 /**
1651  * @brief check NS request completion
1652  *
1653  * Check ELS request completion event to make sure it's for the
1654  * nameserver request we expect. If not, invoke given common
1655  * event handler and return an error.
1656  *
1657  * @param ctx state machine context
1658  * @param evt ELS request event
1659  * @param arg event argument
1660  * @param cmd nameserver command expected
1661  * @param node_common_func common event handler to call if
1662  *      		   nameserver cmd doesn't match
1663  * @param funcname function name that called this
1664  *
1665  * @return zero if NS command matches, -1 otherwise
1666  */
1667 int32_t
1668 node_check_ns_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint32_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
1669 {
1670 	ocs_node_t *node = NULL;
1671 	ocs_t *ocs = NULL;
1672 	ocs_node_cb_t *cbdata = arg;
1673 	fcct_iu_header_t *fcct = NULL;
1674 	ocs_assert(ctx, -1);
1675 	node = ctx->app;
1676 	ocs_assert(node, -1);
1677 	ocs = node->ocs;
1678 	ocs_assert(ocs, -1);
1679 	cbdata = arg;
1680 	ocs_assert(cbdata, -1);
1681 	ocs_assert(cbdata->els, -1);
1682 	fcct = (fcct_iu_header_t *)cbdata->els->els_req.virt;
1683 	ocs_assert(fcct, -1);
1684 
1685 	if ((cbdata->els->hio_type != OCS_HW_FC_CT) || fcct->cmd_rsp_code != ocs_htobe16(cmd)) {
1686 		if (cbdata->els->hio_type != OCS_HW_FC_CT) {
1687 			ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received type=%d\n",
1688 				node->display_name, funcname, cmd, cbdata->els->hio_type);
1689 		} else {
1690 			ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received cmd=x%x\n",
1691 				node->display_name, funcname, cmd, fcct->cmd_rsp_code);
1692 		}
1693 		/* send event to common handler */
1694 		node_common_func(funcname, ctx, evt, arg);
1695 		return -1;
1696 	}
1697 	return 0;
1698 }
1699 
1700 
1701 void
1702 ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *object)
1703 {
1704 	ocs_io_t *io;
1705 	ocs_node_t *node = (ocs_node_t *)object;
1706 
1707 	ocs_mgmt_start_section(textbuf, "node", node->instance_index);
1708 
1709 	/* Readonly values */
1710 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
1711 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
1712 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
1713 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "attached");
1714 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "hold_frames");
1715 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "shutting_down");
1716 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "req_free");
1717 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id");
1718 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id_in_use");
1719 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "abort_cnt");
1720 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "targ");
1721 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init");
1722 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
1723 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
1724 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "pend_frames");
1725 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "chained_io_count");
1726 
1727 	/* Actions */
1728 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
1729 
1730 	ocs_lock(&node->active_ios_lock);
1731 	ocs_list_foreach(&node->active_ios, io) {
1732 		if ((io->mgmt_functions) && (io->mgmt_functions->get_list_handler)) {
1733 			io->mgmt_functions->get_list_handler(textbuf, io);
1734 		}
1735 	}
1736 	ocs_unlock(&node->active_ios_lock);
1737 
1738 	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
1739 }
1740 
1741 int
1742 ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
1743 {
1744 	ocs_io_t *io;
1745 	ocs_node_t *node = (ocs_node_t *)object;
1746 	char qualifier[80];
1747 	int retval = -1;
1748 
1749 	ocs_mgmt_start_section(textbuf, "node", node->instance_index);
1750 
1751 	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);
1752 
1753 	/* If it doesn't start with my qualifier I don't know what to do with it */
1754 	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1755 		char *unqualified_name = name + strlen(qualifier) +1;
1756 
1757 		/* See if it's a value I can supply */
1758 		if (ocs_strcmp(unqualified_name, "display_name") == 0) {
1759 			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
1760 			retval = 0;
1761 		} else if (ocs_strcmp(unqualified_name, "indicator") == 0) {
1762 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
1763 			retval = 0;
1764 		} else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
1765 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
1766 			retval = 0;
1767 		} else if (ocs_strcmp(unqualified_name, "attached") == 0) {
1768 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
1769 			retval = 0;
1770 		} else if (ocs_strcmp(unqualified_name, "hold_frames") == 0) {
1771 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
1772 			retval = 0;
1773 		} else if (ocs_strcmp(unqualified_name, "io_alloc_enabled") == 0) {
1774 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
1775 			retval = 0;
1776 		} else if (ocs_strcmp(unqualified_name, "req_free") == 0) {
1777 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
1778 			retval = 0;
1779 		} else if (ocs_strcmp(unqualified_name, "ls_acc_oxid") == 0) {
1780 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%04x", node->ls_acc_oxid);
1781 			retval = 0;
1782 		} else if (ocs_strcmp(unqualified_name, "ls_acc_did") == 0) {
1783 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%04x", node->ls_acc_did);
1784 			retval = 0;
1785 		} else if (ocs_strcmp(unqualified_name, "abort_cnt") == 0) {
1786 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
1787 			retval = 0;
1788 		} else if (ocs_strcmp(unqualified_name, "targ") == 0) {
1789 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ",  node->targ);
1790 			retval = 0;
1791 		} else if (ocs_strcmp(unqualified_name, "init") == 0) {
1792 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init",  node->init);
1793 			retval = 0;
1794 		} else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
1795 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn);
1796 			retval = 0;
1797 		} else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
1798 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn);
1799 			retval = 0;
1800 		} else if (ocs_strcmp(unqualified_name, "current_state") == 0) {
1801 			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
1802 			retval = 0;
1803 		} else if (ocs_strcmp(unqualified_name, "login_state") == 0) {
1804 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
1805 			retval = 0;
1806 		} else if (ocs_strcmp(unqualified_name, "pend_frames") == 0) {
1807 			ocs_hw_sequence_t *frame;
1808 			ocs_lock(&node->pend_frames_lock);
1809 				ocs_list_foreach(&node->pend_frames, frame) {
1810 					fc_header_t *hdr;
1811 					char buf[128];
1812 
1813 					hdr = frame->header->dma.virt;
1814 					ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
1815 						 ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
1816 						 frame->payload->dma.len);
1817 					ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
1818 				}
1819 			ocs_unlock(&node->pend_frames_lock);
1820 			retval = 0;
1821 		} else if (ocs_strcmp(unqualified_name, "chained_io_count") == 0) {
1822 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
1823 			retval = 0;
1824 		} else {
1825 			/* If I didn't know the value of this status, pass the request to each of my children */
1826 			ocs_lock(&node->active_ios_lock);
1827 				ocs_list_foreach(&node->active_ios, io) {
1828 					if ((io->mgmt_functions) && (io->mgmt_functions->get_handler)) {
1829 						retval = io->mgmt_functions->get_handler(textbuf, qualifier, name, io);
1830 					}
1831 
1832 					if (retval == 0) {
1833 						break;
1834 					}
1835 				}
1836 			ocs_unlock(&node->active_ios_lock);
1837 		}
1838 	}
1839 
1840 	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
1841 
1842 	return retval;
1843 }
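
#if 0
/*
 * Illustrative sketch (not compiled): how a management caller might read a
 * single node property.  Names follow the "<parent>/node[<instance>]/<prop>"
 * convention that ocs_mgmt_node_get() parses above.  The parent path
 * "ocs/domain[0]/sport[0]" and the helper name are assumptions used for
 * illustration only.
 */
static void
example_mgmt_node_get(ocs_node_t *node, ocs_textbuf_t *textbuf)
{
	char name[128];

	/* build the fully qualified property name for this node instance */
	ocs_snprintf(name, sizeof(name), "ocs/domain[0]/sport[0]/node[%d]/fc_id",
		     node->instance_index);

	if (ocs_mgmt_node_get(textbuf, "ocs/domain[0]/sport[0]", name, node) == 0) {
		/* textbuf now contains the emitted "fc_id" property */
	}
}
#endif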
1844 
1845 void
1846 ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *object)
1847 {
1848 	ocs_io_t *io;
1849 	ocs_node_t *node = (ocs_node_t *)object;
1850 	ocs_hw_sequence_t *frame;
1851 
1852 	ocs_mgmt_start_section(textbuf, "node", node->instance_index);
1853 
1854 	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
1855 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
1856 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
1857 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
1858 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
1859 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
1860 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
1861 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%04x", node->ls_acc_oxid);
1862 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%04x", node->ls_acc_did);
1863 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
1864 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ",  node->targ);
1865 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init",  node->init);
1866 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "%s", node->wwpn);
1867 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "%s", node->wwnn);
1868 
1869 	ocs_lock(&node->pend_frames_lock);
1870 	ocs_list_foreach(&node->pend_frames, frame) {
1871 		fc_header_t *hdr;
1872 		char buf[128];
1873 
1874 		hdr = frame->header->dma.virt;
1875 		ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
1876 			     ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
1877 			     frame->payload->dma.len);
1878 		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
1879 	}
1880 	ocs_unlock(&node->pend_frames_lock);
1881 
1882 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
1883 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
1884 	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
1885 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
1886 
1887 	ocs_lock(&node->active_ios_lock);
1888 	ocs_list_foreach(&node->active_ios, io) {
1889 		if ((io->mgmt_functions) && (io->mgmt_functions->get_all_handler)) {
1890 			io->mgmt_functions->get_all_handler(textbuf, io);
1891 		}
1892 	}
1893 	ocs_unlock(&node->active_ios_lock);
1894 
1895 	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
1896 }
1897 
1898 int
1899 ocs_mgmt_node_set(char *parent, char *name, char *value, void *object)
1900 {
1901 	ocs_io_t *io;
1902 	ocs_node_t *node = (ocs_node_t *)object;
1903 	char qualifier[80];
1904 	int retval = -1;
1905 
1906 	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);
1907 
1908 	/* If it doesn't start with my qualifier, I don't know what to do with it */
1909 	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1910 
1911 		ocs_lock(&node->active_ios_lock);
1912 		ocs_list_foreach(&node->active_ios, io) {
1913 			if ((io->mgmt_functions) && (io->mgmt_functions->set_handler)) {
1914 				retval = io->mgmt_functions->set_handler(qualifier, name, value, io);
1915 			}
1916 
1917 			if (retval == 0) {
1918 				break;
1919 			}
1920 
1921 		}
1922 		ocs_unlock(&node->active_ios_lock);
1923 
1924 	}
1925 
1926 	return retval;
1927 }
1928 
1929 int
1930 ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
1931 		   void *arg_out, uint32_t arg_out_length, void *object)
1932 {
1933 	ocs_io_t *io;
1934 	ocs_node_t *node = (ocs_node_t *)object;
1935 	char qualifier[80];
1936 	int retval = -1;
1937 
1938 	ocs_snprintf(qualifier, sizeof(qualifier), "%s.node%d", parent, node->instance_index);
1939 
1940 	/* If it doesn't start with my qualifier, I don't know what to do with it */
1941 	if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
1942 		char *unqualified_name = action + strlen(qualifier) + 1;
1943 
1944 		if (ocs_strcmp(unqualified_name, "resume") == 0) {
1945 			ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
1946 		}
1947 
1948 		{
1949 			/* If I didn't know how to do this action, pass the request to each of my children */
1950 			ocs_lock(&node->active_ios_lock);
1951 				ocs_list_foreach(&node->active_ios, io) {
1952 					if ((io->mgmt_functions) && (io->mgmt_functions->exec_handler)) {
1953 						retval = io->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
1954 							arg_out, arg_out_length, io);
1955 					}
1956 
1957 					if (retval == 0) {
1958 						break;
1959 					}
1960 
1961 				}
1962 			ocs_unlock(&node->active_ios_lock);
1963 		}
1964 	}
1965 
1966 	return retval;
1967 }
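
#if 0
/*
 * Illustrative sketch (not compiled): driving the node "resume" action
 * through ocs_mgmt_node_exec().  Note that this path builds its qualifier
 * as "<parent>.node<instance>" (dotted form), unlike the get/set paths
 * above.  The parent string "ocs.domain0.sport0" and the helper name are
 * assumptions used for illustration only.
 */
static void
example_mgmt_node_resume_action(ocs_node_t *node)
{
	char action[128];

	/* fully qualified action name: qualifier, one separator, then "resume" */
	ocs_snprintf(action, sizeof(action), "ocs.domain0.sport0.node%d.resume",
		     node->instance_index);

	(void)ocs_mgmt_node_exec("ocs.domain0.sport0", action, NULL, 0, NULL, 0, node);
}
#endif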
1968 
1969 
1970 
1971 /**
1972  * @brief Return TRUE if active ios list is empty
1973  *
1974  * Test if node->active_ios list is empty while holding the node->active_ios_lock.
1975  *
1976  * @param node pointer to node object
1977  *
1978  * @return TRUE if node active ios list is empty
1979  */
1980 
1981 int
1982 ocs_node_active_ios_empty(ocs_node_t *node)
1983 {
1984 	int empty;
1985 
1986 	ocs_lock(&node->active_ios_lock);
1987 		empty = ocs_list_empty(&node->active_ios);
1988 	ocs_unlock(&node->active_ios_lock);
1989 	return empty;
1990 }
1991 
1992 /**
1993  * @brief Pause a node
1994  *
1995  * The node is placed in the __ocs_node_paused state after saving the state
1996  * to return to
1997  *
1998  * @param node Pointer to node object
1999  * @param state State to resume to
2000  *
2001  * @return none
2002  */
2003 
2004 void
2005 ocs_node_pause(ocs_node_t *node, ocs_sm_function_t state)
2006 {
2007 	node->nodedb_state = state;
2008 	ocs_node_transition(node, __ocs_node_paused, NULL);
2009 }
2010 
2011 /**
2012  * @brief Paused node state
2013  *
2014  * This state is entered when a node is "paused". When resumed, the node
2015  * is transitioned to the previously saved state (node->nodedb_state).
2016  *
2017  * @param ctx Remote node state machine context.
2018  * @param evt Event to process.
2019  * @param arg Per event optional argument.
2020  *
2021  * @return returns NULL
2022  */
2023 
2024 void *
2025 __ocs_node_paused(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
2026 {
2027 	std_node_state_decl();
2028 
2029 	node_sm_trace();
2030 
2031 	switch(evt) {
2032 	case OCS_EVT_ENTER:
2033 		node_printf(node, "Paused\n");
2034 		break;
2035 
2036 	case OCS_EVT_RESUME: {
2037 		ocs_sm_function_t pf = node->nodedb_state;
2038 
2039 		node->nodedb_state = NULL;
2040 		ocs_node_transition(node, pf, NULL);
2041 		break;
2042 	}
2043 
2044 	case OCS_EVT_DOMAIN_ATTACH_OK:
2045 		break;
2046 
2047 	case OCS_EVT_SHUTDOWN:
2048 		node->req_free = 1;
2049 		break;
2050 
2051 	default:
2052 		__ocs_node_common(__func__, ctx, evt, arg);
2053 		break;
2054 	}
2055 	return NULL;
2056 }
2057 
2058 /**
2059  * @brief Resume a paused node
2060  *
2061  * Posts a resume event to the paused node.
2062  *
2063  * @param node Pointer to node object
2064  *
2065  * @return returns 0 for success, a negative error code value for failure.
2066  */
2067 
2068 int32_t
2069 ocs_node_resume(ocs_node_t *node)
2070 {
2071 	ocs_assert(node != NULL, -1);
2072 
2073 	ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
2074 
2075 	return 0;
2076 }
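
#if 0
/*
 * Illustrative sketch (not compiled): a state machine handler may park a
 * node with ocs_node_pause() and a later event source may release it with
 * ocs_node_resume(), which posts OCS_EVT_RESUME to __ocs_node_paused and
 * restores the saved state.  Resuming into __ocs_d_device_ready is an
 * assumption chosen for illustration only.
 */
static void
example_pause_and_resume(ocs_node_t *node)
{
	/* save the state to return to and transition to __ocs_node_paused */
	ocs_node_pause(node, __ocs_d_device_ready);

	/* ... later, once the blocking condition clears ... */
	if (ocs_node_resume(node) != 0) {
		node_printf(node, "resume failed\n");
	}
}
#endif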
2077 
2078 /**
2079  * @ingroup node_common
2080  * @brief Dispatch an ELS frame.
2081  *
2082  * <h3 class="desc">Description</h3>
2083  * An ELS frame is dispatched to the \c node state machine.
2084  * RQ Pair mode: this function is always called with a NULL hw
2085  * io.
2086  *
2087  * @param node Node that originated the frame.
2088  * @param seq header/payload sequence buffers
2089  *
2090  * @return Returns 0 if frame processed and RX buffers cleaned
2091  * up appropriately, -1 if frame not handled and RX buffers need
2092  * to be returned.
2093  */
2094 
2095 int32_t
2096 ocs_node_recv_els_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
2097 {
2098 	struct {
2099 		uint32_t cmd;
2100 		ocs_sm_event_t evt;
2101 		uint32_t payload_size;
2102 	} els_cmd_list[] = {
2103 		{FC_ELS_CMD_PLOGI,	OCS_EVT_PLOGI_RCVD, 	sizeof(fc_plogi_payload_t)},
2104 		{FC_ELS_CMD_FLOGI,	OCS_EVT_FLOGI_RCVD, 	sizeof(fc_plogi_payload_t)},
2105 		{FC_ELS_CMD_LOGO,	OCS_EVT_LOGO_RCVD, 	sizeof(fc_acc_payload_t)},
2106 		{FC_ELS_CMD_RRQ,	OCS_EVT_RRQ_RCVD, 	sizeof(fc_acc_payload_t)},
2107 		{FC_ELS_CMD_PRLI, 	OCS_EVT_PRLI_RCVD, 	sizeof(fc_prli_payload_t)},
2108 		{FC_ELS_CMD_PRLO, 	OCS_EVT_PRLO_RCVD, 	sizeof(fc_prlo_payload_t)},
2109 		{FC_ELS_CMD_PDISC, 	OCS_EVT_PDISC_RCVD, 	MAX_ACC_REJECT_PAYLOAD},
2110 		{FC_ELS_CMD_FDISC, 	OCS_EVT_FDISC_RCVD, 	MAX_ACC_REJECT_PAYLOAD},
2111 		{FC_ELS_CMD_ADISC, 	OCS_EVT_ADISC_RCVD, 	sizeof(fc_adisc_payload_t)},
2112 		{FC_ELS_CMD_RSCN, 	OCS_EVT_RSCN_RCVD, 	MAX_ACC_REJECT_PAYLOAD},
2113 		{FC_ELS_CMD_SCR,	OCS_EVT_SCR_RCVD, 	MAX_ACC_REJECT_PAYLOAD},
2114 	};
2115 	ocs_t *ocs = node->ocs;
2116 	ocs_node_cb_t cbdata;
2117 	fc_header_t *hdr = seq->header->dma.virt;
2118 	uint8_t *buf = seq->payload->dma.virt;
2119 	ocs_sm_event_t evt = OCS_EVT_ELS_RCVD;
2120 	uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD;
2121 	uint32_t i;
2122 
2123 	ocs_memset(&cbdata, 0, sizeof(cbdata));
2124 	cbdata.header = seq->header;
2125 	cbdata.payload = seq->payload;
2126 
2127 	/* find a matching event for the ELS command */
2128 	for (i = 0; i < ARRAY_SIZE(els_cmd_list); i ++) {
2129 		if (els_cmd_list[i].cmd == buf[0]) {
2130 			evt = els_cmd_list[i].evt;
2131 			payload_size = els_cmd_list[i].payload_size;
2132 			break;
2133 		}
2134 	}
2135 
2136 	switch(evt) {
2137 	case OCS_EVT_FLOGI_RCVD:
2138 		ocs_display_sparams(node->display_name, "flogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
2139 		break;
2140 	case OCS_EVT_FDISC_RCVD:
2141 		ocs_display_sparams(node->display_name, "fdisc rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
2142 		break;
2143 	case OCS_EVT_PLOGI_RCVD:
2144 		ocs_display_sparams(node->display_name, "plogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
2145 		break;
2146 	default:
2147 		break;
2148 	}
2149 
2150 	cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER);
2151 
2152 	if (cbdata.io != NULL) {
2153 		cbdata.io->hw_priv = seq->hw_priv;
2154 		/* if we're here, sequence initiative has been transferred */
2155 		cbdata.io->seq_init = 1;
2156 
2157 		ocs_node_post_event(node, evt, &cbdata);
2158 	} else {
2159 		node_printf(node, "failure to allocate SCSI IO for ELS s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
2160 			    fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
2161 	}
2162 	ocs_hw_sequence_free(&ocs->hw, seq);
2163 	return 0;
2164 }
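
#if 0
/*
 * Illustrative sketch (not compiled): a caller honoring the return
 * convention of the ocs_node_recv_*_frame() dispatchers: on 0 the RX
 * buffers have already been returned, on -1 the caller must hand them
 * back itself.  The wrapper name is an assumption used for illustration
 * only.
 */
static void
example_dispatch_els(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	ocs_t *ocs = node->ocs;

	if (ocs_node_recv_els_frame(node, seq) != 0) {
		/* frame was not consumed; return the RX buffers to the hardware */
		ocs_hw_sequence_free(&ocs->hw, seq);
	}
}
#endif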
2165 
2166 /**
2167  * @ingroup node_common
2168  * @brief Dispatch an ABTS frame (RQ Pair/sequence coalescing).
2169  *
2170  * <h3 class="desc">Description</h3>
2171  * An ABTS frame is dispatched to the node state machine. This
2172  * function is used for both RQ Pair and sequence coalescing.
2173  *
2174  * @param node Node that originated the frame.
2175  * @param seq Header/payload sequence buffers
2176  *
2177  * @return Returns 0 if frame processed and RX buffers cleaned
2178  * up appropriately, -1 if frame not handled and RX buffers need
2179  * to be returned.
2180  */
2181 
2182 int32_t
2183 ocs_node_recv_abts_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
2184 {
2185 	ocs_t *ocs = node->ocs;
2186 	ocs_xport_t *xport = ocs->xport;
2187 	fc_header_t *hdr = seq->header->dma.virt;
2188 	uint16_t ox_id = ocs_be16toh(hdr->ox_id);
2189 	uint16_t rx_id = ocs_be16toh(hdr->rx_id);
2190 	ocs_node_cb_t cbdata;
2191 	int32_t rc = 0;
2192 
2193 	node->abort_cnt++;
2194 
2195 	/*
2196 	 * Check to see if the IO we want to abort is active; if it is not active,
2197 	 * then we can send the BA_ACC using the send frame option.
2198 	 */
2199 	if (ocs_io_find_tgt_io(ocs, node, ox_id, rx_id) == NULL) {
2200 		uint32_t send_frame_capable;
2201 
2202 		ocs_log_debug(ocs, "IO not found (ox_id %04x)\n", ox_id);
2203 
2204 		/* If we have SEND_FRAME capability, then use it to send BA_ACC */
2205 		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
2206 		if ((rc == 0) && send_frame_capable) {
2207 			rc = ocs_sframe_send_bls_acc(node, seq);
2208 			if (rc) {
2209 				ocs_log_test(ocs, "ocs_sframe_send_bls_acc failed\n");
2210 			}
2211 			return rc;
2212 		}
2213 		/* continuing */
2214 	}
2215 
2216 	ocs_memset(&cbdata, 0, sizeof(cbdata));
2217 	cbdata.header = seq->header;
2218 	cbdata.payload = seq->payload;
2219 
2220 	cbdata.io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
2221 	if (cbdata.io != NULL) {
2222 		cbdata.io->hw_priv = seq->hw_priv;
2223 		/* If we got this far, SIT=1 */
2224 		cbdata.io->seq_init = 1;
2225 
2226 		/* fill out generic fields */
2227 		cbdata.io->ocs = ocs;
2228 		cbdata.io->node = node;
2229 		cbdata.io->cmd_tgt = TRUE;
2230 
2231 		ocs_node_post_event(node, OCS_EVT_ABTS_RCVD, &cbdata);
2232 	} else {
2233 		ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
2234 		node_printf(node, "SCSI IO allocation failed for ABTS received s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
2235 			    fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
2236 	}
2237 
2238 	/* ABTS processed, return RX buffer to the chip */
2239 	ocs_hw_sequence_free(&ocs->hw, seq);
2240 	return 0;
2241 }
2242 
2243 /**
2244  * @ingroup node_common
2245  * @brief Dispatch a CT frame.
2246  *
2247  * <h3 class="desc">Description</h3>
2248  * A CT frame is dispatched to the \c node state machine.
2249  * RQ Pair mode: this function is always called with a NULL hw
2250  * io.
2251  *
2252  * @param node Node that originated the frame.
2253  * @param seq header/payload sequence buffers
2254  *
2255  * @return Returns 0 if frame processed and RX buffers cleaned
2256  * up appropriately, -1 if frame not handled and RX buffers need
2257  * to be returned.
2258  */
2259 
2260 int32_t
2261 ocs_node_recv_ct_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
2262 {
2263 	ocs_t *ocs = node->ocs;
2264 	fc_header_t *hdr = seq->header->dma.virt;
2265 	fcct_iu_header_t *iu = seq->payload->dma.virt;
2266 	ocs_sm_event_t evt = OCS_EVT_ELS_RCVD;
2267 	uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD;
2268 	uint16_t gscmd = ocs_be16toh(iu->cmd_rsp_code);
2269 	ocs_node_cb_t cbdata;
2270 	uint32_t i;
2271 	struct {
2272 		uint32_t cmd;
2273 		ocs_sm_event_t evt;
2274 		uint32_t payload_size;
2275 	} ct_cmd_list[] = {
2276 		{FC_GS_NAMESERVER_RFF_ID, OCS_EVT_RFF_ID_RCVD, 100},
2277 		{FC_GS_NAMESERVER_RFT_ID, OCS_EVT_RFT_ID_RCVD, 100},
2278 		{FC_GS_NAMESERVER_GNN_ID, OCS_EVT_GNN_ID_RCVD, 100},
2279 		{FC_GS_NAMESERVER_GPN_ID, OCS_EVT_GPN_ID_RCVD, 100},
2280 		{FC_GS_NAMESERVER_GFPN_ID, OCS_EVT_GFPN_ID_RCVD, 100},
2281 		{FC_GS_NAMESERVER_GFF_ID, OCS_EVT_GFF_ID_RCVD, 100},
2282 		{FC_GS_NAMESERVER_GID_FT, OCS_EVT_GID_FT_RCVD, 256},
2283 		{FC_GS_NAMESERVER_GID_PT, OCS_EVT_GID_PT_RCVD, 256},
2284 		{FC_GS_NAMESERVER_RPN_ID, OCS_EVT_RPN_ID_RCVD, 100},
2285 		{FC_GS_NAMESERVER_RNN_ID, OCS_EVT_RNN_ID_RCVD, 100},
2286 		{FC_GS_NAMESERVER_RCS_ID, OCS_EVT_RCS_ID_RCVD, 100},
2287 		{FC_GS_NAMESERVER_RSNN_NN, OCS_EVT_RSNN_NN_RCVD, 100},
2288 		{FC_GS_NAMESERVER_RSPN_ID, OCS_EVT_RSPN_ID_RCVD, 100},
2289 		{FC_GS_NAMESERVER_RHBA, OCS_EVT_RHBA_RCVD, 100},
2290 		{FC_GS_NAMESERVER_RPA, OCS_EVT_RPA_RCVD, 100},
2291 	};
2292 
2293 	ocs_memset(&cbdata, 0, sizeof(cbdata));
2294 	cbdata.header = seq->header;
2295 	cbdata.payload = seq->payload;
2296 
2297 	/* find a matching event for the GS (CT) command */
2298 	for (i = 0; i < ARRAY_SIZE(ct_cmd_list); i ++) {
2299 		if (ct_cmd_list[i].cmd == gscmd) {
2300 			evt = ct_cmd_list[i].evt;
2301 			payload_size = ct_cmd_list[i].payload_size;
2302 			break;
2303 		}
2304 	}
2305 
2306 	/* Allocate an IO and send a reject */
2307 	cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER);
2308 	if (cbdata.io == NULL) {
2309 		node_printf(node, "GS IO allocation failed for s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
2310 			fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id),
2311 			ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
2312 		return -1;
2313 	}
2314 	cbdata.io->hw_priv = seq->hw_priv;
2315 	ocs_node_post_event(node, evt, &cbdata);
2316 
2317 	ocs_hw_sequence_free(&ocs->hw, seq);
2318 	return 0;
2319 }
2320 
2321 /**
2322  * @ingroup node_common
2323  * @brief Dispatch an FCP command frame when the node is not ready.
2324  *
2325  * <h3 class="desc">Description</h3>
2326  * A frame is dispatched to the \c node state machine.
2327  *
2328  * @param node Node that originated the frame.
2329  * @param seq header/payload sequence buffers
2330  *
2331  * @return Returns 0 if frame processed and RX buffers cleaned
2332  * up appropriately, -1 if frame not handled.
2333  */
2334 
2335 int32_t
2336 ocs_node_recv_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
2337 {
2338 	ocs_node_cb_t cbdata;
2339 	ocs_t *ocs = node->ocs;
2340 
2341 	ocs_memset(&cbdata, 0, sizeof(cbdata));
2342 	cbdata.header = seq->header;
2343 	cbdata.payload = seq->payload;
2344 	ocs_node_post_event(node, OCS_EVT_FCP_CMD_RCVD, &cbdata);
2345 	ocs_hw_sequence_free(&ocs->hw, seq);
2346 	return 0;
2347 }
2348 
2349 /**
2350  * @ingroup node_common
2351  * @brief Stub handler for non-ABTS BLS frames
2352  *
2353  * <h3 class="desc">Description</h3>
2354  * Log the frame header and drop the frame; customers can plumb this into their back-end as needed.
2355  *
2356  * @param node Node that originated the frame.
2357  * @param seq header/payload sequence buffers
2358  *
2359  * @return Returns -1 so that the caller returns the RX buffers.
2360  */
2361 
2362 int32_t
2363 ocs_node_recv_bls_no_sit(ocs_node_t *node, ocs_hw_sequence_t *seq)
2364 {
2365 	fc_header_t *hdr = seq->header->dma.virt;
2366 
2367 	node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n",
2368 		    ocs_htobe32(((uint32_t *)hdr)[0]),
2369 		    ocs_htobe32(((uint32_t *)hdr)[1]),
2370 		    ocs_htobe32(((uint32_t *)hdr)[2]),
2371 		    ocs_htobe32(((uint32_t *)hdr)[3]),
2372 		    ocs_htobe32(((uint32_t *)hdr)[4]),
2373 		    ocs_htobe32(((uint32_t *)hdr)[5]));
2374 
2375 	return -1;
2376 }
2377