xref: /freebsd/sys/dev/ocs_fc/ocs_node.c (revision 924226fba12cc9a228c73b956e1b7fa24c60b055)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 /**
35  * @file
36  * OCS driver remote node handler.  This file contains code that is shared
37  * between fabric (ocs_fabric.c) and device (ocs_device.c) nodes.
38  */
39 
40 /*!
41  * @defgroup node_common Node common support
42  * @defgroup node_alloc Node allocation
43  */
44 
45 #include "ocs.h"
46 #include "ocs_els.h"
47 #include "ocs_device.h"
48 
49 #define SCSI_IOFMT "[%04x][i:%0*x t:%0*x h:%04x]"
50 #define SCSI_ITT_SIZE(ocs)	((ocs->ocs_xport == OCS_XPORT_FC) ? 4 : 8)
51 
52 #define SCSI_IOFMT_ARGS(io) io->instance_index, SCSI_ITT_SIZE(io->ocs), io->init_task_tag, SCSI_ITT_SIZE(io->ocs), io->tgt_task_tag, io->hw_tag
53 
54 #define scsi_io_printf(io, fmt, ...) ocs_log_debug(io->ocs, "[%s]" SCSI_IOFMT fmt, \
55 	io->node->display_name, SCSI_IOFMT_ARGS(io), ##__VA_ARGS__)
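
/*
 * With the FC transport, SCSI_ITT_SIZE() is 4, so scsi_io_printf() emits
 * lines of the form (hypothetical values):
 *
 *	[sport01.0a1b2c][0002][i:0012 t:0034 h:0005] message
 */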
56 
57 void ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *node);
58 void ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *node);
59 int ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *node);
60 int ocs_mgmt_node_set(char *parent, char *name, char *value, void *node);
61 int ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
62 		void *arg_out, uint32_t arg_out_length, void *node);
63 static ocs_mgmt_functions_t node_mgmt_functions = {
64 	.get_list_handler	=	ocs_mgmt_node_list,
65 	.get_handler		=	ocs_mgmt_node_get,
66 	.get_all_handler	=	ocs_mgmt_node_get_all,
67 	.set_handler		=	ocs_mgmt_node_set,
68 	.exec_handler		=	ocs_mgmt_node_exec,
69 };
70 
71 /**
72  * @ingroup node_common
73  * @brief Device node state machine: wait for all ELS's to
74  *        complete
75  *
76  * Abort all ELS's for the given node.
77  *
78  * @param node node for which ELS's will be aborted
79  */
80 
81 void
82 ocs_node_abort_all_els(ocs_node_t *node)
83 {
84 	ocs_io_t *els;
85 	ocs_io_t *els_next;
86 	ocs_node_cb_t cbdata = {0};
87 
88 	ocs_node_hold_frames(node);
89 	ocs_lock(&node->active_ios_lock);
90 		ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) {
91 			ocs_log_debug(node->ocs, "[%s] initiate ELS abort %s\n", node->display_name, els->display_name);
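			/* drop the list lock across the event post; completing or
			 * freeing the aborted ELS may need active_ios_lock
			 */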
92 			ocs_unlock(&node->active_ios_lock);
93 			cbdata.els = els;
94 			ocs_els_post_event(els, OCS_EVT_ABORT_ELS, &cbdata);
95 			ocs_lock(&node->active_ios_lock);
96 		}
97 	ocs_unlock(&node->active_ios_lock);
98 }
99 
100 /**
101  * @ingroup node_common
102  * @brief Handle remote node events from HW
103  *
104  * Handle remote node events from HW. Essentially, the HW event is translated into
105  * a node state machine event that is posted to the affected node.
106  *
107  * @param arg pointer to ocs
108  * @param event HW event to process
109  * @param data application specific data (pointer to the affected node)
110  *
111  * @return returns 0 for success, a negative error code value for failure.
112  */
113 int32_t
114 ocs_remote_node_cb(void *arg, ocs_hw_remote_node_event_e event, void *data)
115 {
116 	ocs_t *ocs = arg;
117 	ocs_sm_event_t	sm_event = OCS_EVT_LAST;
118 	ocs_remote_node_t *rnode = data;
119 	ocs_node_t *node = rnode->node;
120 
121 	switch (event) {
122 	case OCS_HW_NODE_ATTACH_OK:
123 		sm_event = OCS_EVT_NODE_ATTACH_OK;
124 		break;
125 
126 	case OCS_HW_NODE_ATTACH_FAIL:
127 		sm_event = OCS_EVT_NODE_ATTACH_FAIL;
128 		break;
129 
130 	case OCS_HW_NODE_FREE_OK:
131 		sm_event = OCS_EVT_NODE_FREE_OK;
132 		break;
133 
134 	case OCS_HW_NODE_FREE_FAIL:
135 		sm_event = OCS_EVT_NODE_FREE_FAIL;
136 		break;
137 
138 	default:
139 		ocs_log_test(ocs, "unhandled event %#x\n", event);
140 		return -1;
141 	}
142 
143 	/* If we're using HLM, forward the NODE_ATTACH_OK/FAIL event to all nodes in the node group */
144 	if ((node->node_group != NULL) &&
145 			((sm_event == OCS_EVT_NODE_ATTACH_OK) || (sm_event == OCS_EVT_NODE_ATTACH_FAIL))) {
146 		ocs_node_t *n = NULL;
147 		uint8_t		attach_ok = sm_event == OCS_EVT_NODE_ATTACH_OK;
148 
149 		ocs_sport_lock(node->sport);
150 		{
151 			ocs_list_foreach(&node->sport->node_list, n) {
152 				if (node == n) {
153 					continue;
154 				}
155 				ocs_node_lock(n);
156 					if ((!n->rnode.attached) && (node->node_group == n->node_group)) {
157 						n->rnode.attached = attach_ok;
158 						node_printf(n, "rpi[%d] deferred HLM node attach %s posted\n",
159 								n->rnode.index, attach_ok ? "ok" : "fail");
160 						ocs_node_post_event(n, sm_event, NULL);
161 					}
162 				ocs_node_unlock(n);
163 			}
164 		}
165 
166 		ocs_sport_unlock(node->sport);
167 	}
168 
169 	ocs_node_post_event(node, sm_event, NULL);
170 
171 	return 0;
172 }
173 
174 /**
175  * @ingroup node_alloc
176  * @brief Find an FC node structure given the FC port ID
177  *
178  * @param sport the SPORT to search
179  * @param port_id FC port ID
180  *
181  * @return pointer to the object or NULL if not found
182  */
183 ocs_node_t *
184 ocs_node_find(ocs_sport_t *sport, uint32_t port_id)
185 {
186 	ocs_node_t *node;
187 
188 	ocs_assert(sport->lookup, NULL);
189 	ocs_sport_lock(sport);
190 		node = spv_get(sport->lookup, port_id);
191 	ocs_sport_unlock(sport);
192 	return node;
193 }
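
/*
 * Example (hypothetical caller): well-known addresses can be looked up
 * directly, as ocs_node_free() does for the name server:
 *
 *	ocs_node_t *ns = ocs_node_find(sport, FC_ADDR_NAMESERVER);
 *	if (ns != NULL)
 *		...
 */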
194 
195 /**
196  * @ingroup node_alloc
197  * @brief Find an FC node structure given the WWPN
198  *
199  * @param sport the SPORT to search
200  * @param wwpn the WWPN to search for (host endian)
201  *
202  * @return pointer to the object or NULL if not found
203  */
204 ocs_node_t *
205 ocs_node_find_wwpn(ocs_sport_t *sport, uint64_t wwpn)
206 {
207 	ocs_node_t *node = NULL;
208 
209 	ocs_assert(sport, NULL);
210 
211 	ocs_sport_lock(sport);
212 		ocs_list_foreach(&sport->node_list, node) {
213 			if (ocs_node_get_wwpn(node) == wwpn) {
214 				ocs_sport_unlock(sport);
215 				return node;
216 			}
217 		}
218 	ocs_sport_unlock(sport);
219 	return NULL;
220 }
221 
222 /**
223  * @ingroup node_alloc
224  * @brief allocate node object pool
225  *
226  * A pool of ocs_node_t objects is allocated.
227  *
228  * @param ocs pointer to driver instance context
229  * @param node_count count of nodes to allocate
230  *
231  * @return returns 0 for success, a negative error code value for failure.
232  */
233 
234 int32_t
235 ocs_node_create_pool(ocs_t *ocs, uint32_t node_count)
236 {
237 	ocs_xport_t *xport = ocs->xport;
238 	uint32_t i;
239 	ocs_node_t *node;
240 	uint32_t max_sge;
241 	uint32_t num_sgl;
242 	uint64_t max_xfer_size;
243 	int32_t rc;
244 
245 	xport->nodes_count = node_count;
246 
247 	xport->nodes = ocs_malloc(ocs, node_count * sizeof(ocs_node_t *), OCS_M_ZERO | OCS_M_NOWAIT);
248 	if (xport->nodes == NULL) {
249 		ocs_log_err(ocs, "node ptrs allocation failed\n");
250 		return -1;
251 	}
252 
253 	if (0 == ocs_hw_get(&ocs->hw, OCS_HW_MAX_SGE, &max_sge) &&
254 	    0 == ocs_hw_get(&ocs->hw, OCS_HW_N_SGL, &num_sgl)) {
255 		max_xfer_size = (max_sge * (uint64_t)num_sgl);
256 	} else {
257 		max_xfer_size = 65536;
258 	}
259 
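	/* clamp the per-node write transfer size to 64 KiB, regardless of the
	 * SGL capacity computed above
	 */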
260 	if (max_xfer_size > 65536)
261 		max_xfer_size = 65536;
262 
263 	ocs_list_init(&xport->nodes_free_list, ocs_node_t, link);
264 
265 	for (i = 0; i < node_count; i ++) {
266 		node = ocs_malloc(ocs, sizeof(ocs_node_t), OCS_M_ZERO | OCS_M_NOWAIT);
267 		if (node == NULL) {
268 			ocs_log_err(ocs, "node allocation failed\n");
269 			goto error;
270 		}
271 
272 		/* Assign any persistent field values */
273 		node->instance_index = i;
274 		node->max_wr_xfer_size = max_xfer_size;
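		/* no RPI is allocated yet; UINT32_MAX marks the indicator invalid */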
275 		node->rnode.indicator = UINT32_MAX;
276 
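		/* service parameters buffer: 256 bytes, 16-byte aligned */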
277 		rc = ocs_dma_alloc(ocs, &node->sparm_dma_buf, 256, 16);
278 		if (rc) {
279 			ocs_free(ocs, node, sizeof(ocs_node_t));
280 			ocs_log_err(ocs, "ocs_dma_alloc failed: %d\n", rc);
281 			goto error;
282 		}
283 
284 		xport->nodes[i] = node;
285 		ocs_list_add_tail(&xport->nodes_free_list, node);
286 	}
287 	return 0;
288 
289 error:
290 	ocs_node_free_pool(ocs);
291 	return -1;
292 }
293 
294 /**
295  * @ingroup node_alloc
296  * @brief free node object pool
297  *
298  * The pool of previously allocated node objects is freed
299  *
300  * @param ocs pointer to driver instance context
301  *
302  * @return none
303  */
304 
305 void
306 ocs_node_free_pool(ocs_t *ocs)
307 {
308 	ocs_xport_t *xport = ocs->xport;
309 	ocs_node_t *node;
310 	uint32_t i;
311 
312 	if (!xport->nodes)
313 		return;
314 
315 	ocs_device_lock(ocs);
316 
317 	for (i = 0; i < xport->nodes_count; i ++) {
318 		node = xport->nodes[i];
319 		if (node) {
320 			/* free sparam_dma_buf */
321 			ocs_dma_free(ocs, &node->sparm_dma_buf);
322 			ocs_free(ocs, node, sizeof(ocs_node_t));
323 		}
324 		xport->nodes[i] = NULL;
325 	}
326 
327 	ocs_free(ocs, xport->nodes, (xport->nodes_count * sizeof(ocs_node_t *)));
328 
329 	ocs_device_unlock(ocs);
330 }
331 
332 /**
333  * @ingroup node_alloc
334  * @brief return pointer to node object given instance index
335  *
336  * A pointer to the node object given by an instance index is returned.
337  *
338  * @param ocs pointer to driver instance context
339  * @param index instance index
340  *
341  * @return returns pointer to node object, or NULL
342  */
343 
344 ocs_node_t *
345 ocs_node_get_instance(ocs_t *ocs, uint32_t index)
346 {
347 	ocs_xport_t *xport = ocs->xport;
348 	ocs_node_t *node = NULL;
349 
350 	if (index >= (xport->nodes_count)) {
351 		ocs_log_test(ocs, "invalid index: %d\n", index);
352 		return NULL;
353 	}
354 	node = xport->nodes[index];
355 	return node->attached ? node : NULL;
356 }
357 
358 /**
359  * @ingroup node_alloc
360  * @brief Allocate an fc node structure and add to node list
361  *
362  * @param sport pointer to the SPORT from which this node is allocated
363  * @param port_id FC port ID of new node
364  * @param init Port is an initiator (sent a PLOGI)
365  * @param targ Port is potentially a target
366  *
367  * @return pointer to the object or NULL if none available
368  */
369 
370 ocs_node_t *
371 ocs_node_alloc(ocs_sport_t *sport, uint32_t port_id, uint8_t init, uint8_t targ)
372 {
373 	int32_t rc;
374 	ocs_node_t *node = NULL;
375 	uint32_t instance_index;
376 	uint32_t max_wr_xfer_size;
377 	ocs_t *ocs = sport->ocs;
378 	ocs_xport_t *xport = ocs->xport;
379 	ocs_dma_t sparm_dma_buf;
380 
381 	ocs_assert(sport, NULL);
382 
383 	if (sport->shutting_down) {
384 		ocs_log_debug(ocs, "node allocation when shutting down %06x\n", port_id);
385 		return NULL;
386 	}
387 
388 	ocs_device_lock(ocs);
389 		node = ocs_list_remove_head(&xport->nodes_free_list);
390 	ocs_device_unlock(ocs);
391 	if (node == NULL) {
392 		ocs_log_err(ocs, "node allocation failed %06x\n", port_id);
393 		return NULL;
394 	}
395 
396 	/* Save persistent values across memset zero */
397 	instance_index = node->instance_index;
398 	max_wr_xfer_size = node->max_wr_xfer_size;
399 	sparm_dma_buf = node->sparm_dma_buf;
400 
401 	ocs_memset(node, 0, sizeof(*node));
402 	node->instance_index = instance_index;
403 	node->max_wr_xfer_size = max_wr_xfer_size;
404 	node->sparm_dma_buf = sparm_dma_buf;
405 	node->rnode.indicator = UINT32_MAX;
406 
407 	node->sport = sport;
408 	ocs_sport_lock(sport);
409 
410 		node->ocs = ocs;
411 		node->init = init;
412 		node->targ = targ;
413 
414 		rc = ocs_hw_node_alloc(&ocs->hw, &node->rnode, port_id, sport);
415 		if (rc) {
416 			ocs_log_err(ocs, "ocs_hw_node_alloc failed: %d\n", rc);
417 			ocs_sport_unlock(sport);
418 
419 			/* Return the node to the free pool. */
420 			ocs_device_lock(ocs);
421 			ocs_list_add_tail(&xport->nodes_free_list, node);
422 			ocs_device_unlock(ocs);
423 
424 			return NULL;
425 		}
426 		ocs_list_add_tail(&sport->node_list, node);
427 
428 		ocs_node_lock_init(node);
429 		ocs_lock_init(ocs, &node->pend_frames_lock, "pend_frames_lock[%d]", node->instance_index);
430 		ocs_list_init(&node->pend_frames, ocs_hw_sequence_t, link);
431 		ocs_lock_init(ocs, &node->active_ios_lock, "active_ios[%d]", node->instance_index);
432 		ocs_list_init(&node->active_ios, ocs_io_t, link);
433 		ocs_list_init(&node->els_io_pend_list, ocs_io_t, link);
434 		ocs_list_init(&node->els_io_active_list, ocs_io_t, link);
435 		ocs_scsi_io_alloc_enable(node);
436 
437 		/* zero the service parameters */
438 		ocs_memset(node->sparm_dma_buf.virt, 0, node->sparm_dma_buf.size);
439 
440 		node->rnode.node = node;
441 		node->sm.app = node;
442 		node->evtdepth = 0;
443 
444 		ocs_node_update_display_name(node);
445 
446 		spv_set(sport->lookup, port_id, node);
447 	ocs_sport_unlock(sport);
448 	node->mgmt_functions = &node_mgmt_functions;
449 
450 	return node;
451 }
452 
453 /**
454  * @ingroup node_alloc
455  * @brief free a node structure
456  *
457  * The node structure given by 'node' is freed
458  *
459  * @param node the node to free
460  *
461  * @return returns 0 for success, a negative error code value for failure.
462  */
463 
464 int32_t
465 ocs_node_free(ocs_node_t *node)
466 {
467 	ocs_sport_t *sport;
468 	ocs_t *ocs;
469 	ocs_xport_t *xport;
470 	ocs_hw_rtn_e rc = 0;
471 	ocs_node_t *ns = NULL;
472 	int post_all_free = FALSE;
473 
474 	ocs_assert(node, -1);
475 	ocs_assert(node->sport, -1);
476 	ocs_assert(node->ocs, -1);
477 	sport = node->sport;
478 	ocs_assert(sport, -1);
479 	ocs = node->ocs;
480 	ocs_assert(ocs->xport, -1);
481 	xport = ocs->xport;
482 
483 	node_printf(node, "Free'd\n");
484 
485 	if (node->refound) {
486 		/*
487 		 * Save the name server node. We will send fake RSCN event at
488 		 * the end to handle ignored RSCN event during node deletion
489 		 */
490 		ns = ocs_node_find(node->sport, FC_ADDR_NAMESERVER);
491 	}
492 
493 	/* Remove from node list */
494 	ocs_sport_lock(sport);
495 		ocs_list_remove(&sport->node_list, node);
496 
497 		/* Free HW resources */
498 		if (OCS_HW_RTN_IS_ERROR((rc = ocs_hw_node_free_resources(&ocs->hw, &node->rnode)))) {
499 			ocs_log_test(ocs, "ocs_hw_node_free_resources failed: %d\n", rc);
500 			rc = -1;
501 		}
502 
503 		/* if the gidpt_delay_timer is still running, then delete it */
504 		if (ocs_timer_pending(&node->gidpt_delay_timer)) {
505 			ocs_del_timer(&node->gidpt_delay_timer);
506 		}
507 
508 		if (node->fcp2device) {
509 			ocs_del_crn(node);
510 		}
511 
512 		/* remove entry from sparse vector list */
513 		if (sport->lookup == NULL) {
514 			ocs_log_test(node->ocs, "assertion failed: sport lookup is NULL\n");
515 			ocs_sport_unlock(sport);
516 			return -1;
517 		}
518 
519 		spv_set(sport->lookup, node->rnode.fc_id, NULL);
520 
521 		/*
522 		 * If the node_list is empty, then post an ALL_CHILD_NODES_FREE event to the sport,
523 		 * after the lock is released.  The sport may be freed as a result of the event.
524 		 */
525 		if (ocs_list_empty(&sport->node_list)) {
526 			post_all_free = TRUE;
527 		}
528 
529 	ocs_sport_unlock(sport);
530 
531 	if (post_all_free) {
532 		ocs_sm_post_event(&sport->sm, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
533 	}
534 
535 	node->sport = NULL;
536 	node->sm.current_state = NULL;
537 
538 	ocs_node_lock_free(node);
539 	ocs_lock_free(&node->pend_frames_lock);
540 	ocs_lock_free(&node->active_ios_lock);
541 
542 	/* return to free list */
543 	ocs_device_lock(ocs);
544 		ocs_list_add_tail(&xport->nodes_free_list, node);
545 	ocs_device_unlock(ocs);
546 
547 	if (ns != NULL) {
548 		/* sending fake RSCN event to name server node */
549 		ocs_node_post_event(ns, OCS_EVT_RSCN_RCVD, NULL);
550 	}
551 
552 	return rc;
553 }
554 
555 /**
556  * @brief free memory resources of a node object
557  *
558  * The node object's child objects are freed, after which the
559  * node object itself is freed.
560  *
561  * @param node pointer to a node object
562  *
563  * @return none
564  */
565 
566 void
567 ocs_node_force_free(ocs_node_t *node)
568 {
569 	ocs_io_t *io;
570 	ocs_io_t *next;
571 	ocs_io_t *els;
572 	ocs_io_t *els_next;
573 
574 	/* shutdown sm processing */
575 	ocs_sm_disable(&node->sm);
576 	ocs_strncpy(node->prev_state_name, node->current_state_name, sizeof(node->prev_state_name));
577 	ocs_strncpy(node->current_state_name, "disabled", sizeof(node->current_state_name));
578 
579 	/* Let the backend cleanup if needed */
580 	ocs_scsi_notify_node_force_free(node);
581 
582 	ocs_lock(&node->active_ios_lock);
583 		ocs_list_foreach_safe(&node->active_ios, io, next) {
584 			ocs_list_remove(&io->node->active_ios, io);
585 			ocs_io_free(node->ocs, io);
586 		}
587 	ocs_unlock(&node->active_ios_lock);
588 
589 	/* free all pending ELS IOs */
590 	ocs_lock(&node->active_ios_lock);
591 		ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
592 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
593 			ocs_list_remove(&node->els_io_pend_list, els);
594 
595 			ocs_io_free(node->ocs, els);
596 		}
597 	ocs_unlock(&node->active_ios_lock);
598 
599 	/* free all active ELS IOs */
600 	ocs_lock(&node->active_ios_lock);
601 		ocs_list_foreach_safe(&node->els_io_active_list, els, els_next) {
602 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
603 			ocs_list_remove(&node->els_io_active_list, els);
604 
605 			ocs_io_free(node->ocs, els);
606 		}
607 	ocs_unlock(&node->active_ios_lock);
608 
609 	/* manually purge pending frames (if any) */
610 	ocs_node_purge_pending(node);
611 
612 	ocs_node_free(node);
613 }
614 
615 /**
616  * @ingroup node_common
617  * @brief Perform HW call to attach a remote node
618  *
619  * @param node pointer to node object
620  *
621  * @return 0 on success, non-zero otherwise
622  */
623 int32_t
624 ocs_node_attach(ocs_node_t *node)
625 {
626 	int32_t rc = 0;
627 	ocs_sport_t *sport = node->sport;
628 	ocs_domain_t *domain = sport->domain;
629 	ocs_t *ocs = node->ocs;
630 
631 	if (!domain->attached) {
632 		ocs_log_test(ocs, "Warning: ocs_node_attach with unattached domain\n");
633 		return -1;
634 	}
635 	/* Update node->wwpn/wwnn */
636 
637 	ocs_node_build_eui_name(node->wwpn, sizeof(node->wwpn), ocs_node_get_wwpn(node));
638 	ocs_node_build_eui_name(node->wwnn, sizeof(node->wwnn), ocs_node_get_wwnn(node));
639 
640 	if (ocs->enable_hlm) {
641 		ocs_node_group_init(node);
642 	}
643 
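	/* copy the saved service parameters into the DMA buffer, skipping the
	 * 4-byte ELS command header at the start of the payload
	 */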
644 	ocs_dma_copy_in(&node->sparm_dma_buf, node->service_params+4, sizeof(node->service_params)-4);
645 
646 	/* take lock to protect node->rnode.attached */
647 	ocs_node_lock(node);
648 		rc = ocs_hw_node_attach(&ocs->hw, &node->rnode, &node->sparm_dma_buf);
649 		if (OCS_HW_RTN_IS_ERROR(rc)) {
650 			ocs_log_test(ocs, "ocs_hw_node_attach failed: %d\n", rc);
651 		}
652 	ocs_node_unlock(node);
653 
654 	return rc;
655 }
656 
657 /**
658  * @ingroup node_common
659  * @brief Generate text for a node's fc_id
660  *
661  * The text for a node's fc_id is generated, either as a well-known name or as a
662  * 6-digit hex value.
663  *
664  * @param fc_id fc_id
665  * @param buffer text buffer
666  * @param buffer_length text buffer length in bytes
667  *
668  * @return none
669  */
670 
671 void
672 ocs_node_fcid_display(uint32_t fc_id, char *buffer, uint32_t buffer_length)
673 {
674 	switch (fc_id) {
675 	case FC_ADDR_FABRIC:
676 		ocs_snprintf(buffer, buffer_length, "fabric");
677 		break;
678 	case FC_ADDR_CONTROLLER:
679 		ocs_snprintf(buffer, buffer_length, "fabctl");
680 		break;
681 	case FC_ADDR_NAMESERVER:
682 		ocs_snprintf(buffer, buffer_length, "nserve");
683 		break;
684 	default:
685 		if (FC_ADDR_IS_DOMAIN_CTRL(fc_id)) {
686 			ocs_snprintf(buffer, buffer_length, "dctl%02x",
687 				FC_ADDR_GET_DOMAIN_CTRL(fc_id));
688 		} else {
689 			ocs_snprintf(buffer, buffer_length, "%06x", fc_id);
690 		}
691 		break;
692 	}
693 
694 }
695 
696 /**
697  * @brief update the node's display name
698  *
699  * The node's display name is updated. This is sometimes needed because the sport
700  * portion of the name is updated after the node is allocated.
701  *
702  * @param node pointer to the node object
703  *
704  * @return none
705  */
706 
707 void
708 ocs_node_update_display_name(ocs_node_t *node)
709 {
710 	uint32_t port_id = node->rnode.fc_id;
711 	ocs_sport_t *sport = node->sport;
712 	char portid_display[16];
713 
714 	ocs_assert(sport);
715 
716 	ocs_node_fcid_display(port_id, portid_display, sizeof(portid_display));
717 
718 	ocs_snprintf(node->display_name, sizeof(node->display_name), "%s.%s", sport->display_name, portid_display);
719 }
720 
721 /**
722  * @brief clean up the XRI held for a pending link services accept, aborting
723  *        the XRI if required.
724  *
725  * <h3 class="desc">Description</h3>
726  * This function is called when the LS accept is not sent.
727  *
728  * @param node Node whose pending LS accept IO should be cleaned up
729  */
730 
731 void
732 ocs_node_send_ls_io_cleanup(ocs_node_t *node)
733 {
734 	ocs_t *ocs = node->ocs;
735 
736 	if (node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) {
737 		ocs_assert(node->ls_acc_io);
738 		ocs_log_debug(ocs, "[%s] cleaning up LS_ACC oxid=0x%x\n",
739 			node->display_name, node->ls_acc_oxid);
740 
741 		node->ls_acc_io->hio = NULL;
742 		ocs_els_io_free(node->ls_acc_io);
743 		node->send_ls_acc = OCS_NODE_SEND_LS_ACC_NONE;
744 		node->ls_acc_io = NULL;
745 	}
746 }
747 
748 /**
749  * @ingroup node_common
750  * @brief state: shutdown a node
751  *
752  * A node is shut down.
753  *
754  * @param ctx remote node sm context
755  * @param evt event to process
756  * @param arg per event optional argument
757  *
758  * @return returns NULL
759  *
762 
763 void *
764 __ocs_node_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
765 {
766 	int32_t rc;
767 	std_node_state_decl();
768 
769 	node_sm_trace();
770 
771 	switch (evt) {
772 	case OCS_EVT_ENTER: {
773 		ocs_node_hold_frames(node);
774 		ocs_assert(ocs_node_active_ios_empty(node), NULL);
775 		ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
776 
777 		/* by default, we will be freeing node after we unwind */
778 		node->req_free = 1;
779 
780 		switch (node->shutdown_reason) {
781 		case OCS_NODE_SHUTDOWN_IMPLICIT_LOGO:
782 			/* sm: if shutdown reason is implicit logout / ocs_node_attach
783 			 * Node shutdown b/c of PLOGI received when node already
784 			 * logged in. We have PLOGI service parameters, so submit
785 			 * node attach; we won't be freeing this node
786 			 */
787 
788 			/* currently, only case for implicit logo is PLOGI recvd. Thus,
789 			 * node's ELS IO pending list won't be empty (PLOGI will be on it)
790 			 */
791 			ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI, NULL);
792 			node_printf(node, "Shutdown reason: implicit logout, re-authenticate\n");
793 
794 			ocs_scsi_io_alloc_enable(node);
795 
796 			/* Re-attach node with the same HW node resources */
797 			node->req_free = 0;
798 			rc = ocs_node_attach(node);
799 			ocs_node_transition(node, __ocs_d_wait_node_attach, NULL);
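			/* a synchronous attach completes with no async event, so
			 * post NODE_ATTACH_OK here directly
			 */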
800 			if (rc == OCS_HW_RTN_SUCCESS_SYNC) {
801 				ocs_node_post_event(node, OCS_EVT_NODE_ATTACH_OK, NULL);
802 			}
803 			break;
804 		case OCS_NODE_SHUTDOWN_EXPLICIT_LOGO: {
805 			int8_t pend_frames_empty;
806 
807 			/* cleanup any pending LS_ACC ELSs */
808 			ocs_node_send_ls_io_cleanup(node);
809 			ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL);
810 
811 			ocs_lock(&node->pend_frames_lock);
812 				pend_frames_empty = ocs_list_empty(&node->pend_frames);
813 			ocs_unlock(&node->pend_frames_lock);
814 
815 			/* there are two scenarios where we want to keep this node alive:
816 			 * 1. there are pending frames that need to be processed or
817 			 * 2. we're an initiator and the remote node is a target and we
818 			 *    need to re-authenticate
819 			 */
820 			node_printf(node, "Shutdown: explicit logo pend=%d sport.ini=%d node.tgt=%d\n",
821 				    !pend_frames_empty, node->sport->enable_ini, node->targ);
822 
823 			if ((!pend_frames_empty) || (node->sport->enable_ini && node->targ)) {
824 				uint8_t send_plogi = FALSE;
825 				if (node->sport->enable_ini && node->targ) {
826 					/* we're an initiator and node shutting down is a target; we'll
827 					 * need to re-authenticate in initial state
828 					 */
829 					send_plogi = TRUE;
830 				}
831 
832 				/* transition to __ocs_d_init (will retain HW node resources) */
833 				ocs_scsi_io_alloc_enable(node);
834 				node->req_free = 0;
835 
836 				/* either pending frames exist, or we're re-authenticating with PLOGI
837 				 * (or both); in either case, return to initial state
838 				 */
839 				ocs_node_init_device(node, send_plogi);
840 			}
841 			/* else: let node shutdown occur */
842 			break;
843 		}
844 		case OCS_NODE_SHUTDOWN_DEFAULT:
845 		default:
846 			/* shutdown due to link down, node going away (xport event) or
847 			 * sport shutdown, purge pending and proceed to cleanup node
848 			 */
849 
850 			/* cleanup any pending LS_ACC ELSs */
851 			ocs_node_send_ls_io_cleanup(node);
852 			ocs_assert(ocs_els_io_list_empty(node, &node->els_io_pend_list), NULL);
853 
854 			node_printf(node, "Shutdown reason: default, purge pending\n");
855 			ocs_node_purge_pending(node);
856 			break;
857 		}
858 
859 		break;
860 	}
861 	case OCS_EVT_EXIT:
862 		ocs_node_accept_frames(node);
863 		break;
864 
865 	default:
866 		__ocs_node_common(__func__, ctx, evt, arg);
867 		return NULL;
868 	}
869 
870 	return NULL;
871 }
872 
873 /**
874  * @ingroup node_common
875  * @brief Checks to see if ELS's have been quiesced
876  *
877  * Check if ELS's have been quiesced. If so, transition to the
878  * next state in the shutdown process.
879  *
880  * @param node Node for which ELS's are checked
881  *
882  * @return Returns 1 if ELS's have been quiesced, 0 otherwise.
883  */
884 static int
885 ocs_node_check_els_quiesced(ocs_node_t *node)
886 {
887 	ocs_assert(node, -1);
888 
889 	/* check to see if ELS requests, completions are quiesced */
890 	if ((node->els_req_cnt == 0) && (node->els_cmpl_cnt == 0) &&
891 	    ocs_els_io_list_empty(node, &node->els_io_active_list)) {
892 		if (!node->attached) {
893 			/* hw node detach already completed, proceed */
894 			node_printf(node, "HW node not attached\n");
895 			ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
896 		} else {
897 			/* hw node detach hasn't completed, transition and wait */
898 			node_printf(node, "HW node still attached\n");
899 			ocs_node_transition(node, __ocs_node_wait_node_free, NULL);
900 		}
901 		return 1;
902 	}
903 	return 0;
904 }
905 
906 /**
907  * @ingroup node_common
908  * @brief Initiate node IO cleanup.
909  *
910  * Note: this function must be called with a non-attached node
911  * or a node for which the node detach (ocs_hw_node_detach())
912  * has already been initiated.
913  *
914  * @param node Node for which shutdown is initiated
915  *
916  * @return Returns None.
917  */
918 
919 void
920 ocs_node_initiate_cleanup(ocs_node_t *node)
921 {
922 	ocs_io_t *els;
923 	ocs_io_t *els_next;
924 	ocs_t *ocs;
925 	ocs_assert(node);
926 	ocs = node->ocs;
927 
928 	/* first cleanup ELS's that are pending (not yet active) */
929 	ocs_lock(&node->active_ios_lock);
930 		ocs_list_foreach_safe(&node->els_io_pend_list, els, els_next) {
931 			/* skip the ELS IO for which a response will be sent after shutdown */
932 			if ((node->send_ls_acc != OCS_NODE_SEND_LS_ACC_NONE) &&
933 			    (els == node->ls_acc_io)) {
934 				continue;
935 			}
936 			/* can't call ocs_els_io_free() because lock is held; cleanup manually */
937 			node_printf(node, "Freeing pending els %s\n", els->display_name);
938 			ocs_list_remove(&node->els_io_pend_list, els);
939 
940 			ocs_io_free(node->ocs, els);
941 		}
942 	ocs_unlock(&node->active_ios_lock);
943 
944 	if (node->ls_acc_io && node->ls_acc_io->hio != NULL) {
945 		/*
946 		 * if there's an IO that will result in an LS_ACC after
947 		 * shutdown and its HW IO is non-NULL, it better be an
948 		 * implicit logout in vanilla sequence coalescing. In this
949 		 * case, force the LS_ACC to go out on another XRI (hio)
950 		 * since the previous will have been aborted by the UNREG_RPI
951 		 */
952 		ocs_assert(node->shutdown_reason == OCS_NODE_SHUTDOWN_IMPLICIT_LOGO);
953 		ocs_assert(node->send_ls_acc == OCS_NODE_SEND_LS_ACC_PLOGI);
954 		node_printf(node, "invalidating ls_acc_io due to implicit logo\n");
955 
956 		/* No need to abort because the unreg_rpi takes care of it, just free */
957 		ocs_hw_io_free(&ocs->hw, node->ls_acc_io->hio);
958 
959 		/* NULL out hio to force the LS_ACC to grab a new XRI */
960 		node->ls_acc_io->hio = NULL;
961 	}
962 
963 	/*
964 	 * if ELS's have already been quiesced, will move to next state
965 	 * if ELS's have not been quiesced, abort them
966 	 */
967 	if (ocs_node_check_els_quiesced(node) == 0) {
968 		/*
969 		 * Abort all ELS's since ELS's won't be aborted by HW
970 		 * node free.
971 		 */
972 		ocs_node_abort_all_els(node);
973 		ocs_node_transition(node, __ocs_node_wait_els_shutdown, NULL);
974 	}
975 }
976 
977 /**
978  * @ingroup node_common
979  * @brief Node state machine: Wait for all ELSs to complete.
980  *
981  * <h3 class="desc">Description</h3>
982  * State waits for all ELSs to complete after aborting all
983  * outstanding ELSs.
984  *
985  * @param ctx Remote node state machine context.
986  * @param evt Event to process.
987  * @param arg Per event optional argument.
988  *
989  * @return Returns NULL.
990  */
991 
992 void *
993 __ocs_node_wait_els_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
994 {
995 	uint8_t check_quiesce = FALSE;
996 	std_node_state_decl();
997 
998 	node_sm_trace();
999 
1000 	switch (evt) {
1001 	case OCS_EVT_ENTER: {
1002 		ocs_node_hold_frames(node);
1003 		if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1004 			node_printf(node, "All ELS IOs complete\n");
1005 			check_quiesce = TRUE;
1006 		}
1007 		break;
1008 	}
1009 	case OCS_EVT_EXIT:
1010 		ocs_node_accept_frames(node);
1011 		break;
1012 
1013 	case OCS_EVT_SRRS_ELS_REQ_OK:
1014 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1015 	case OCS_EVT_SRRS_ELS_REQ_RJT:
1016 	case OCS_EVT_ELS_REQ_ABORTED:
1017 		ocs_assert(node->els_req_cnt, NULL);
1018 		node->els_req_cnt--;
1019 		check_quiesce = TRUE;
1020 		break;
1021 
1022 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1023 	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1024 		ocs_assert(node->els_cmpl_cnt, NULL);
1025 		node->els_cmpl_cnt--;
1026 		check_quiesce = TRUE;
1027 		break;
1028 
1029 	case OCS_EVT_ALL_CHILD_NODES_FREE:
1030 		/* all ELS IO's complete */
1031 		node_printf(node, "All ELS IOs complete\n");
1032 		ocs_assert(ocs_els_io_list_empty(node, &node->els_io_active_list), NULL);
1033 		check_quiesce = TRUE;
1034 		break;
1035 
1036 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1037 		break;
1038 
1039 	case OCS_EVT_DOMAIN_ATTACH_OK:
1040 		/* don't care about domain_attach_ok */
1041 		break;
1042 
1043 	/* ignore shutdown events as we're already in shutdown path */
1044 	case OCS_EVT_SHUTDOWN:
1045 		/* have default shutdown event take precedence */
1046 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1047 		/* fall through */
1048 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1049 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1050 		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1051 		break;
1052 
1053 	default:
1054 		__ocs_node_common(__func__, ctx, evt, arg);
1055 		return NULL;
1056 	}
1057 
1058 	if (check_quiesce) {
1059 		ocs_node_check_els_quiesced(node);
1060 	}
1061 	return NULL;
1062 }
1063 
1064 /**
1065  * @ingroup node_common
1066  * @brief Node state machine: Wait for a HW node free event to
1067  * complete.
1068  *
1069  * <h3 class="desc">Description</h3>
1070  * State waits for the node free event to be received from the HW.
1071  *
1072  * @param ctx Remote node state machine context.
1073  * @param evt Event to process.
1074  * @param arg Per event optional argument.
1075  *
1076  * @return Returns NULL.
1077  */
1078 
1079 void *
1080 __ocs_node_wait_node_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1081 {
1082 	std_node_state_decl();
1083 
1084 	node_sm_trace();
1085 
1086 	switch (evt) {
1087 	case OCS_EVT_ENTER:
1088 		ocs_node_hold_frames(node);
1089 		break;
1090 
1091 	case OCS_EVT_EXIT:
1092 		ocs_node_accept_frames(node);
1093 		break;
1094 
1095 	case OCS_EVT_NODE_FREE_OK:
1096 		/* node is officially no longer attached */
1097 		node->attached = FALSE;
1098 		ocs_node_transition(node, __ocs_node_wait_ios_shutdown, NULL);
1099 		break;
1100 
1101 	case OCS_EVT_ALL_CHILD_NODES_FREE:
1102 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1103 		/* As IOs and ELS IOs complete, we expect to get these events */
1104 		break;
1105 
1106 	case OCS_EVT_DOMAIN_ATTACH_OK:
1107 		/* don't care about domain_attach_ok */
1108 		break;
1109 
1110 	/* ignore shutdown events as we're already in shutdown path */
1111 	case OCS_EVT_SHUTDOWN:
1112 		/* have default shutdown event take precedence */
1113 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1114 		/* Fall through */
1115 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1116 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1117 		node_printf(node, "%s received\n", ocs_sm_event_name(evt));
1118 		break;
1119 	default:
1120 		__ocs_node_common(__func__, ctx, evt, arg);
1121 		return NULL;
1122 	}
1123 
1124 	return NULL;
1125 }
1126 
1127 /**
1128  * @ingroup node_common
1129  * @brief state: wait for IOs to complete during node shutdown
1130  *
1131  * State is entered when a node receives a shutdown event; it waits
1132  * for all the active IOs and ELS IOs associated with the node to complete.
1133  *
1134  * @param ctx remote node sm context
1135  * @param evt event to process
1136  * @param arg per event optional argument
1137  *
1138  * @return returns NULL
1139  */
1140 
1141 void *
1142 __ocs_node_wait_ios_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1143 {
1144 	ocs_io_t *io;
1145 	ocs_io_t *next;
1146 	std_node_state_decl();
1147 
1148 	node_sm_trace();
1149 
1150 	switch (evt) {
1151 	case OCS_EVT_ENTER:
1152 		ocs_node_hold_frames(node);
1153 
1154 		/* first check to see if no ELS IOs are outstanding */
1155 		if (ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1156 			/* If there are any active IOs, free them. */
1157 			if (!ocs_node_active_ios_empty(node)) {
1158 				ocs_lock(&node->active_ios_lock);
1159 				ocs_list_foreach_safe(&node->active_ios, io, next) {
1160 					ocs_list_remove(&io->node->active_ios, io);
1161 					ocs_io_free(node->ocs, io);
1162 				}
1163 				ocs_unlock(&node->active_ios_lock);
1164 			}
1165 			ocs_node_transition(node, __ocs_node_shutdown, NULL);
1166 		}
1167 		break;
1168 
1169 	case OCS_EVT_NODE_ACTIVE_IO_LIST_EMPTY:
1170 	case OCS_EVT_ALL_CHILD_NODES_FREE: {
1171 		if (ocs_node_active_ios_empty(node) &&
1172 		    ocs_els_io_list_empty(node, &node->els_io_active_list)) {
1173 			ocs_node_transition(node, __ocs_node_shutdown, NULL);
1174 		}
1175 		break;
1176 	}
1177 
1178 	case OCS_EVT_EXIT:
1179 		ocs_node_accept_frames(node);
1180 		break;
1181 
1182 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1183 		/* Can happen as ELS IOs complete */
1184 		ocs_assert(node->els_req_cnt, NULL);
1185 		node->els_req_cnt--;
1186 		break;
1187 
1188 	/* ignore shutdown events as we're already in shutdown path */
1189 	case OCS_EVT_SHUTDOWN:
1190 		/* have default shutdown event take precedence */
1191 		node->shutdown_reason = OCS_NODE_SHUTDOWN_DEFAULT;
1192 		/* fall through */
1193 	case OCS_EVT_SHUTDOWN_EXPLICIT_LOGO:
1194 	case OCS_EVT_SHUTDOWN_IMPLICIT_LOGO:
1195 		ocs_log_debug(ocs, "[%s] %-20s\n", node->display_name, ocs_sm_event_name(evt));
1196 		break;
1197 	case OCS_EVT_DOMAIN_ATTACH_OK:
1198 		/* don't care about domain_attach_ok */
1199 		break;
1200 	default:
1201 		__ocs_node_common(__func__, ctx, evt, arg);
1202 		return NULL;
1203 	}
1204 
1205 	return NULL;
1206 }
1207 
1208 /**
1209  * @ingroup node_common
1210  * @brief state: common node event handler
1211  *
1212  * Handle common/shared node events
1213  *
1214  * @param funcname calling function's name
1215  * @param ctx remote node sm context
1216  * @param evt event to process
1217  * @param arg per event optional argument
1218  *
1219  * @return returns NULL
1220  */
1221 
1222 void *
1223 __ocs_node_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
1224 {
1225 	ocs_node_t *node = NULL;
1226 	ocs_t *ocs = NULL;
1227 	ocs_node_cb_t *cbdata = arg;
1228 	ocs_assert(ctx, NULL);
1229 	ocs_assert(ctx->app, NULL);
1230 	node = ctx->app;
1231 	ocs_assert(node->ocs, NULL);
1232 	ocs = node->ocs;
1233 
1234 	switch (evt) {
1235 	case OCS_EVT_ENTER:
1236 	case OCS_EVT_REENTER:
1237 	case OCS_EVT_EXIT:
1238 	case OCS_EVT_SPORT_TOPOLOGY_NOTIFY:
1239 	case OCS_EVT_NODE_MISSING:
1240 	case OCS_EVT_FCP_CMD_RCVD:
1241 		break;
1242 
1243 	case OCS_EVT_NODE_REFOUND:
1244 		node->refound = 1;
1245 		break;
1246 
1247 	/* node->attached must be set appropriately for all node attach/detach events */
1248 	case OCS_EVT_NODE_ATTACH_OK:
1249 		node->attached = TRUE;
1250 		break;
1251 
1252 	case OCS_EVT_NODE_FREE_OK:
1253 	case OCS_EVT_NODE_ATTACH_FAIL:
1254 		node->attached = FALSE;
1255 		break;
1256 
1257 	/* handle any ELS completions that other states either didn't care about
1258 	 * or forgot about
1259 	 */
1260 	case OCS_EVT_SRRS_ELS_CMPL_OK:
1261 	case OCS_EVT_SRRS_ELS_CMPL_FAIL:
1262 		ocs_assert(node->els_cmpl_cnt, NULL);
1263 		node->els_cmpl_cnt--;
1264 		break;
1265 
1266 	/* handle any ELS request completions that other states either didn't care about
1267 	 * or forgot about
1268 	 */
1269 	case OCS_EVT_SRRS_ELS_REQ_OK:
1270 	case OCS_EVT_SRRS_ELS_REQ_FAIL:
1271 	case OCS_EVT_SRRS_ELS_REQ_RJT:
1272 	case OCS_EVT_ELS_REQ_ABORTED:
1273 		ocs_assert(node->els_req_cnt, NULL);
1274 		node->els_req_cnt--;
1275 		break;
1276 
1277 	case OCS_EVT_ELS_RCVD: {
1278 		fc_header_t *hdr = cbdata->header->dma.virt;
1279 
1280 		/* Unsupported ELS was received, send LS_RJT, command not supported */
1281 		ocs_log_debug(ocs, "[%s] (%s) ELS x%02x, LS_RJT not supported\n",
1282 			      node->display_name, funcname, ((uint8_t*)cbdata->payload->dma.virt)[0]);
1283 		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1284 			FC_REASON_COMMAND_NOT_SUPPORTED, FC_EXPL_NO_ADDITIONAL, 0,
1285 			NULL, NULL);
1286 		break;
1287 	}
1288 
1289 	case OCS_EVT_PLOGI_RCVD:
1290 	case OCS_EVT_FLOGI_RCVD:
1291 	case OCS_EVT_LOGO_RCVD:
1292 	case OCS_EVT_PRLI_RCVD:
1293 	case OCS_EVT_PRLO_RCVD:
1294 	case OCS_EVT_PDISC_RCVD:
1295 	case OCS_EVT_FDISC_RCVD:
1296 	case OCS_EVT_ADISC_RCVD:
1297 	case OCS_EVT_RSCN_RCVD:
1298 	case OCS_EVT_SCR_RCVD: {
1299 		fc_header_t *hdr = cbdata->header->dma.virt;
1300 		/* sm: / send ELS_RJT */
1301 		ocs_log_debug(ocs, "[%s] (%s) %s sending ELS_RJT\n",
1302 			      node->display_name, funcname, ocs_sm_event_name(evt));
1303 		/* if we didn't catch this in a state, send generic LS_RJT */
1304 		ocs_send_ls_rjt(cbdata->io, ocs_be16toh(hdr->ox_id),
1305 			FC_REASON_UNABLE_TO_PERFORM, FC_EXPL_NO_ADDITIONAL, 0,
1306 			NULL, NULL);
1307 
1308 		break;
1309 	}
1310 	case OCS_EVT_GID_PT_RCVD:
1311 	case OCS_EVT_RFT_ID_RCVD:
1312 	case OCS_EVT_RFF_ID_RCVD: {
1313 		fc_header_t *hdr = cbdata->header->dma.virt;
1314 		ocs_log_debug(ocs, "[%s] (%s) %s sending CT_REJECT\n",
1315 			      node->display_name, funcname, ocs_sm_event_name(evt));
1316 		ocs_send_ct_rsp(cbdata->io, hdr->ox_id, cbdata->payload->dma.virt, FCCT_HDR_CMDRSP_REJECT, FCCT_COMMAND_NOT_SUPPORTED, 0);
1317 		break;
1318 	}
1319 
1320 	case OCS_EVT_ABTS_RCVD: {
1321 		fc_header_t *hdr = cbdata->header->dma.virt;
1322 		ocs_log_debug(ocs, "[%s] (%s) %s sending BA_ACC\n",
1323 			      node->display_name, funcname, ocs_sm_event_name(evt));
1324 
1325 		/* sm: send BA_ACC */
1326 		ocs_bls_send_acc_hdr(cbdata->io, hdr);
1327 		break;
1328 	}
1329 
1330 	default:
1331 		ocs_log_test(node->ocs, "[%s] %-20s %-20s not handled\n", node->display_name, funcname,
1332 			ocs_sm_event_name(evt));
1333 		break;
1334 	}
1335 	return NULL;
1336 }
1337 
1338 /**
1339  * @ingroup node_common
1340  * @brief save node service parameters
1341  *
1342  * Service parameters are copied into the node structure
1343  *
1344  * @param node pointer to node structure
1345  * @param payload pointer to service parameters to save
1346  *
1347  * @return none
1348  */
1349 
1350 void
1351 ocs_node_save_sparms(ocs_node_t *node, void *payload)
1352 {
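	/* the payload is the raw ELS frame contents, including the 4-byte
	 * command header; ocs_node_attach() skips that header when copying the
	 * parameters into the DMA buffer
	 */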
1353 	ocs_memcpy(node->service_params, payload, sizeof(node->service_params));
1354 }
1355 
1356 /**
1357  * @ingroup node_common
1358  * @brief Post event to node state machine context
1359  *
1360  * This is used by the node state machine code to post events to the nodes.  Upon
1361  * completion of the event posting, if the nesting depth is zero and we're not holding
1362  * inbound frames, then the pending frames are processed.
1363  *
1364  * @param node pointer to node
1365  * @param evt event to post
1366  * @param arg event posting argument
1367  *
1368  * @return none
1369  */
1370 
1371 void
1372 ocs_node_post_event(ocs_node_t *node, ocs_sm_event_t evt, void *arg)
1373 {
1374 	int free_node = FALSE;
1375 	ocs_assert(node);
1376 
1377 	ocs_node_lock(node);
1378 		node->evtdepth ++;
1379 
1380 		ocs_sm_post_event(&node->sm, evt, arg);
1381 
1382 		/* If our event call depth is one and we're not holding frames
1383 		 * then we can dispatch any pending frames. We don't want to allow
1384 		 * the ocs_process_node_pending() call to recurse.
1385 		 */
1386 		if (!node->hold_frames && (node->evtdepth == 1)) {
1387 			ocs_process_node_pending(node);
1388 		}
1389 		node->evtdepth --;
1390 
1391 		/* Free the node object if so requested, and we're at an event
1392 		 * call depth of zero
1393 		 */
1394 		if ((node->evtdepth == 0) && node->req_free) {
1395 			free_node = TRUE;
1396 		}
1397 	ocs_node_unlock(node);
1398 
1399 	if (free_node) {
1400 		ocs_node_free(node);
1401 	}
1402 
1403 	return;
1404 }
1405 
1406 /**
1407  * @ingroup node_common
1408  * @brief transition state of a node
1409  *
1410  * The node's state is transitioned to the requested state.  Entry/Exit
1411  * events are posted as needed.
1412  *
1413  * @param node pointer to node
1414  * @param state state to transition to
1415  * @param data transition data
1416  *
1417  * @return none
1418  */
1419 
1420 void
1421 ocs_node_transition(ocs_node_t *node, ocs_sm_function_t state, void *data)
1422 {
1423 	ocs_sm_ctx_t *ctx = &node->sm;
1424 
1425 	ocs_node_lock(node);
1426 		if (ctx->current_state == state) {
1427 			ocs_node_post_event(node, OCS_EVT_REENTER, data);
1428 		} else {
1429 			ocs_node_post_event(node, OCS_EVT_EXIT, data);
1430 			ctx->current_state = state;
1431 			ocs_node_post_event(node, OCS_EVT_ENTER, data);
1432 		}
1433 	ocs_node_unlock(node);
1434 }
1435 
1436 /**
1437  * @ingroup node_common
1438  * @brief build EUI formatted WWN
1439  *
1440  * Build a WWN using the somewhat transport-agnostic iSCSI naming specification; for FC,
1441  * use the eui. format, an ASCII string such as "eui.10000000C9A19501".
1442  *
1443  * @param buffer buffer to place formatted name into
1444  * @param buffer_len length in bytes of the buffer
1445  * @param eui_name cpu endian 64 bit WWN value
1446  *
1447  * @return none
1448  */
1449 
1450 void
1451 ocs_node_build_eui_name(char *buffer, uint32_t buffer_len, uint64_t eui_name)
1452 {
1453 	ocs_memset(buffer, 0, buffer_len);
1454 
1455 	ocs_snprintf(buffer, buffer_len, "eui.%016llx", (unsigned long long)eui_name);
1456 }
1457 
1458 /**
1459  * @ingroup node_common
1460  * @brief return the node's WWPN as a uint64_t
1461  *
1462  * The WWPN is computed from service parameters and returned as a uint64_t
1463  *
1464  * @param node pointer to node structure
1465  *
1466  * @return WWPN
1467  *
1468  */
1469 
1470 uint64_t
1471 ocs_node_get_wwpn(ocs_node_t *node)
1472 {
1473 	fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params;
1474 
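	/* the name is carried as two big-endian 32-bit words in the service
	 * parameters; reassemble it in host endianness
	 */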
1475 	return (((uint64_t)ocs_be32toh(sp->port_name_hi) << 32ll) | (ocs_be32toh(sp->port_name_lo)));
1476 }
1477 
1478 /**
1479  * @ingroup node_common
1480  * @brief return the node's WWNN as a uint64_t
1481  *
1482  * The WWNN is computed from service parameters and returned as a uint64_t
1483  *
1484  * @param node pointer to node structure
1485  *
1486  * @return WWNN
1487  *
1488  */
1489 
1490 uint64_t
1491 ocs_node_get_wwnn(ocs_node_t *node)
1492 {
1493 	fc_plogi_payload_t *sp = (fc_plogi_payload_t*) node->service_params;
1494 
1495 	return (((uint64_t)ocs_be32toh(sp->node_name_hi) << 32ll) | (ocs_be32toh(sp->node_name_lo)));
1496 }
1497 
1498 /**
1499  * @brief Generate node ddump data
1500  *
1501  * Generates the node ddump data.
1502  *
1503  * @param textbuf pointer to text buffer
1504  * @param node pointer to node context
1505  *
1506  * @return Returns 0 on success, or a negative value on failure.
1507  */
1508 
1509 int
1510 ocs_ddump_node(ocs_textbuf_t *textbuf, ocs_node_t *node)
1511 {
1512 	ocs_io_t *io;
1513 	ocs_io_t *els;
1514 	int retval = 0;
1515 
1516 	ocs_ddump_section(textbuf, "node", node->instance_index);
1517 	ocs_ddump_value(textbuf, "display_name", "%s", node->display_name);
1518 	ocs_ddump_value(textbuf, "current_state", "%s", node->current_state_name);
1519 	ocs_ddump_value(textbuf, "prev_state", "%s", node->prev_state_name);
1520 	ocs_ddump_value(textbuf, "current_evt", "%s", ocs_sm_event_name(node->current_evt));
1521 	ocs_ddump_value(textbuf, "prev_evt", "%s", ocs_sm_event_name(node->prev_evt));
1522 
1523 	ocs_ddump_value(textbuf, "indicator", "%#x", node->rnode.indicator);
1524 	ocs_ddump_value(textbuf, "fc_id", "%#06x", node->rnode.fc_id);
1525 	ocs_ddump_value(textbuf, "attached", "%d", node->rnode.attached);
1526 
1527 	ocs_ddump_value(textbuf, "hold_frames", "%d", node->hold_frames);
1528 	ocs_ddump_value(textbuf, "io_alloc_enabled", "%d", node->io_alloc_enabled);
1529 	ocs_ddump_value(textbuf, "shutdown_reason", "%d", node->shutdown_reason);
1530 	ocs_ddump_value(textbuf, "send_ls_acc", "%d", node->send_ls_acc);
1531 	ocs_ddump_value(textbuf, "ls_acc_did", "%d", node->ls_acc_did);
1532 	ocs_ddump_value(textbuf, "ls_acc_oxid", "%#04x", node->ls_acc_oxid);
1533 	ocs_ddump_value(textbuf, "req_free", "%d", node->req_free);
1534 	ocs_ddump_value(textbuf, "els_req_cnt", "%d", node->els_req_cnt);
1535 	ocs_ddump_value(textbuf, "els_cmpl_cnt", "%d", node->els_cmpl_cnt);
1536 
1537 	ocs_ddump_value(textbuf, "targ", "%d", node->targ);
1538 	ocs_ddump_value(textbuf, "init", "%d", node->init);
1539 	ocs_ddump_value(textbuf, "wwnn", "%s", node->wwnn);
1540 	ocs_ddump_value(textbuf, "wwpn", "%s", node->wwpn);
1541 	ocs_ddump_value(textbuf, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
1542 	ocs_ddump_value(textbuf, "chained_io_count", "%d", node->chained_io_count);
1543 	ocs_ddump_value(textbuf, "abort_cnt", "%d", node->abort_cnt);
1544 
1545 	ocs_display_sparams(NULL, "node_sparams", 1, textbuf, node->service_params+4);
1546 
1547 	ocs_lock(&node->pend_frames_lock);
1548 		if (!ocs_list_empty(&node->pend_frames)) {
1549 			ocs_hw_sequence_t *frame;
1550 			ocs_ddump_section(textbuf, "pending_frames", 0);
1551 			ocs_list_foreach(&node->pend_frames, frame) {
1552 				fc_header_t *hdr;
1553 				char buf[128];
1554 
1555 				hdr = frame->header->dma.virt;
1556 				ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu",
1557 				 hdr->r_ctl, ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
1558 				 frame->payload->dma.len);
1559 				ocs_ddump_value(textbuf, "frame", "%s", buf);
1560 			}
1561 			ocs_ddump_endsection(textbuf, "pending_frames", 0);
1562 		}
1563 	ocs_unlock(&node->pend_frames_lock);
1564 
1565 	ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
1566 	ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_NODE, node);
1567 
1568 	ocs_lock(&node->active_ios_lock);
1569 		ocs_ddump_section(textbuf, "active_ios", 0);
1570 		ocs_list_foreach(&node->active_ios, io) {
1571 			ocs_ddump_io(textbuf, io);
1572 		}
1573 		ocs_ddump_endsection(textbuf, "active_ios", 0);
1574 
1575 		ocs_ddump_section(textbuf, "els_io_pend_list", 0);
1576 		ocs_list_foreach(&node->els_io_pend_list, els) {
1577 			ocs_ddump_els(textbuf, els);
1578 		}
1579 		ocs_ddump_endsection(textbuf, "els_io_pend_list", 0);
1580 
1581 		ocs_ddump_section(textbuf, "els_io_active_list", 0);
1582 		ocs_list_foreach(&node->els_io_active_list, els) {
1583 			ocs_ddump_els(textbuf, els);
1584 		}
1585 		ocs_ddump_endsection(textbuf, "els_io_active_list", 0);
1586 	ocs_unlock(&node->active_ios_lock);
1587 
1588 	ocs_ddump_endsection(textbuf, "node", node->instance_index);
1589 
1590 	return retval;
1591 }
1592 
1593 /**
1594  * @brief check ELS request completion
1595  *
1596  * Check ELS request completion event to make sure it's for the
1597  * ELS request we expect. If not, invoke the given common event
1598  * handler and return an error.
1599  *
1600  * @param ctx state machine context
1601  * @param evt ELS request event
1602  * @param arg event argument
1603  * @param cmd ELS command expected
1604  * @param node_common_func common event handler to call if ELS
1605  *      		   doesn't match
1606  * @param funcname function name that called this
1607  *
1608  * @return zero if ELS command matches, -1 otherwise
1609  */
1610 int32_t
1611 node_check_els_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint8_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
1612 {
1613 	ocs_node_t *node = NULL;
1614 	ocs_t *ocs = NULL;
1615 	ocs_node_cb_t *cbdata = arg;
1616 	fc_els_gen_t *els_gen = NULL;
1617 	ocs_assert(ctx, -1);
1618 	node = ctx->app;
1619 	ocs_assert(node, -1);
1620 	ocs = node->ocs;
1621 	ocs_assert(ocs, -1);
1622 	cbdata = arg;
1623 	ocs_assert(cbdata, -1);
1624 	ocs_assert(cbdata->els, -1);
1625 	els_gen = (fc_els_gen_t *)cbdata->els->els_req.virt;
1626 	ocs_assert(els_gen, -1);
1627 
1628 	if ((cbdata->els->hio_type != OCS_HW_ELS_REQ) || (els_gen->command_code != cmd)) {
1629 		if (cbdata->els->hio_type != OCS_HW_ELS_REQ) {
1630 			ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received type=%d\n",
1631 				node->display_name, funcname, cmd, cbdata->els->hio_type);
1632 		} else {
1633 			ocs_log_debug(node->ocs, "[%s] %-20s expecting ELS cmd=x%x received cmd=x%x\n",
1634 				node->display_name, funcname, cmd, els_gen->command_code);
1635 		}
1636 		/* send event to common handler */
1637 		node_common_func(funcname, ctx, evt, arg);
1638 		return -1;
1639 	}
1640 	return 0;
1641 }
1642 
1643 /**
1644  * @brief check NS request completion
1645  *
1646  * Check ELS request completion event to make sure it's for the
1647  * nameserver request we expect. If not, invoke given common
1648  * event handler and return an error.
1649  *
1650  * @param ctx state machine context
1651  * @param evt ELS request event
1652  * @param arg event argument
1653  * @param cmd nameserver command expected
1654  * @param node_common_func common event handler to call if
1655  *      		   nameserver cmd doesn't match
1656  * @param funcname function name that called this
1657  *
1658  * @return zero if NS command matches, -1 otherwise
1659  */
1660 int32_t
1661 node_check_ns_req(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg, uint32_t cmd, ocs_node_common_func_t node_common_func, const char *funcname)
1662 {
1663 	ocs_node_t *node = NULL;
1664 	ocs_t *ocs = NULL;
1665 	ocs_node_cb_t *cbdata = arg;
1666 	fcct_iu_header_t *fcct = NULL;
1667 	ocs_assert(ctx, -1);
1668 	node = ctx->app;
1669 	ocs_assert(node, -1);
1670 	ocs = node->ocs;
1671 	ocs_assert(ocs, -1);
1672 	cbdata = arg;
1673 	ocs_assert(cbdata, -1);
1674 	ocs_assert(cbdata->els, -1);
1675 	fcct = (fcct_iu_header_t *)cbdata->els->els_req.virt;
1676 	ocs_assert(fcct, -1);
1677 
1678 	if ((cbdata->els->hio_type != OCS_HW_FC_CT) || fcct->cmd_rsp_code != ocs_htobe16(cmd)) {
1679 		if (cbdata->els->hio_type != OCS_HW_FC_CT) {
1680 			ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received type=%d\n",
1681 				node->display_name, funcname, cmd, cbdata->els->hio_type);
1682 		} else {
1683 			ocs_log_debug(node->ocs, "[%s] %-20s expecting NS cmd=x%x received cmd=x%x\n",
1684 				node->display_name, funcname, cmd, fcct->cmd_rsp_code);
1685 		}
1686 		/* send event to common handler */
1687 		node_common_func(funcname, ctx, evt, arg);
1688 		return -1;
1689 	}
1690 	return 0;
1691 }
1692 
1693 void
1694 ocs_mgmt_node_list(ocs_textbuf_t *textbuf, void *object)
1695 {
1696 	ocs_io_t *io;
1697 	ocs_node_t *node = (ocs_node_t *)object;
1698 
1699 	ocs_mgmt_start_section(textbuf, "node", node->instance_index);
1700 
1701 	/* Readonly values */
1702 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
1703 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
1704 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
1705 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "attached");
1706 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "hold_frames");
1707 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "shutting_down");
1708 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "req_free");
1709 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id");
1710 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "ox_id_in_use");
1711 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "abort_cnt");
1712 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "targ");
1713 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "init");
1714 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
1715 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
1716 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "pend_frames");
1717 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "chained_io_count");
1718 
1719 	/* Actions */
1720 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
1721 
1722 	ocs_lock(&node->active_ios_lock);
1723 	ocs_list_foreach(&node->active_ios, io) {
1724 		if ((io->mgmt_functions) && (io->mgmt_functions->get_list_handler)) {
1725 			io->mgmt_functions->get_list_handler(textbuf, io);
1726 		}
1727 	}
1728 	ocs_unlock(&node->active_ios_lock);
1729 
1730 	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
1731 }
1732 
1733 int
1734 ocs_mgmt_node_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
1735 {
1736 	ocs_io_t *io;
1737 	ocs_node_t *node = (ocs_node_t *)object;
1738 	char qualifier[80];
1739 	int retval = -1;
1740 
1741 	ocs_mgmt_start_section(textbuf, "node", node->instance_index);
1742 
1743 	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);
1744 
1745 	/* If it doesn't start with my qualifier, I don't know what to do with it */
1746 	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1747 		char *unqualified_name = name + strlen(qualifier) + 1;
1748 
1749 		/* See if it's a value I can supply */
1750 		if (ocs_strcmp(unqualified_name, "display_name") == 0) {
1751 			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
1752 			retval = 0;
1753 		} else if (ocs_strcmp(unqualified_name, "indicator") == 0) {
1754 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
1755 			retval = 0;
1756 		} else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
1757 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
1758 			retval = 0;
1759 		} else if (ocs_strcmp(unqualified_name, "attached") == 0) {
1760 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
1761 			retval = 0;
1762 		} else if (ocs_strcmp(unqualified_name, "hold_frames") == 0) {
1763 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
1764 			retval = 0;
1765 		} else if (ocs_strcmp(unqualified_name, "io_alloc_enabled") == 0) {
1766 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
1767 			retval = 0;
1768 		} else if (ocs_strcmp(unqualified_name, "req_free") == 0) {
1769 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
1770 			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "ls_acc_oxid") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%04x", node->ls_acc_oxid);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "ls_acc_did") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%04x", node->ls_acc_did);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "abort_cnt") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "targ") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ", node->targ);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "init") == 0) {
			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init", node->init);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "wwpn", node->wwpn);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "wwnn", node->wwnn);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "current_state") == 0) {
			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "login_state") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "pend_frames") == 0) {
			ocs_hw_sequence_t *frame;
			ocs_lock(&node->pend_frames_lock);
				ocs_list_foreach(&node->pend_frames, frame) {
					fc_header_t *hdr;
					char buf[128];

					hdr = frame->header->dma.virt;
					ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
						 ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
						 frame->payload->dma.len);
					ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
				}
			ocs_unlock(&node->pend_frames_lock);
			retval = 0;
		} else if (ocs_strcmp(unqualified_name, "chained_io_count") == 0) {
			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
			retval = 0;
		} else {
			/* If I didn't know the value of this status, pass the request to each of my children */
			ocs_lock(&node->active_ios_lock);
				ocs_list_foreach(&node->active_ios, io) {
					if ((io->mgmt_functions) && (io->mgmt_functions->get_handler)) {
						retval = io->mgmt_functions->get_handler(textbuf, qualifier, name, io);
					}

					if (retval == 0) {
						break;
					}
				}
			ocs_unlock(&node->active_ios_lock);
		}
	}

	ocs_mgmt_end_section(textbuf, "node", node->instance_index);

	return retval;
}
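
/*
 * Sketch (hypothetical helper, not driver code): fetching one property
 * through the handler above. The fully qualified name follows the
 * "%s/node[%d]" qualifier that ocs_mgmt_node_get() builds; the textbuf is
 * assumed to have been initialized by the caller.
 */
#if 0
static int
example_fetch_fc_id(ocs_textbuf_t *textbuf, ocs_node_t *node, char *parent)
{
	char name[128];

	/* e.g. ".../node[3]/fc_id" */
	ocs_snprintf(name, sizeof(name), "%s/node[%d]/fc_id", parent, node->instance_index);

	/* returns 0 and emits the <fc_id> property if the name matched */
	return ocs_mgmt_node_get(textbuf, parent, name, node);
}
#endif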

void
ocs_mgmt_node_get_all(ocs_textbuf_t *textbuf, void *object)
{
	ocs_io_t *io;
	ocs_node_t *node = (ocs_node_t *)object;
	ocs_hw_sequence_t *frame;

	ocs_mgmt_start_section(textbuf, "node", node->instance_index);

	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", node->display_name);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", node->rnode.indicator);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", node->rnode.fc_id);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "attached", node->rnode.attached);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "hold_frames", node->hold_frames);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "io_alloc_enabled", node->io_alloc_enabled);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "req_free", node->req_free);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_oxid", "0x%04x", node->ls_acc_oxid);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "ls_acc_did", "0x%04x", node->ls_acc_did);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "abort_cnt", "%d", node->abort_cnt);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "targ", node->targ);
	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "init", node->init);
	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "wwpn", node->wwpn);
	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "wwnn", node->wwnn);

	ocs_lock(&node->pend_frames_lock);
	ocs_list_foreach(&node->pend_frames, frame) {
		fc_header_t *hdr;
		char buf[128];

		hdr = frame->header->dma.virt;
		ocs_snprintf(buf, sizeof(buf), "%02x/%04x/%04x len %zu", hdr->r_ctl,
			     ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id),
			     frame->payload->dma.len);
		ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "pend_frames", buf);
	}
	ocs_unlock(&node->pend_frames_lock);

	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "chained_io_count", "%d", node->chained_io_count);
	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_EX, "resume");
	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "current_state", node->current_state_name);
	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "login_state", "%d", (node->sm.current_state == __ocs_d_device_ready) ? 1 : 0);

	ocs_lock(&node->active_ios_lock);
	ocs_list_foreach(&node->active_ios, io) {
		if ((io->mgmt_functions) && (io->mgmt_functions->get_all_handler)) {
			io->mgmt_functions->get_all_handler(textbuf, io);
		}
	}
	ocs_unlock(&node->active_ios_lock);

	ocs_mgmt_end_section(textbuf, "node", node->instance_index);
}

int
ocs_mgmt_node_set(char *parent, char *name, char *value, void *object)
{
	ocs_io_t *io;
	ocs_node_t *node = (ocs_node_t *)object;
	char qualifier[80];
	int retval = -1;

	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);

	/* If it doesn't start with my qualifier I don't know what to do with it */
	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
		ocs_lock(&node->active_ios_lock);
		ocs_list_foreach(&node->active_ios, io) {
			if ((io->mgmt_functions) && (io->mgmt_functions->set_handler)) {
				retval = io->mgmt_functions->set_handler(qualifier, name, value, io);
			}

			if (retval == 0) {
				break;
			}
		}
		ocs_unlock(&node->active_ios_lock);
	}

	return retval;
}

int
ocs_mgmt_node_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
		   void *arg_out, uint32_t arg_out_length, void *object)
{
	ocs_io_t *io;
	ocs_node_t *node = (ocs_node_t *)object;
	char qualifier[80];
	int retval = -1;
	ocs_snprintf(qualifier, sizeof(qualifier), "%s/node[%d]", parent, node->instance_index);

	/* If it doesn't start with my qualifier I don't know what to do with it */
	if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
		char *unqualified_name = action + strlen(qualifier) + 1;

		if (ocs_strcmp(unqualified_name, "resume") == 0) {
			ocs_node_post_event(node, OCS_EVT_RESUME, NULL);
			retval = 0;
		} else {
			/* If I didn't know how to do this action, pass the request to each of my children */
			ocs_lock(&node->active_ios_lock);
				ocs_list_foreach(&node->active_ios, io) {
					if ((io->mgmt_functions) && (io->mgmt_functions->exec_handler)) {
						retval = io->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
							arg_out, arg_out_length, io);
					}

					if (retval == 0) {
						break;
					}
				}
			ocs_unlock(&node->active_ios_lock);
		}
	}

	return retval;
}
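
/*
 * Sketch (hypothetical caller, not driver code): invoking the "resume"
 * action through the exec handler above. The path shape follows the
 * "%s/node[%d]" qualifier built by ocs_mgmt_node_exec().
 */
#if 0
static int
example_resume_node(ocs_node_t *node, char *parent)
{
	char action[128];

	ocs_snprintf(action, sizeof(action), "%s/node[%d]/resume", parent, node->instance_index);
	return ocs_mgmt_node_exec(parent, action, NULL, 0, NULL, 0, node);
}
#endif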

/**
 * @brief Return TRUE if active ios list is empty
 *
 * Test if node->active_ios list is empty while holding the node->active_ios_lock.
 *
 * @param node pointer to node object
 *
 * @return TRUE if node active ios list is empty
 */

int
ocs_node_active_ios_empty(ocs_node_t *node)
{
	int empty;

	ocs_lock(&node->active_ios_lock);
		empty = ocs_list_empty(&node->active_ios);
	ocs_unlock(&node->active_ios_lock);
	return empty;
}
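
/*
 * Sketch (assumed pattern, not a verbatim driver path): using the locked
 * predicate above to gate a state machine transition once the node's
 * active I/O list has drained. The __ocs_d_initiate_shutdown target is an
 * assumption for illustration.
 */
#if 0
	if (ocs_node_active_ios_empty(node)) {
		/* all I/Os complete; safe to continue shutting down */
		ocs_node_transition(node, __ocs_d_initiate_shutdown, NULL);
	}
#endif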

/**
 * @brief Pause a node
 *
 * The node is placed in the __ocs_node_paused state after saving the state
 * to return to.
 *
 * @param node Pointer to node object
 * @param state State to resume to
 *
 * @return none
 */

void
ocs_node_pause(ocs_node_t *node, ocs_sm_function_t state)
{
	node->nodedb_state = state;
	ocs_node_transition(node, __ocs_node_paused, NULL);
}
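
/*
 * Sketch (illustrative only): a typical pause/resume round trip. The
 * resume target (__ocs_d_device_ready here) is an example choice; any
 * state function may be saved.
 */
#if 0
	/* Park the node; __ocs_node_paused holds events until resumed */
	ocs_node_pause(node, __ocs_d_device_ready);

	/* ... later, from another context ... */
	ocs_node_resume(node);		/* posts OCS_EVT_RESUME */
#endif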

/**
 * @brief Paused node state
 *
 * This state is entered when a node is paused. When resumed, the node
 * is transitioned to the previously saved state (node->nodedb_state).
 *
 * @param ctx Remote node state machine context.
 * @param evt Event to process.
 * @param arg Per event optional argument.
 *
 * @return Returns NULL.
 */

void *
__ocs_node_paused(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
{
	std_node_state_decl();

	node_sm_trace();

	switch(evt) {
	case OCS_EVT_ENTER:
		node_printf(node, "Paused\n");
		break;

	case OCS_EVT_RESUME: {
		ocs_sm_function_t pf = node->nodedb_state;

		node->nodedb_state = NULL;
		ocs_node_transition(node, pf, NULL);
		break;
	}

	case OCS_EVT_DOMAIN_ATTACH_OK:
		break;

	case OCS_EVT_SHUTDOWN:
		node->req_free = 1;
		break;

	default:
		__ocs_node_common(__func__, ctx, evt, arg);
		break;
	}
	return NULL;
}

/**
 * @brief Resume a paused state
 *
 * Posts a resume event to the paused node.
 *
 * @param node Pointer to node object
 *
 * @return Returns 0 for success, or a negative error code value for failure.
 */

int32_t
ocs_node_resume(ocs_node_t *node)
{
	ocs_assert(node != NULL, -1);

	ocs_node_post_event(node, OCS_EVT_RESUME, NULL);

	return 0;
}

/**
 * @ingroup node_common
 * @brief Dispatch an ELS frame.
 *
 * <h3 class="desc">Description</h3>
 * An ELS frame is dispatched to the \c node state machine.
 * RQ Pair mode: this function is always called with a NULL hw io.
 *
 * @param node Node that originated the frame.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns 0 if frame processed and RX buffers cleaned
 * up appropriately, -1 if frame not handled and RX buffers need
 * to be returned.
 */

int32_t
ocs_node_recv_els_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	struct {
		uint32_t cmd;
		ocs_sm_event_t evt;
		uint32_t payload_size;
	} els_cmd_list[] = {
		{FC_ELS_CMD_PLOGI,	OCS_EVT_PLOGI_RCVD,	sizeof(fc_plogi_payload_t)},
		{FC_ELS_CMD_FLOGI,	OCS_EVT_FLOGI_RCVD,	sizeof(fc_plogi_payload_t)},
		{FC_ELS_CMD_LOGO,	OCS_EVT_LOGO_RCVD,	sizeof(fc_acc_payload_t)},
		{FC_ELS_CMD_RRQ,	OCS_EVT_RRQ_RCVD,	sizeof(fc_acc_payload_t)},
		{FC_ELS_CMD_PRLI,	OCS_EVT_PRLI_RCVD,	sizeof(fc_prli_payload_t)},
		{FC_ELS_CMD_PRLO,	OCS_EVT_PRLO_RCVD,	sizeof(fc_prlo_payload_t)},
		{FC_ELS_CMD_PDISC,	OCS_EVT_PDISC_RCVD,	MAX_ACC_REJECT_PAYLOAD},
		{FC_ELS_CMD_FDISC,	OCS_EVT_FDISC_RCVD,	MAX_ACC_REJECT_PAYLOAD},
		{FC_ELS_CMD_ADISC,	OCS_EVT_ADISC_RCVD,	sizeof(fc_adisc_payload_t)},
		{FC_ELS_CMD_RSCN,	OCS_EVT_RSCN_RCVD,	MAX_ACC_REJECT_PAYLOAD},
		{FC_ELS_CMD_SCR,	OCS_EVT_SCR_RCVD,	MAX_ACC_REJECT_PAYLOAD},
	};
	ocs_t *ocs = node->ocs;
	ocs_node_cb_t cbdata;
	fc_header_t *hdr = seq->header->dma.virt;
	uint8_t *buf = seq->payload->dma.virt;
	ocs_sm_event_t evt = OCS_EVT_ELS_RCVD;
	uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD;
	uint32_t i;

	ocs_memset(&cbdata, 0, sizeof(cbdata));
	cbdata.header = seq->header;
	cbdata.payload = seq->payload;

	/* find a matching event for the ELS command */
	for (i = 0; i < ARRAY_SIZE(els_cmd_list); i++) {
		if (els_cmd_list[i].cmd == buf[0]) {
			evt = els_cmd_list[i].evt;
			payload_size = els_cmd_list[i].payload_size;
			break;
		}
	}

	switch(evt) {
	case OCS_EVT_FLOGI_RCVD:
		ocs_display_sparams(node->display_name, "flogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
		break;
	case OCS_EVT_FDISC_RCVD:
		ocs_display_sparams(node->display_name, "fdisc rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
		break;
	case OCS_EVT_PLOGI_RCVD:
		ocs_display_sparams(node->display_name, "plogi rcvd req", 0, NULL, ((uint8_t*)seq->payload->dma.virt)+4);
		break;
	default:
		break;
	}

	cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER);

	if (cbdata.io != NULL) {
		cbdata.io->hw_priv = seq->hw_priv;
		/* if we're here, sequence initiative has been transferred */
		cbdata.io->seq_init = 1;

		ocs_node_post_event(node, evt, &cbdata);
	} else {
		node_printf(node, "failed to allocate ELS IO for s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
			    fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
	}
	ocs_hw_sequence_free(&ocs->hw, seq);
	return 0;
}
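
/*
 * Sketch (assumed handler shape, modeled on __ocs_node_paused above): how a
 * node state consumes one of the events posted by ocs_node_recv_els_frame().
 * The ocs_node_cb_t built above arrives as the event argument.
 */
#if 0
	case OCS_EVT_PLOGI_RCVD: {
		ocs_node_cb_t *cbdata = arg;

		/* cbdata->io is the pre-allocated responder IO;
		 * cbdata->header/cbdata->payload hold the raw frame */
		break;
	}
#endif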

/**
 * @ingroup node_common
 * @brief Dispatch an ABTS frame (RQ Pair/sequence coalescing).
 *
 * <h3 class="desc">Description</h3>
 * An ABTS frame is dispatched to the node state machine. This
 * function is used for both RQ Pair and sequence coalescing.
 *
 * @param node Node that originated the frame.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns 0 if frame processed and RX buffers cleaned
 * up appropriately, -1 if frame not handled and RX buffers need
 * to be returned.
 */

int32_t
ocs_node_recv_abts_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	ocs_t *ocs = node->ocs;
	ocs_xport_t *xport = ocs->xport;
	fc_header_t *hdr = seq->header->dma.virt;
	uint16_t ox_id = ocs_be16toh(hdr->ox_id);
	uint16_t rx_id = ocs_be16toh(hdr->rx_id);
	ocs_node_cb_t cbdata;
	int32_t rc = 0;

	node->abort_cnt++;

	/*
	 * Check to see if the IO we want to abort is active; if it is not
	 * active, then we can send the BA_ACC using the send frame option.
	 */
	if (ocs_io_find_tgt_io(ocs, node, ox_id, rx_id) == NULL) {
		uint32_t send_frame_capable;

		ocs_log_debug(ocs, "IO not found (ox_id %04x)\n", ox_id);

		/* If we have SEND_FRAME capability, then use it to send BA_ACC */
		rc = ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable);
		if ((rc == 0) && send_frame_capable) {
			rc = ocs_sframe_send_bls_acc(node, seq);
			if (rc) {
				ocs_log_test(ocs, "ocs_sframe_send_bls_acc failed\n");
			}
			return rc;
		}
		/* continuing */
	}

	ocs_memset(&cbdata, 0, sizeof(cbdata));
	cbdata.header = seq->header;
	cbdata.payload = seq->payload;

	cbdata.io = ocs_scsi_io_alloc(node, OCS_SCSI_IO_ROLE_RESPONDER);
	if (cbdata.io != NULL) {
		cbdata.io->hw_priv = seq->hw_priv;
		/* If we got this far, SIT=1 */
		cbdata.io->seq_init = 1;

		/* fill out generic fields */
		cbdata.io->ocs = ocs;
		cbdata.io->node = node;
		cbdata.io->cmd_tgt = TRUE;

		ocs_node_post_event(node, OCS_EVT_ABTS_RCVD, &cbdata);
	} else {
		ocs_atomic_add_return(&xport->io_alloc_failed_count, 1);
		node_printf(node, "SCSI IO allocation failed for ABTS received s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
			    fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id), ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
	}

	/* ABTS processed, return RX buffer to the chip */
	ocs_hw_sequence_free(&ocs->hw, seq);
	return 0;
}
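
/*
 * Sketch (illustrative restatement of the decision above): probing the HW
 * for send-frame support selects between the lightweight BA_ACC path and
 * the full IO-allocation path.
 */
#if 0
	uint32_t send_frame_capable = 0;

	if ((ocs_hw_get(&ocs->hw, OCS_HW_SEND_FRAME_CAPABLE, &send_frame_capable) == 0) &&
	    send_frame_capable) {
		/* fast path: BA_ACC via send frame, no IO allocation */
	} else {
		/* fall back: allocate a responder IO and run the state machine */
	}
#endif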

/**
 * @ingroup node_common
 * @brief Dispatch a CT frame.
 *
 * <h3 class="desc">Description</h3>
 * A CT frame is dispatched to the \c node state machine.
 * RQ Pair mode: this function is always called with a NULL hw io.
 *
 * @param node Node that originated the frame.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns 0 if frame processed and RX buffers cleaned
 * up appropriately, -1 if frame not handled and RX buffers need
 * to be returned.
 */

int32_t
ocs_node_recv_ct_frame(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	ocs_t *ocs = node->ocs;
	fc_header_t *hdr = seq->header->dma.virt;
	fcct_iu_header_t *iu = seq->payload->dma.virt;
	ocs_sm_event_t evt = OCS_EVT_ELS_RCVD;
	uint32_t payload_size = MAX_ACC_REJECT_PAYLOAD;
	uint16_t gscmd = ocs_be16toh(iu->cmd_rsp_code);
	ocs_node_cb_t cbdata;
	uint32_t i;
	struct {
		uint32_t cmd;
		ocs_sm_event_t evt;
		uint32_t payload_size;
	} ct_cmd_list[] = {
		{FC_GS_NAMESERVER_RFF_ID, OCS_EVT_RFF_ID_RCVD, 100},
		{FC_GS_NAMESERVER_RFT_ID, OCS_EVT_RFT_ID_RCVD, 100},
		{FC_GS_NAMESERVER_GNN_ID, OCS_EVT_GNN_ID_RCVD, 100},
		{FC_GS_NAMESERVER_GPN_ID, OCS_EVT_GPN_ID_RCVD, 100},
		{FC_GS_NAMESERVER_GFPN_ID, OCS_EVT_GFPN_ID_RCVD, 100},
		{FC_GS_NAMESERVER_GFF_ID, OCS_EVT_GFF_ID_RCVD, 100},
		{FC_GS_NAMESERVER_GID_FT, OCS_EVT_GID_FT_RCVD, 256},
		{FC_GS_NAMESERVER_GID_PT, OCS_EVT_GID_PT_RCVD, 256},
		{FC_GS_NAMESERVER_RPN_ID, OCS_EVT_RPN_ID_RCVD, 100},
		{FC_GS_NAMESERVER_RNN_ID, OCS_EVT_RNN_ID_RCVD, 100},
		{FC_GS_NAMESERVER_RCS_ID, OCS_EVT_RCS_ID_RCVD, 100},
		{FC_GS_NAMESERVER_RSNN_NN, OCS_EVT_RSNN_NN_RCVD, 100},
		{FC_GS_NAMESERVER_RSPN_ID, OCS_EVT_RSPN_ID_RCVD, 100},
		{FC_GS_NAMESERVER_RHBA, OCS_EVT_RHBA_RCVD, 100},
		{FC_GS_NAMESERVER_RPA, OCS_EVT_RPA_RCVD, 100},
	};

	ocs_memset(&cbdata, 0, sizeof(cbdata));
	cbdata.header = seq->header;
	cbdata.payload = seq->payload;

	/* find a matching event for the ELS/GS command */
	for (i = 0; i < ARRAY_SIZE(ct_cmd_list); i++) {
		if (ct_cmd_list[i].cmd == gscmd) {
			evt = ct_cmd_list[i].evt;
			payload_size = ct_cmd_list[i].payload_size;
			break;
		}
	}

	/* Allocate a responder IO and post the event to the node state machine */
	cbdata.io = ocs_els_io_alloc(node, payload_size, OCS_ELS_ROLE_RESPONDER);
	if (cbdata.io == NULL) {
		node_printf(node, "GS IO allocation failed for s_id %06x d_id %06x ox_id %04x rx_id %04x\n",
			fc_be24toh(hdr->s_id), fc_be24toh(hdr->d_id),
			ocs_be16toh(hdr->ox_id), ocs_be16toh(hdr->rx_id));
		return -1;
	}
	cbdata.io->hw_priv = seq->hw_priv;
	ocs_node_post_event(node, evt, &cbdata);

	ocs_hw_sequence_free(&ocs->hw, seq);
	return 0;
}

/**
 * @ingroup node_common
 * @brief Dispatch an FCP command frame when the node is not ready.
 *
 * <h3 class="desc">Description</h3>
 * A frame is dispatched to the \c node state machine.
 *
 * @param node Node that originated the frame.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns 0 if frame processed and RX buffers cleaned
 * up appropriately, -1 if frame not handled.
 */

int32_t
ocs_node_recv_fcp_cmd(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	ocs_node_cb_t cbdata;
	ocs_t *ocs = node->ocs;

	ocs_memset(&cbdata, 0, sizeof(cbdata));
	cbdata.header = seq->header;
	cbdata.payload = seq->payload;
	ocs_node_post_event(node, OCS_EVT_FCP_CMD_RCVD, &cbdata);
	ocs_hw_sequence_free(&ocs->hw, seq);
	return 0;
}

/**
 * @ingroup node_common
 * @brief Stub handler for non-ABTS BLS frames
 *
 * <h3 class="desc">Description</h3>
 * Logs the frame and drops it. Customers can plumb this into their
 * back end as needed.
 *
 * @param node Node that originated the frame.
 * @param seq Header/payload sequence buffers.
 *
 * @return Returns -1 (frame not handled; RX buffers need to be returned).
 */

int32_t
ocs_node_recv_bls_no_sit(ocs_node_t *node, ocs_hw_sequence_t *seq)
{
	fc_header_t *hdr = seq->header->dma.virt;

	node_printf(node, "Dropping frame hdr = %08x %08x %08x %08x %08x %08x\n",
		    ocs_htobe32(((uint32_t *)hdr)[0]),
		    ocs_htobe32(((uint32_t *)hdr)[1]),
		    ocs_htobe32(((uint32_t *)hdr)[2]),
		    ocs_htobe32(((uint32_t *)hdr)[3]),
		    ocs_htobe32(((uint32_t *)hdr)[4]),
		    ocs_htobe32(((uint32_t *)hdr)[5]));

	return -1;
}