xref: /freebsd/sys/dev/ocs_fc/ocs_sport.c (revision f126890ac5386406dadf7c4cfa9566cbb56537c5)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 /**
33  * @file
34  * Details SLI port (sport) functions.
35  */
36 
37 #include "ocs.h"
38 #include "ocs_fabric.h"
39 #include "ocs_els.h"
40 #include "ocs_device.h"
41 
42 static void ocs_vport_update_spec(ocs_sport_t *sport);
43 static void ocs_vport_link_down(ocs_sport_t *sport);
44 
45 void ocs_mgmt_sport_list(ocs_textbuf_t *textbuf, void *sport);
46 void ocs_mgmt_sport_get_all(ocs_textbuf_t *textbuf, void *sport);
47 int ocs_mgmt_sport_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *sport);
48 int ocs_mgmt_sport_set(char *parent, char *name, char *value, void *sport);
49 int ocs_mgmt_sport_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
50 		void *arg_out, uint32_t arg_out_length, void *sport);
51 static ocs_mgmt_functions_t sport_mgmt_functions = {
52 	.get_list_handler = ocs_mgmt_sport_list,
53 	.get_handler = ocs_mgmt_sport_get,
54 	.get_all_handler = ocs_mgmt_sport_get_all,
55 	.set_handler = ocs_mgmt_sport_set,
56 	.exec_handler = ocs_mgmt_sport_exec,
57 };
58 
59 /*!
60 @defgroup sport_sm SLI Port (sport) State Machine: States
61 */
62 
63 /**
64  * @ingroup sport_sm
65  * @brief SLI port HW callback.
66  *
67  * @par Description
68  * This function is called in response to a HW sport event. This code resolves
69  * the reference to the sport object, and posts the corresponding event.
70  *
71  * @param arg Pointer to the OCS context.
72  * @param event HW sport event.
73  * @param data Application-specific event data (pointer to the sport).
74  *
75  * @return Returns 0 on success, or a negative error value on failure.
76  */
77 
78 int32_t
79 ocs_port_cb(void *arg, ocs_hw_port_event_e event, void *data)
80 {
81 	ocs_t *ocs = arg;
82 	ocs_sli_port_t *sport = data;
83 
84 	switch (event) {
85 	case OCS_HW_PORT_ALLOC_OK:
86 		ocs_log_debug(ocs, "OCS_HW_PORT_ALLOC_OK\n");
87 		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ALLOC_OK, NULL);
88 		break;
89 	case OCS_HW_PORT_ALLOC_FAIL:
90 		ocs_log_debug(ocs, "OCS_HW_PORT_ALLOC_FAIL\n");
91 		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ALLOC_FAIL, NULL);
92 		break;
93 	case OCS_HW_PORT_ATTACH_OK:
94 		ocs_log_debug(ocs, "OCS_HW_PORT_ATTACH_OK\n");
95 		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ATTACH_OK, NULL);
96 		break;
97 	case OCS_HW_PORT_ATTACH_FAIL:
98 		ocs_log_debug(ocs, "OCS_HW_PORT_ATTACH_FAIL\n");
99 		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_ATTACH_FAIL, NULL);
100 		break;
101 	case OCS_HW_PORT_FREE_OK:
102 		ocs_log_debug(ocs, "OCS_HW_PORT_FREE_OK\n");
103 		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_FREE_OK, NULL);
104 		break;
105 	case OCS_HW_PORT_FREE_FAIL:
106 		ocs_log_debug(ocs, "OCS_HW_PORT_FREE_FAIL\n");
107 		ocs_sm_post_event(&sport->sm, OCS_EVT_SPORT_FREE_FAIL, NULL);
108 		break;
109 	default:
110 		ocs_log_test(ocs, "unknown event %#x\n", event);
111 	}
112 
113 	return 0;
114 }
115 
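/*
 * Illustrative call sequence (a sketch, not code from this file): the HW
 * layer invokes ocs_port_cb() from its completion path with the sport it
 * carried as event data, for example:
 *
 *	ocs_port_cb(ocs, OCS_HW_PORT_ALLOC_OK, sport);
 *
 * which posts OCS_EVT_SPORT_ALLOC_OK to that sport's state machine.
 */
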
116 /**
117  * @ingroup sport_sm
118  * @brief Allocate a SLI port object.
119  *
120  * @par Description
121  * A sport object is allocated and associated with the domain. Various
122  * structure members are initialized.
123  *
124  * @param domain Pointer to the domain structure.
125  * @param wwpn World wide port name in host endian.
126  * @param wwnn World wide node name in host endian.
127  * @param fc_id Port ID of the sport. May be specified, or set to UINT32_MAX to let the fabric choose.
128  * @param enable_ini Enables initiator capability on this port using a non-zero value.
129  * @param enable_tgt Enables target capability on this port using a non-zero value.
130  *
131  * @return Pointer to an ocs_sport_t object; or NULL.
132  */
133 
134 ocs_sport_t *
135 ocs_sport_alloc(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn, uint32_t fc_id, uint8_t enable_ini, uint8_t enable_tgt)
136 {
137 	ocs_sport_t *sport;
138 
139 	if (domain->ocs->ctrlmask & OCS_CTRLMASK_INHIBIT_INITIATOR) {
140 		enable_ini = 0;
141 	}
142 
143 	/* Return a failure if this sport has already been allocated */
144 	if (wwpn != 0) {
145 		sport = ocs_sport_find_wwn(domain, wwnn, wwpn);
146 		if (sport != NULL) {
147 			ocs_log_test(domain->ocs, "Failed: SPORT %016llx  %016llx already allocated\n",
148 				     (unsigned long long)wwnn, (unsigned long long)wwpn);
149 			return NULL;
150 		}
151 	}
152 
153 	sport = ocs_malloc(domain->ocs, sizeof(*sport), OCS_M_NOWAIT | OCS_M_ZERO);
154 	if (sport) {
155 		sport->ocs = domain->ocs;
156 		ocs_snprintf(sport->display_name, sizeof(sport->display_name), "------");
157 		sport->domain = domain;
158 		sport->lookup = spv_new(domain->ocs);
159 		sport->instance_index = domain->sport_instance_count++;
160 		ocs_sport_lock_init(sport);
161 		ocs_list_init(&sport->node_list, ocs_node_t, link);
162 		sport->sm.app = sport;
163 		sport->enable_ini = enable_ini;
164 		sport->enable_tgt = enable_tgt;
165 		sport->enable_rscn = (sport->enable_ini || (sport->enable_tgt && enable_target_rscn(sport->ocs)));
166 
167 		/* Copy service parameters from domain */
168 		ocs_memcpy(sport->service_params, domain->service_params, sizeof(fc_plogi_payload_t));
169 
170 		/* Update requested fc_id */
171 		sport->fc_id = fc_id;
172 
173 		/* Update the sport's service parameters for the new WWNs */
174 		sport->wwpn = wwpn;
175 		sport->wwnn = wwnn;
176 		ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx" , (unsigned long long)wwnn);
177 
178 		/* Initialize node group list */
179 		ocs_lock_init(sport->ocs, &sport->node_group_lock, "node_group_lock[%d]", sport->instance_index);
180 		ocs_list_init(&sport->node_group_dir_list, ocs_node_group_dir_t, link);
181 
182 		/* if this is the "first" sport of the domain, then make it the "phys" sport */
183 		ocs_domain_lock(domain);
184 			if (ocs_list_empty(&domain->sport_list)) {
185 				domain->sport = sport;
186 			}
187 
188 			ocs_list_add_tail(&domain->sport_list, sport);
189 		ocs_domain_unlock(domain);
190 
191 		sport->mgmt_functions = &sport_mgmt_functions;
192 
193 		ocs_log_debug(domain->ocs, "[%s] allocate sport\n", sport->display_name);
194 	}
195 	return sport;
196 }
197 
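/*
 * Minimal usage sketch (hypothetical caller; error handling omitted). A
 * caller allocates a sport against a domain, passes UINT32_MAX to let the
 * fabric assign the FC_ID, and then starts the sport state machine in its
 * initial state:
 *
 *	ocs_sport_t *sport;
 *
 *	sport = ocs_sport_alloc(domain, wwpn, wwnn, UINT32_MAX, TRUE, FALSE);
 *	if (sport != NULL) {
 *		ocs_sm_transition(&sport->sm, __ocs_sport_allocated, NULL);
 *	}
 */
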
198 /**
199  * @ingroup sport_sm
200  * @brief Free a SLI port object.
201  *
202  * @par Description
203  * The sport object is freed.
204  *
205  * @param sport Pointer to the SLI port object.
206  *
207  * @return None.
208  */
209 
210 void
211 ocs_sport_free(ocs_sport_t *sport)
212 {
213 	ocs_domain_t *domain;
214 	ocs_node_group_dir_t *node_group_dir;
215 	ocs_node_group_dir_t *node_group_dir_next;
216 	int post_all_free = FALSE;
217 
218 	if (sport) {
219 		domain = sport->domain;
220 		ocs_log_debug(domain->ocs, "[%s] free sport\n", sport->display_name);
221 		ocs_domain_lock(domain);
222 			ocs_list_remove(&domain->sport_list, sport);
223 			ocs_sport_lock(sport);
224 				spv_del(sport->lookup);
225 				sport->lookup = NULL;
226 
227 				ocs_lock(&domain->lookup_lock);
228 					/* Remove the sport from the domain's sparse vector lookup table */
229 					spv_set(domain->lookup, sport->fc_id, NULL);
230 				ocs_unlock(&domain->lookup_lock);
231 
232 				/* if this is the physical sport, then clear it out of the domain */
233 				if (sport == domain->sport) {
234 					domain->sport = NULL;
235 				}
236 
237 				/*
238 				 * If the domain's sport_list is empty, then post the ALL_CHILD_NODES_FREE event to the domain
239 				 * after the lock is released. The domain may be freed as a result of the event.
240 				 */
241 				if (ocs_list_empty(&domain->sport_list)) {
242 					post_all_free = TRUE;
243 				}
244 
245 				/* Free any node group directories */
246 				ocs_lock(&sport->node_group_lock);
247 					ocs_list_foreach_safe(&sport->node_group_dir_list, node_group_dir, node_group_dir_next) {
248 						ocs_unlock(&sport->node_group_lock);
249 							ocs_node_group_dir_free(node_group_dir);
250 						ocs_lock(&sport->node_group_lock);
251 					}
252 				ocs_unlock(&sport->node_group_lock);
253 			ocs_sport_unlock(sport);
254 		ocs_domain_unlock(domain);
255 
256 		if (post_all_free) {
257 			ocs_domain_post_event(domain, OCS_EVT_ALL_CHILD_NODES_FREE, NULL);
258 		}
259 
260 		ocs_sport_lock_free(sport);
261 		ocs_lock_free(&sport->node_group_lock);
262 		ocs_scsi_sport_deleted(sport);
263 
264 		ocs_free(sport->ocs, sport, sizeof(*sport));
265 
266 	}
267 }
268 
269 /**
270  * @ingroup sport_sm
271  * @brief Free memory resources of a SLI port object.
272  *
273  * @par Description
274  * The sport's child node objects are freed, after which the sport object itself is freed.
275  *
276  * @param sport Pointer to the SLI port object.
277  *
278  * @return None.
279  */
280 
281 void ocs_sport_force_free(ocs_sport_t *sport)
282 {
283 	ocs_node_t *node;
284 	ocs_node_t *next;
285 
286 	/* shutdown sm processing */
287 	ocs_sm_disable(&sport->sm);
288 
289 	ocs_scsi_notify_sport_force_free(sport);
290 
291 	ocs_sport_lock(sport);
292 		ocs_list_foreach_safe(&sport->node_list, node, next) {
293 			ocs_node_force_free(node);
294 		}
295 	ocs_sport_unlock(sport);
296 	ocs_sport_free(sport);
297 }
298 
299 /**
300  * @ingroup sport_sm
301  * @brief Return a SLI port object, given an instance index.
302  *
303  * @par Description
304  * A pointer to a sport object is returned, given its instance @c index.
305  *
306  * @param domain Pointer to the domain.
307  * @param index Instance index value to find.
308  *
309  * @return Returns a pointer to the ocs_sport_t object; or NULL.
310  */
311 
312 ocs_sport_t *
313 ocs_sport_get_instance(ocs_domain_t *domain, uint32_t index)
314 {
315 	ocs_sport_t *sport;
316 
317 	ocs_domain_lock(domain);
318 		ocs_list_foreach(&domain->sport_list, sport) {
319 			if (sport->instance_index == index) {
320 				ocs_domain_unlock(domain);
321 				return sport;
322 			}
323 		}
324 	ocs_domain_unlock(domain);
325 	return NULL;
326 }
327 
328 /**
329  * @ingroup sport_sm
330  * @brief Find a SLI port object, given an FC_ID.
331  *
332  * @par Description
333  * Returns a pointer to the sport object, given an FC_ID.
334  *
335  * @param domain Pointer to the domain.
336  * @param d_id FC_ID to find.
337  *
338  * @return Returns a pointer to the ocs_sport_t; or NULL.
339  */
340 
341 ocs_sport_t *
342 ocs_sport_find(ocs_domain_t *domain, uint32_t d_id)
343 {
344 	ocs_sport_t *sport;
345 
346 	ocs_assert(domain, NULL);
347 	ocs_lock(&domain->lookup_lock);
348 		if (domain->lookup == NULL) {
349 			ocs_log_test(domain->ocs, "assertion failed: domain->lookup is not valid\n");
350 			ocs_unlock(&domain->lookup_lock);
351 			return NULL;
352 		}
353 
354 		sport = spv_get(domain->lookup, d_id);
355 	ocs_unlock(&domain->lookup_lock);
356 	return sport;
357 }
358 
359 /**
360  * @ingroup sport_sm
361  * @brief Find a SLI port, given the WWNN and WWPN.
362  *
363  * @par Description
364  * Return a pointer to a sport, given the WWNN and WWPN.
365  *
366  * @param domain Pointer to the domain.
367  * @param wwnn World wide node name.
368  * @param wwpn World wide port name.
369  *
370  * @return Returns a pointer to a SLI port, if found; or NULL.
371  */
372 
373 ocs_sport_t *
374 ocs_sport_find_wwn(ocs_domain_t *domain, uint64_t wwnn, uint64_t wwpn)
375 {
376 	ocs_sport_t *sport = NULL;
377 
378 	ocs_domain_lock(domain);
379 		ocs_list_foreach(&domain->sport_list, sport) {
380 			if ((sport->wwnn == wwnn) && (sport->wwpn == wwpn)) {
381 				ocs_domain_unlock(domain);
382 				return sport;
383 			}
384 		}
385 	ocs_domain_unlock(domain);
386 	return NULL;
387 }
388 
389 /**
390  * @ingroup sport_sm
391  * @brief Request a SLI port attach.
392  *
393  * @par Description
394  * External call to request an attach for a sport, given an FC_ID.
395  *
396  * @param sport Pointer to the sport context.
397  * @param fc_id FC_ID with which to attach.
398  *
399  * @return Returns 0 on success, or a negative error value on failure.
400  */
401 
402 int32_t
403 ocs_sport_attach(ocs_sport_t *sport, uint32_t fc_id)
404 {
405 	ocs_hw_rtn_e rc;
406 	ocs_node_t *node;
407 
408 	/* Set our lookup */
409 	ocs_lock(&sport->domain->lookup_lock);
410 		spv_set(sport->domain->lookup, fc_id, sport);
411 	ocs_unlock(&sport->domain->lookup_lock);
412 
413 	/* Update our display_name */
414 	ocs_node_fcid_display(fc_id, sport->display_name, sizeof(sport->display_name));
415 	ocs_sport_lock(sport);
416 		ocs_list_foreach(&sport->node_list, node) {
417 			ocs_node_update_display_name(node);
418 		}
419 	ocs_sport_unlock(sport);
420 	ocs_log_debug(sport->ocs, "[%s] attach sport: fc_id x%06x\n", sport->display_name, fc_id);
421 
422 	rc = ocs_hw_port_attach(&sport->ocs->hw, sport, fc_id);
423 	if (rc != OCS_HW_RTN_SUCCESS) {
424 		ocs_log_err(sport->ocs, "ocs_hw_port_attach failed: %d\n", rc);
425 		return -1;
426 	}
427 	return 0;
428 }
429 
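/*
 * Illustrative call (sketch): once an N_Port ID has been assigned, for
 * example from a FLOGI/FDISC LS_ACC handled by the fabric node, the owner
 * requests the attach with that address:
 *
 *	if (ocs_sport_attach(sport, d_id) != 0) {
 *		... handle HW attach failure ...
 *	}
 *
 * Completion is reported asynchronously through ocs_port_cb() as
 * OCS_EVT_SPORT_ATTACH_OK or OCS_EVT_SPORT_ATTACH_FAIL.
 */
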
430 /**
431  * @brief Common SLI port state machine declarations and initialization.
432  */
433 #define std_sport_state_decl() \
434 	ocs_sport_t *sport = NULL; \
435 	ocs_domain_t *domain = NULL; \
436 	ocs_t *ocs = NULL; \
437 	\
438 	ocs_assert(ctx, NULL); \
439 	sport = ctx->app; \
440 	ocs_assert(sport, NULL); \
441 	\
442 	domain = sport->domain; \
443 	ocs_assert(domain, NULL); \
444 	ocs = sport->ocs; \
445 	ocs_assert(ocs, NULL);
446 
447 /**
448  * @brief Common SLI port state machine trace logging.
449  */
450 #define sport_sm_trace(sport)  \
451 	do { \
452 		if (OCS_LOG_ENABLE_DOMAIN_SM_TRACE(ocs)) \
453 			ocs_log_debug(ocs, "[%s] %-20s\n", sport->display_name, ocs_sm_event_name(evt)); \
454 	} while (0)
455 
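/*
 * The state handlers below share a common shape (a sketch with a hypothetical
 * state name; unhandled events are always delegated to __ocs_sport_common()):
 *
 *	void *
 *	__ocs_sport_example_state(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
 *	{
 *		std_sport_state_decl();
 *
 *		sport_sm_trace(sport);
 *
 *		switch(evt) {
 *		case OCS_EVT_ENTER:
 *			break;
 *		default:
 *			__ocs_sport_common(__func__, ctx, evt, arg);
 *			return NULL;
 *		}
 *		return NULL;
 *	}
 */
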
456 /**
457  * @brief SLI port state machine: Common event handler.
458  *
459  * @par Description
460  * Handle common sport events.
461  *
462  * @param funcname Function name to display.
463  * @param ctx Sport state machine context.
464  * @param evt Event to process.
465  * @param arg Per event optional argument.
466  *
467  * @return Returns NULL.
468  */
469 
470 static void *
471 __ocs_sport_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
472 {
473 	std_sport_state_decl();
474 
475 	switch(evt) {
476 	case OCS_EVT_ENTER:
477 	case OCS_EVT_REENTER:
478 	case OCS_EVT_EXIT:
479 	case OCS_EVT_ALL_CHILD_NODES_FREE:
480 		break;
481 	case OCS_EVT_SPORT_ATTACH_OK:
482 		ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
483 		break;
484 	case OCS_EVT_SHUTDOWN: {
485 		ocs_node_t *node;
486 		ocs_node_t *node_next;
487 		int node_list_empty;
488 
489 		/* Flag this sport as shutting down */
490 		sport->shutting_down = 1;
491 
492 		if (sport->is_vport) {
493 			ocs_vport_link_down(sport);
494 		}
495 
496 		ocs_sport_lock(sport);
497 			node_list_empty = ocs_list_empty(&sport->node_list);
498 		ocs_sport_unlock(sport);
499 
500 		if (node_list_empty) {
501 			/* sm: node list is empty / ocs_hw_port_free
502 			 * Remove the sport from the domain's sparse vector lookup table */
503 			ocs_lock(&domain->lookup_lock);
504 				spv_set(domain->lookup, sport->fc_id, NULL);
505 			ocs_unlock(&domain->lookup_lock);
506 			ocs_sm_transition(ctx, __ocs_sport_wait_port_free, NULL);
507 			if (ocs_hw_port_free(&ocs->hw, sport)) {
508 				ocs_log_test(sport->ocs, "ocs_hw_port_free failed\n");
509 				/* Not much we can do, free the sport anyways */
510 				ocs_sport_free(sport);
511 			}
512 		} else {
513 			/* sm: node list is not empty / shutdown nodes */
514 			ocs_sm_transition(ctx, __ocs_sport_wait_shutdown, NULL);
515 			ocs_sport_lock(sport);
516 				ocs_list_foreach_safe(&sport->node_list, node, node_next) {
517 					/*
518 					 * If this is a vport, logout of the fabric controller so that it
519 					 * deletes the vport on the switch.
520 					 */
521 					if ((node->rnode.fc_id == FC_ADDR_FABRIC) && (sport->is_vport)) {
522 						/* if link is down, don't send logo */
523 						if (sport->ocs->hw.link.status == SLI_LINK_STATUS_DOWN) {
524 							ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
525 						} else {
526 							ocs_log_debug(ocs, "[%s] sport shutdown vport, sending LOGO to node\n",
527 								      node->display_name);
528 
529 							if (ocs_send_logo(node, OCS_FC_ELS_SEND_DEFAULT_TIMEOUT,
530 								  0, NULL, NULL) == NULL) {
531 								/* failed to send LOGO, go ahead and cleanup node anyways */
532 								node_printf(node, "Failed to send LOGO\n");
533 								ocs_node_post_event(node, OCS_EVT_SHUTDOWN_EXPLICIT_LOGO, NULL);
534 							} else {
535 								/* sent LOGO, wait for response */
536 								ocs_node_transition(node, __ocs_d_wait_logo_rsp, NULL);
537 							}
538 						}
539 					} else {
540 						ocs_node_post_event(node, OCS_EVT_SHUTDOWN, NULL);
541 					}
542 				}
543 			ocs_sport_unlock(sport);
544 		}
545 		break;
546 	}
547 	default:
548 		ocs_log_test(sport->ocs, "[%s] %-20s %-20s not handled\n", sport->display_name, funcname, ocs_sm_event_name(evt));
549 		break;
550 	}
551 
552 	return NULL;
553 }
554 
555 /**
556  * @ingroup sport_sm
557  * @brief SLI port state machine: Physical sport allocated.
558  *
559  * @par Description
560  * This is the initial state for sport objects.
561  *
562  * @param ctx Sport state machine context.
563  * @param evt Event to process.
564  * @param arg Per event optional argument.
565  *
566  * @return Returns NULL.
567  */
568 
569 void *
570 __ocs_sport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
571 {
572 	std_sport_state_decl();
573 
574 	sport_sm_trace(sport);
575 
576 	switch(evt) {
577 	/* the physical sport is attached */
578 	case OCS_EVT_SPORT_ATTACH_OK:
579 		ocs_assert(sport == domain->sport, NULL);
580 		ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
581 		break;
582 
583 	case OCS_EVT_SPORT_ALLOC_OK:
584 		/* ignore */
585 		break;
586 	default:
587 		__ocs_sport_common(__func__, ctx, evt, arg);
588 		return NULL;
589 	}
590 	return NULL;
591 }
592 
593 /**
594  * @ingroup sport_sm
595  * @brief SLI port state machine: Handle initial virtual port events.
596  *
597  * @par Description
598  * This state is entered when a virtual port is instantiated.
599  *
600  * @param ctx Sport state machine context.
601  * @param evt Event to process.
602  * @param arg Per event optional argument.
603  *
604  * @return Returns NULL.
605  */
606 
607 void *
608 __ocs_sport_vport_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
609 {
610 	std_sport_state_decl();
611 
612 	sport_sm_trace(sport);
613 
614 	switch(evt) {
615 	case OCS_EVT_ENTER: {
616 		uint64_t be_wwpn = ocs_htobe64(sport->wwpn);
617 
618 		if (sport->wwpn == 0) {
619 			ocs_log_debug(ocs, "vport: letting f/w select WWN\n");
620 		}
621 
622 		if (sport->fc_id != UINT32_MAX) {
623 			ocs_log_debug(ocs, "vport: hard coding port id: %x\n", sport->fc_id);
624 		}
625 
626 		ocs_sm_transition(ctx, __ocs_sport_vport_wait_alloc, NULL);
627 		/* If wwpn is zero, then let the firmware select the WWPN */
628 		if (ocs_hw_port_alloc(&ocs->hw, sport, sport->domain,
629 			(sport->wwpn == 0) ? NULL : (uint8_t *)&be_wwpn)) {
630 			ocs_log_err(ocs, "Can't allocate port\n");
631 			break;
632 		}
633 
634 		break;
635 	}
636 	default:
637 		__ocs_sport_common(__func__, ctx, evt, arg);
638 		return NULL;
639 	}
640 	return NULL;
641 }
642 
643 /**
644  * @ingroup sport_sm
645  * @brief SLI port state machine: Wait for the HW SLI port allocation to complete.
646  *
647  * @par Description
648  * Waits for the HW sport allocation request to complete.
649  *
650  * @param ctx Sport state machine context.
651  * @param evt Event to process.
652  * @param arg Per event optional argument.
653  *
654  * @return Returns NULL.
655  */
656 
657 void *
658 __ocs_sport_vport_wait_alloc(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
659 {
660 	std_sport_state_decl();
661 
662 	sport_sm_trace(sport);
663 
664 	switch(evt) {
665 	case OCS_EVT_SPORT_ALLOC_OK: {
666 		fc_plogi_payload_t *sp = (fc_plogi_payload_t*) sport->service_params;
667 		ocs_node_t *fabric;
668 
669 		/* If we let the f/w assign the WWNs, then update the sport WWNs with those returned by the HW */
670 		if (sport->wwnn == 0) {
671 			sport->wwnn = ocs_be64toh(sport->sli_wwnn);
672 			sport->wwpn = ocs_be64toh(sport->sli_wwpn);
673 			ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx", (unsigned long long) sport->wwpn);
674 		}
675 
676 		/* Update the sport's service parameters */
677 		sp->port_name_hi = ocs_htobe32((uint32_t) (sport->wwpn >> 32ll));
678 		sp->port_name_lo = ocs_htobe32((uint32_t) sport->wwpn);
679 		sp->node_name_hi = ocs_htobe32((uint32_t) (sport->wwnn >> 32ll));
680 		sp->node_name_lo = ocs_htobe32((uint32_t) sport->wwnn);
681 
682 		/* if sport->fc_id is uninitialized, then request that the fabric node use FDISC
683 		 * to find an fc_id.   Otherwise we're restoring vports, or we're in
684 		 * fabric emulation mode, so attach the fc_id
685 		 */
686 		if (sport->fc_id == UINT32_MAX) {
687 			fabric = ocs_node_alloc(sport, FC_ADDR_FABRIC, FALSE, FALSE);
688 			if (fabric == NULL) {
689 				ocs_log_err(ocs, "ocs_node_alloc() failed\n");
690 				return NULL;
691 			}
692 			ocs_node_transition(fabric, __ocs_vport_fabric_init, NULL);
693 		} else {
694 			ocs_snprintf(sport->wwnn_str, sizeof(sport->wwnn_str), "%016llx", (unsigned long long)sport->wwpn);
695 			ocs_sport_attach(sport, sport->fc_id);
696 		}
697 		ocs_sm_transition(ctx, __ocs_sport_vport_allocated, NULL);
698 		break;
699 	}
700 	default:
701 		__ocs_sport_common(__func__, ctx, evt, arg);
702 		return NULL;
703 	}
704 	return NULL;
705 }
706 
707 /**
708  * @ingroup sport_sm
709  * @brief SLI port state machine: virtual sport allocated.
710  *
711  * @par Description
712  * This state is entered after the sport is allocated; it then waits for a fabric node
713  * FDISC to complete, which requests a sport attach.
714  * The sport attach complete is handled in this state.
715  *
716  * @param ctx Sport state machine context.
717  * @param evt Event to process.
718  * @param arg Per event optional argument.
719  *
720  * @return Returns NULL.
721  */
722 
723 void *
724 __ocs_sport_vport_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
725 {
726 	std_sport_state_decl();
727 
728 	sport_sm_trace(sport);
729 
730 	switch(evt) {
731 	case OCS_EVT_SPORT_ATTACH_OK: {
732 		ocs_node_t *node;
733 
734 		if (!(domain->femul_enable)) {
735 			/* Find our fabric node, and forward this event */
736 			node = ocs_node_find(sport, FC_ADDR_FABRIC);
737 			if (node == NULL) {
738 				ocs_log_test(ocs, "can't find node %06x\n", FC_ADDR_FABRIC);
739 				break;
740 			}
741 			/* sm: / forward sport attach to fabric node */
742 			ocs_node_post_event(node, evt, NULL);
743 		}
744 		ocs_sm_transition(ctx, __ocs_sport_attached, NULL);
745 		break;
746 	}
747 	default:
748 		__ocs_sport_common(__func__, ctx, evt, arg);
749 		return NULL;
750 	}
751 	return NULL;
752 }
753 
754 /**
755  * @ingroup sport_sm
756  * @brief SLI port state machine: Attached.
757  *
758  * @par Description
759  * State entered after the sport attach has completed.
760  *
761  * @param ctx Sport state machine context.
762  * @param evt Event to process.
763  * @param arg Per event optional argument.
764  *
765  * @return Returns NULL.
766  */
767 
768 void *
769 __ocs_sport_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
770 {
771 	std_sport_state_decl();
772 
773 	sport_sm_trace(sport);
774 
775 	switch(evt) {
776 	case OCS_EVT_ENTER: {
777 		ocs_node_t *node;
778 
779 		ocs_log_debug(ocs, "[%s] SPORT attached WWPN %016llx WWNN %016llx \n", sport->display_name,
780 			sport->wwpn, sport->wwnn);
781 		ocs_sport_lock(sport);
782 			ocs_list_foreach(&sport->node_list, node) {
783 				ocs_node_update_display_name(node);
784 			}
785 		ocs_sport_unlock(sport);
786 		sport->tgt_id = sport->fc_id;
787 		if (sport->enable_ini) {
788 			ocs_scsi_ini_new_sport(sport);
789 		}
790 		if (sport->enable_tgt) {
791 			ocs_scsi_tgt_new_sport(sport);
792 		}
793 
794 		/* Update the vport parameters (if this is not the physical sport) */
795 		if (sport->is_vport) {
796 			ocs_vport_update_spec(sport);
797 		}
798 
799 		break;
800 	}
801 
802 	case OCS_EVT_EXIT:
803 		ocs_log_debug(ocs, "[%s] SPORT detached WWPN %016llx WWNN %016llx \n", sport->display_name,
804 			sport->wwpn, sport->wwnn);
805 		if (sport->enable_ini) {
806 			ocs_scsi_ini_del_sport(sport);
807 		}
808 		if (sport->enable_tgt) {
809 			ocs_scsi_tgt_del_sport(sport);
810 		}
811 		break;
812 	default:
813 		__ocs_sport_common(__func__, ctx, evt, arg);
814 		return NULL;
815 	}
816 	return NULL;
817 }
818 
819 /**
820  * @ingroup sport_sm
821  * @brief SLI port state machine: Wait for the node shutdowns to complete.
822  *
823  * @par Description
824  * Waits for the ALL_CHILD_NODES_FREE event to be posted from the node
825  * shutdown process.
826  *
827  * @param ctx Sport state machine context.
828  * @param evt Event to process.
829  * @param arg Per event optional argument.
830  *
831  * @return Returns NULL.
832  */
833 
834 void *
835 __ocs_sport_wait_shutdown(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
836 {
837 	std_sport_state_decl();
838 
839 	sport_sm_trace(sport);
840 
841 	switch(evt) {
842 	case OCS_EVT_SPORT_ALLOC_OK:
843 	case OCS_EVT_SPORT_ALLOC_FAIL:
844 	case OCS_EVT_SPORT_ATTACH_OK:
845 	case OCS_EVT_SPORT_ATTACH_FAIL:
846 		/* ignore these events - just wait for the all free event */
847 		break;
848 
849 	case OCS_EVT_ALL_CHILD_NODES_FREE: {
850 		/* Remove the sport from the domain's sparse vector lookup table */
851 		ocs_lock(&domain->lookup_lock);
852 			spv_set(domain->lookup, sport->fc_id, NULL);
853 		ocs_unlock(&domain->lookup_lock);
854 		ocs_sm_transition(ctx, __ocs_sport_wait_port_free, NULL);
855 		if (ocs_hw_port_free(&ocs->hw, sport)) {
856 			ocs_log_err(sport->ocs, "ocs_hw_port_free failed\n");
857 			/* Not much we can do, free the sport anyways */
858 			ocs_sport_free(sport);
859 		}
860 		break;
861 	}
862 	default:
863 		__ocs_sport_common(__func__, ctx, evt, arg);
864 		return NULL;
865 	}
866 	return NULL;
867 }
868 
869 /**
870  * @ingroup sport_sm
871  * @brief SLI port state machine: Wait for the HW's port free to complete.
872  *
873  * @par Description
874  * Waits for the HW's port free to complete.
875  *
876  * @param ctx Sport state machine context.
877  * @param evt Event to process.
878  * @param arg Per event optional argument.
879  *
880  * @return Returns NULL.
881  */
882 
883 void *
884 __ocs_sport_wait_port_free(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *arg)
885 {
886 	std_sport_state_decl();
887 
888 	sport_sm_trace(sport);
889 
890 	switch(evt) {
891 	case OCS_EVT_SPORT_ATTACH_OK:
892 		/* Ignore as we are waiting for the free CB */
893 		break;
894 	case OCS_EVT_SPORT_FREE_OK: {
895 		/* All done, free myself */
896 		ocs_sport_free(sport);
897 		break;
898 	}
899 	default:
900 		__ocs_sport_common(__func__, ctx, evt, arg);
901 		return NULL;
902 	}
903 	return NULL;
904 }
905 
906 /**
907  * @ingroup sport_sm
908  * @brief Start the vports on a domain
909  *
910  * @par Description
911  * Use the vport specification to find the associated vports and start them.
912  *
913  * @param domain Pointer to the domain context.
914  *
915  * @return Returns 0 on success, or a negative error value on failure.
916  */
917 int32_t
918 ocs_vport_start(ocs_domain_t *domain)
919 {
920 	ocs_t *ocs = domain->ocs;
921 	ocs_xport_t *xport = ocs->xport;
922 	ocs_vport_spec_t *vport;
923 	ocs_vport_spec_t *next;
924 	ocs_sport_t *sport;
925 	int32_t rc = 0;
926 
927 	ocs_device_lock(ocs);
928 	ocs_list_foreach_safe(&xport->vport_list, vport, next) {
929 		if (vport->domain_instance == domain->instance_index &&
930 		    vport->sport == NULL) {
931 			/* If role not set, skip this vport */
932 			if (!(vport->enable_ini || vport->enable_tgt)) {
933 				continue;
934 			}
935 
936 			/* Allocate a sport */
937 			vport->sport = sport = ocs_sport_alloc(domain, vport->wwpn, vport->wwnn, vport->fc_id,
938 							       vport->enable_ini, vport->enable_tgt);
939 			if (sport == NULL) {
940 				rc = -1;
941 			} else {
942 				sport->is_vport = 1;
943 				sport->tgt_data = vport->tgt_data;
944 				sport->ini_data = vport->ini_data;
945 
946 				/* Transition to vport_init */
947 				ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
948 			}
949 		}
950 	}
951 	ocs_device_unlock(ocs);
952 	return rc;
953 }
954 
955 /**
956  * @ingroup sport_sm
957  * @brief Clear the sport reference in the vport specification.
958  *
959  * @par Description
960  * Clear the sport pointer on the vport specification when the vport is torn down. This allows it to be
961  * re-created when the link is re-established.
962  *
963  * @param sport Pointer to the sport context.
964  */
965 static void
966 ocs_vport_link_down(ocs_sport_t *sport)
967 {
968 	ocs_t *ocs = sport->ocs;
969 	ocs_xport_t *xport = ocs->xport;
970 	ocs_vport_spec_t *vport;
971 
972 	ocs_device_lock(ocs);
973 	ocs_list_foreach(&xport->vport_list, vport) {
974 		if (vport->sport == sport) {
975 			vport->sport = NULL;
976 			break;
977 		}
978 	}
979 	ocs_device_unlock(ocs);
980 }
981 
982 /**
983  * @ingroup sport_sm
984  * @brief Allocate a new virtual SLI port.
985  *
986  * @par Description
987  * A new sport is created in response to an external management request.
988  *
989  * @n @b Note: If the WWPN is zero, the firmware will assign the WWNs.
990  *
991  * @param domain Pointer to the domain context.
992  * @param wwpn World wide port name.
993  * @param wwnn World wide node name.
994  * @param fc_id Requested port ID (used in fabric emulation mode).
995  * @param ini TRUE, if port is created as an initiator node.
996  * @param tgt TRUE, if port is created as a target node.
997  * @param tgt_data Pointer to target-specific data.
998  * @param ini_data Pointer to initiator-specific data.
999  * @param restore_vport If TRUE, then the vport will be re-created automatically
1000  *                      on link disruption.
1001  *
1002  * @return Returns 0 on success; or a negative error value on failure.
1003  */
1004 
1005 int32_t
1006 ocs_sport_vport_new(ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn,
1007 		    uint32_t fc_id, uint8_t ini, uint8_t tgt, void *tgt_data,
1008 		    void *ini_data, uint8_t restore_vport)
1009 {
1010 	ocs_sport_t *sport;
1011 
1012 	if (ini && (domain->ocs->enable_ini == 0)) {
1013 		ocs_log_test(domain->ocs, "driver initiator functionality not enabled\n");
1014 		return -1;
1015 	}
1016 
1017 	if (tgt && (domain->ocs->enable_tgt == 0)) {
1018 		ocs_log_test(domain->ocs, "driver target functionality not enabled\n");
1019 		return -1;
1020 	}
1021 
1022 	/* Create a vport spec if we need to recreate this vport after a link up event */
1023 	if (restore_vport) {
1024 		if (ocs_vport_create_spec(domain->ocs, wwnn, wwpn, fc_id, ini, tgt, tgt_data, ini_data)) {
1025 			ocs_log_test(domain->ocs, "failed to create vport object entry\n");
1026 			return -1;
1027 		}
1028 		return ocs_vport_start(domain);
1029 	}
1030 
1031 	/* Allocate a sport */
1032 	sport = ocs_sport_alloc(domain, wwpn, wwnn, fc_id, ini, tgt);
1033 
1034 	if (sport == NULL) {
1035 		return -1;
1036 	}
1037 
1038 	sport->is_vport = 1;
1039 	sport->tgt_data = tgt_data;
1040 	sport->ini_data = ini_data;
1041 
1042 	/* Transition to vport_init */
1043 	ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
1044 
1045 	return 0;
1046 }
1047 
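/*
 * Minimal usage sketch (hypothetical management caller; wwpn/wwnn are
 * placeholders). This creates an initiator-only vport, lets the fabric
 * assign the FC_ID, and requests that the vport be restored automatically
 * after a link bounce:
 *
 *	rc = ocs_sport_vport_new(domain, wwpn, wwnn, UINT32_MAX,
 *				 TRUE, FALSE, NULL, NULL, TRUE);
 */
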
1048 int32_t
1049 ocs_sport_vport_alloc(ocs_domain_t *domain, ocs_vport_spec_t *vport)
1050 {
1051 	ocs_sport_t *sport = NULL;
1052 
1053 	if (domain == NULL) {
1054 		return (0);
1055 	}
1056 
1057 	ocs_assert((vport->sport == NULL), -1);
1058 
1059 	/* Allocate a sport */
1060 	vport->sport = sport = ocs_sport_alloc(domain, vport->wwpn, vport->wwnn, UINT32_MAX, vport->enable_ini, vport->enable_tgt);
1061 
1062 	if (sport == NULL) {
1063 		return -1;
1064 	}
1065 
1066 	sport->is_vport = 1;
1067 	sport->tgt_data = vport->tgt_data;
1068 	sport->ini_data = vport->ini_data;
1069 
1070 	/* Transition to vport_init */
1071 	ocs_sm_transition(&sport->sm, __ocs_sport_vport_init, NULL);
1072 
1073 	return (0);
1074 }
1075 
1076 /**
1077  * @ingroup sport_sm
1078  * @brief Remove a previously-allocated virtual port.
1079  *
1080  * @par Description
1081  * A previously-allocated virtual port is removed by posting the shutdown event to the
1082  * sport with a matching WWN.
1083  *
1084  * @param ocs Pointer to the device object.
1085  * @param domain Pointer to the domain structure (may be NULL).
1086  * @param wwpn World wide port name of the port to delete (host endian).
1087  * @param wwnn World wide node name of the port to delete (host endian).
1088  *
1089  * @return Returns 0 on success, or a negative error value on failure.
1090  */
1091 
1092 int32_t ocs_sport_vport_del(ocs_t *ocs, ocs_domain_t *domain, uint64_t wwpn, uint64_t wwnn)
1093 {
1094 	ocs_xport_t *xport = ocs->xport;
1095 	ocs_sport_t *sport;
1096 	int found = 0;
1097 	ocs_vport_spec_t *vport;
1098 	ocs_vport_spec_t *next;
1099 	uint32_t instance;
1100 
1101 	/* If no domain is given, use instance 0, otherwise use domain instance */
1102 	if (domain == NULL) {
1103 		instance = 0;
1104 	} else {
1105 		instance = domain->instance_index;
1106 	}
1107 
1108 	/* walk the ocs_vport_list and remove from there */
1109 
1110 	ocs_device_lock(ocs);
1111 		ocs_list_foreach_safe(&xport->vport_list, vport, next) {
1112 			if ((vport->domain_instance == instance) &&
1113 				(vport->wwpn == wwpn) && (vport->wwnn == wwnn)) {
1114 				vport->sport = NULL;
1115 				break;
1116 			}
1117 		}
1118 	ocs_device_unlock(ocs);
1119 
1120 	if (domain == NULL) {
1121 		/* No domain means no sport to look for */
1122 		return 0;
1123 	}
1124 
1125 	ocs_domain_lock(domain);
1126 		ocs_list_foreach(&domain->sport_list, sport) {
1127 			if ((sport->wwpn == wwpn) && (sport->wwnn == wwnn)) {
1128 				found = 1;
1129 				break;
1130 			}
1131 		}
1132 		if (found) {
1133 			/* Shutdown this SPORT */
1134 			ocs_sm_post_event(&sport->sm, OCS_EVT_SHUTDOWN, NULL);
1135 		}
1136 	ocs_domain_unlock(domain);
1137 	return 0;
1138 }
1139 
1140 /**
1141  * @brief Force free all saved vports.
1142  *
1143  * @par Description
1144  * Delete all device vports.
1145  *
1146  * @param ocs Pointer to the device object.
1147  *
1148  * @return None.
1149  */
1150 
1151 void
1152 ocs_vport_del_all(ocs_t *ocs)
1153 {
1154 	ocs_xport_t *xport = ocs->xport;
1155 	ocs_vport_spec_t *vport;
1156 	ocs_vport_spec_t *next;
1157 
1158 	ocs_device_lock(ocs);
1159 		ocs_list_foreach_safe(&xport->vport_list, vport, next) {
1160 			ocs_list_remove(&xport->vport_list, vport);
1161 			ocs_free(ocs, vport, sizeof(*vport));
1162 		}
1163 	ocs_device_unlock(ocs);
1164 }
1165 
1166 /**
1167  * @ingroup sport_sm
1168  * @brief Generate a SLI port ddump.
1169  *
1170  * @par Description
1171  * Generates the SLI port ddump data.
1172  *
1173  * @param textbuf Pointer to the text buffer.
1174  * @param sport Pointer to the SLI-4 port.
1175  *
1176  * @return Returns 0 on success, or a negative value on failure.
1177  */
1178 
1179 int
1180 ocs_ddump_sport(ocs_textbuf_t *textbuf, ocs_sli_port_t *sport)
1181 {
1182 	ocs_node_t *node;
1183 	ocs_node_group_dir_t *node_group_dir;
1184 	int retval = 0;
1185 
1186 	ocs_ddump_section(textbuf, "sport", sport->instance_index);
1187 	ocs_ddump_value(textbuf, "display_name", "%s", sport->display_name);
1188 
1189 	ocs_ddump_value(textbuf, "is_vport", "%d", sport->is_vport);
1190 	ocs_ddump_value(textbuf, "enable_ini", "%d", sport->enable_ini);
1191 	ocs_ddump_value(textbuf, "enable_tgt", "%d", sport->enable_tgt);
1192 	ocs_ddump_value(textbuf, "shutting_down", "%d", sport->shutting_down);
1193 	ocs_ddump_value(textbuf, "topology", "%d", sport->topology);
1194 	ocs_ddump_value(textbuf, "p2p_winner", "%d", sport->p2p_winner);
1195 	ocs_ddump_value(textbuf, "p2p_port_id", "%06x", sport->p2p_port_id);
1196 	ocs_ddump_value(textbuf, "p2p_remote_port_id", "%06x", sport->p2p_remote_port_id);
1197 	ocs_ddump_value(textbuf, "wwpn", "%016llx", (unsigned long long)sport->wwpn);
1198 	ocs_ddump_value(textbuf, "wwnn", "%016llx", (unsigned long long)sport->wwnn);
1199 	/*TODO: service_params */
1200 
1201 	ocs_ddump_value(textbuf, "indicator", "x%x", sport->indicator);
1202 	ocs_ddump_value(textbuf, "fc_id", "x%06x", sport->fc_id);
1203 	ocs_ddump_value(textbuf, "index", "%d", sport->index);
1204 
1205 	ocs_display_sparams(NULL, "sport_sparams", 1, textbuf, sport->service_params+4);
1206 
1207 	/* HLM dump */
1208 	ocs_ddump_section(textbuf, "hlm", sport->instance_index);
1209 	ocs_lock(&sport->node_group_lock);
1210 		ocs_list_foreach(&sport->node_group_dir_list, node_group_dir) {
1211 			ocs_remote_node_group_t *remote_node_group;
1212 
1213 			ocs_ddump_section(textbuf, "node_group_dir", node_group_dir->instance_index);
1214 
1215 			ocs_ddump_value(textbuf, "node_group_list_count", "%d", node_group_dir->node_group_list_count);
1216 			ocs_ddump_value(textbuf, "next_idx", "%d", node_group_dir->next_idx);
1217 			ocs_list_foreach(&node_group_dir->node_group_list, remote_node_group) {
1218 				ocs_ddump_section(textbuf, "node_group", remote_node_group->instance_index);
1219 				ocs_ddump_value(textbuf, "indicator", "x%x", remote_node_group->indicator);
1220 				ocs_ddump_value(textbuf, "index", "x%x", remote_node_group->index);
1221 				ocs_ddump_value(textbuf, "instance_index", "x%x", remote_node_group->instance_index);
1222 				ocs_ddump_endsection(textbuf, "node_group", 0);
1223 			}
1224 			ocs_ddump_endsection(textbuf, "node_group_dir", 0);
1225 		}
1226 	ocs_unlock(&sport->node_group_lock);
1227 	ocs_ddump_endsection(textbuf, "hlm", sport->instance_index);
1228 
1229 	ocs_scsi_ini_ddump(textbuf, OCS_SCSI_DDUMP_SPORT, sport);
1230 	ocs_scsi_tgt_ddump(textbuf, OCS_SCSI_DDUMP_SPORT, sport);
1231 
1232 	/* Dump all the nodes */
1233 	if (ocs_sport_lock_try(sport) != TRUE) {
1234 		/* Didn't get lock */
1235 		return -1;
1236 	}
1237 		/* Here the sport lock is held */
1238 		ocs_list_foreach(&sport->node_list, node) {
1239 			retval = ocs_ddump_node(textbuf, node);
1240 			if (retval != 0) {
1241 				break;
1242 			}
1243 		}
1244 	ocs_sport_unlock(sport);
1245 
1246 	ocs_ddump_endsection(textbuf, "sport", sport->index);
1247 
1248 	return retval;
1249 }
1250 
1251 void
1252 ocs_mgmt_sport_list(ocs_textbuf_t *textbuf, void *object)
1253 {
1254 	ocs_node_t *node;
1255 	ocs_sport_t *sport = (ocs_sport_t *)object;
1256 
1257 	ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1258 
1259 	/* Add my status values to textbuf */
1260 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "indicator");
1261 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "fc_id");
1262 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "index");
1263 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "display_name");
1264 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "is_vport");
1265 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "enable_ini");
1266 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "enable_tgt");
1267 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p");
1268 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_winner");
1269 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_port_id");
1270 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "p2p_remote_port_id");
1271 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwpn");
1272 	ocs_mgmt_emit_property_name(textbuf, MGMT_MODE_RD, "wwnn");
1273 
1274 	if (ocs_sport_lock_try(sport) == TRUE) {
1275 		/* If we get here, then we are holding the sport lock */
1276 		ocs_list_foreach(&sport->node_list, node) {
1277 			if ((node->mgmt_functions) && (node->mgmt_functions->get_list_handler)) {
1278 				node->mgmt_functions->get_list_handler(textbuf, node);
1279 			}
1280 		}
1281 		ocs_sport_unlock(sport);
1282 	}
1283 
1284 	ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1285 }
1286 
1287 int
1288 ocs_mgmt_sport_get(ocs_textbuf_t *textbuf, char *parent, char *name, void *object)
1289 {
1290 	ocs_node_t *node;
1291 	ocs_sport_t *sport = (ocs_sport_t *)object;
1292 	char qualifier[80];
1293 	int retval = -1;
1294 
1295 	ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1296 
1297 	snprintf(qualifier, sizeof(qualifier), "%s/sport[%d]", parent, sport->instance_index);
1298 
1299 	/* If it doesn't start with my qualifier I don't know what to do with it */
1300 	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1301 		char *unqualified_name = name + strlen(qualifier) +1;
1302 
1303 		/* See if it's a value I can supply */
1304 		if (ocs_strcmp(unqualified_name, "indicator") == 0) {
1305 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", sport->indicator);
1306 			retval = 0;
1307 		} else if (ocs_strcmp(unqualified_name, "fc_id") == 0) {
1308 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", sport->fc_id);
1309 			retval = 0;
1310 		} else if (ocs_strcmp(unqualified_name, "index") == 0) {
1311 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "index", "%d", sport->index);
1312 			retval = 0;
1313 		} else if (ocs_strcmp(unqualified_name, "display_name") == 0) {
1314 			ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", sport->display_name);
1315 			retval = 0;
1316 		} else if (ocs_strcmp(unqualified_name, "is_vport") == 0) {
1317 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_vport",  sport->is_vport);
1318 			retval = 0;
1319 		} else if (ocs_strcmp(unqualified_name, "enable_ini") == 0) {
1320 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_ini",  sport->enable_ini);
1321 			retval = 0;
1322 		} else if (ocs_strcmp(unqualified_name, "enable_tgt") == 0) {
1323 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_tgt",  sport->enable_tgt);
1324 			retval = 0;
1325 		} else if (ocs_strcmp(unqualified_name, "p2p_winner") == 0) {
1326 			ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "p2p_winner",  sport->p2p_winner);
1327 			retval = 0;
1328 		} else if (ocs_strcmp(unqualified_name, "p2p_port_id") == 0) {
1329 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_port_id", "0x%06x", sport->p2p_port_id);
1330 			retval = 0;
1331 		} else if (ocs_strcmp(unqualified_name, "p2p_remote_port_id") == 0) {
1332 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_remote_port_id", "0x%06x", sport->p2p_remote_port_id);
1333 			retval = 0;
1334 		} else if (ocs_strcmp(unqualified_name, "wwpn") == 0) {
1335 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%016llx", (unsigned long long)sport->wwpn);
1336 			retval = 0;
1337 		} else if (ocs_strcmp(unqualified_name, "wwnn") == 0) {
1338 			ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%016llx", (unsigned long long)sport->wwnn);
1339 			retval = 0;
1340 		} else {
1341 			/* If I didn't know the value of this status, pass the request to each of my children */
1342 			ocs_sport_lock(sport);
1343 				ocs_list_foreach(&sport->node_list, node) {
1344 					if ((node->mgmt_functions) && (node->mgmt_functions->get_handler)) {
1345 						retval = node->mgmt_functions->get_handler(textbuf, qualifier, name, node);
1346 					}
1347 
1348 					if (retval == 0) {
1349 						break;
1350 					}
1351 				}
1352 			ocs_sport_unlock(sport);
1353 		}
1354 	}
1355 
1356 	ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1357 
1358 	return retval;
1359 }
1360 
1361 void
1362 ocs_mgmt_sport_get_all(ocs_textbuf_t *textbuf, void *object)
1363 {
1364 	ocs_node_t *node;
1365 	ocs_sport_t *sport = (ocs_sport_t *)object;
1366 
1367 	ocs_mgmt_start_section(textbuf, "sport", sport->instance_index);
1368 
1369 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "indicator", "0x%x", sport->indicator);
1370 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "fc_id", "0x%06x", sport->fc_id);
1371 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "index", "%d", sport->index);
1372 	ocs_mgmt_emit_string(textbuf, MGMT_MODE_RD, "display_name", sport->display_name);
1373 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "is_vport",  sport->is_vport);
1374 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_ini",  sport->enable_ini);
1375 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "enable_tgt",  sport->enable_tgt);
1376 	ocs_mgmt_emit_boolean(textbuf, MGMT_MODE_RD, "p2p_winner",  sport->p2p_winner);
1377 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_port_id", "0x%06x", sport->p2p_port_id);
1378 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "p2p_remote_port_id", "0x%06x", sport->p2p_remote_port_id);
1379 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwpn", "0x%016llx" , (unsigned long long)sport->wwpn);
1380 	ocs_mgmt_emit_int(textbuf, MGMT_MODE_RD, "wwnn", "0x%016llx", (unsigned long long)sport->wwnn);
1381 
1382 	ocs_sport_lock(sport);
1383 	ocs_list_foreach(&sport->node_list, node) {
1384 		if ((node->mgmt_functions) && (node->mgmt_functions->get_all_handler)) {
1385 			node->mgmt_functions->get_all_handler(textbuf, node);
1386 		}
1387 	}
1388 	ocs_sport_unlock(sport);
1389 
1390 	ocs_mgmt_end_section(textbuf, "sport", sport->instance_index);
1391 }
1392 
1393 int
1394 ocs_mgmt_sport_set(char *parent, char *name, char *value, void *object)
1395 {
1396 	ocs_node_t *node;
1397 	ocs_sport_t *sport = (ocs_sport_t *)object;
1398 	char qualifier[80];
1399 	int retval = -1;
1400 
1401 	snprintf(qualifier, sizeof(qualifier), "%s/sport[%d]", parent, sport->instance_index);
1402 
1403 	/* If it doesn't start with my qualifier I don't know what to do with it */
1404 	if (ocs_strncmp(name, qualifier, strlen(qualifier)) == 0) {
1405 		/* The sport has no settable values.  Pass the request to each node. */
1406 
1407 		ocs_sport_lock(sport);
1408 		ocs_list_foreach(&sport->node_list, node) {
1409 			if ((node->mgmt_functions) && (node->mgmt_functions->set_handler)) {
1410 				retval = node->mgmt_functions->set_handler(qualifier, name, value, node);
1411 			}
1412 			if (retval == 0) {
1413 				break;
1414 			}
1415 		}
1416 		ocs_sport_unlock(sport);
1417 	}
1418 
1419 	return retval;
1420 }
1421 
1422 int
1423 ocs_mgmt_sport_exec(char *parent, char *action, void *arg_in, uint32_t arg_in_length,
1424 		    void *arg_out, uint32_t arg_out_length, void *object)
1425 {
1426 	ocs_node_t *node;
1427 	ocs_sport_t *sport = (ocs_sport_t *)object;
1428 	char qualifier[80];
1429 	int retval = -1;
1430 
1431 	snprintf(qualifier, sizeof(qualifier), "%s.sport%d", parent, sport->instance_index);
1432 
1433 	/* If it doesn't start with my qualifier I don't know what to do with it */
1434 	if (ocs_strncmp(action, qualifier, strlen(qualifier)) == 0) {
1435 		/* See if it's an action I can perform */
1436 
1437 		/* if (ocs_strcmp ....
1438 		 * {
1439 		 * } else
1440 		 */
1441 
1442 		{
1443 			/* If I didn't know how to do this action, pass the request to each of my children */
1444 			ocs_sport_lock(sport);
1445 				ocs_list_foreach(&sport->node_list, node) {
1446 					if ((node->mgmt_functions) && (node->mgmt_functions->exec_handler)) {
1447 						retval = node->mgmt_functions->exec_handler(qualifier, action, arg_in, arg_in_length,
1448 											    arg_out, arg_out_length, node);
1449 					}
1450 
1451 					if (retval == 0) {
1452 						break;
1453 					}
1454 				}
1455 			ocs_sport_unlock(sport);
1456 		}
1457 	}
1458 
1459 	return retval;
1460 }
1461 
1462 /**
1463  * @brief Save the virtual port's parameters.
1464  *
1465  * @par Description
1466  * The information required to restore a virtual port is saved.
1467  *
1468  * @param sport Pointer to the sport context.
1469  *
1470  * @return None.
1471  */
1472 
1473 static void
1474 ocs_vport_update_spec(ocs_sport_t *sport)
1475 {
1476 	ocs_t *ocs = sport->ocs;
1477 	ocs_xport_t *xport = ocs->xport;
1478 	ocs_vport_spec_t *vport;
1479 
1480 	ocs_device_lock(ocs);
1481 	ocs_list_foreach(&xport->vport_list, vport) {
1482 		if (vport->sport == sport) {
1483 			vport->wwnn = sport->wwnn;
1484 			vport->wwpn = sport->wwpn;
1485 			vport->tgt_data = sport->tgt_data;
1486 			vport->ini_data = sport->ini_data;
1487 			break;
1488 		}
1489 	}
1490 	ocs_device_unlock(ocs);
1491 }
1492 
1493 /**
1494  * @brief Create a saved vport entry.
1495  *
1496  * A saved vport entry is added to the vport list, which is restored following
1497  * a link up. This function is used to allow vports to be created the first time
1498  * the link comes up without having to go through the ioctl() API.
1499  *
1500  * @param ocs Pointer to device context.
1501  * @param wwnn World wide node name (may be zero for auto-select).
1502  * @param wwpn World wide port name (may be zero for auto-select).
1503  * @param fc_id Requested port ID (used in fabric emulation mode).
1504  * @param enable_ini TRUE if vport is to be an initiator port.
1505  * @param enable_tgt TRUE if vport is to be a target port.
1506  * @param tgt_data Pointer to target specific data.
1507  * @param ini_data Pointer to initiator specific data.
1508  *
1509  * @return None.
1510  */
1511 
1512 int8_t
1513 ocs_vport_create_spec(ocs_t *ocs, uint64_t wwnn, uint64_t wwpn, uint32_t fc_id, uint32_t enable_ini, uint32_t enable_tgt, void *tgt_data, void *ini_data)
1514 {
1515 	ocs_xport_t *xport = ocs->xport;
1516 	ocs_vport_spec_t *vport;
1517 
1518 	/* Walk the ocs_vport_list and return failure if a valid vport entry (one with a
1519 	   non-zero WWPN and WWNN) has already been created */
1520 	ocs_list_foreach(&xport->vport_list, vport) {
1521 		if ((wwpn && (vport->wwpn == wwpn)) && (wwnn && (vport->wwnn == wwnn))) {
1522 			ocs_log_test(ocs, "Failed: VPORT %016llx  %016llx already allocated\n",
1523 				     (unsigned long long)wwnn, (unsigned long long)wwpn);
1524 			return -1;
1525 		}
1526 	}
1527 
1528 	vport = ocs_malloc(ocs, sizeof(*vport), OCS_M_ZERO | OCS_M_NOWAIT);
1529 	if (vport == NULL) {
1530 		ocs_log_err(ocs, "ocs_malloc failed\n");
1531 		return -1;
1532 	}
1533 
1534 	vport->wwnn = wwnn;
1535 	vport->wwpn = wwpn;
1536 	vport->fc_id = fc_id;
1537 	vport->domain_instance = 0;	/*TODO: may need to change this */
1538 	vport->enable_tgt = enable_tgt;
1539 	vport->enable_ini = enable_ini;
1540 	vport->tgt_data = tgt_data;
1541 	vport->ini_data = ini_data;
1542 
1543 	ocs_device_lock(ocs);
1544 		ocs_list_add_tail(&xport->vport_list, vport);
1545 	ocs_device_unlock(ocs);
1546 	return 0;
1547 }
1548 
1549 /* node group api */
1550 
1551 /**
1552  * @brief Perform the AND operation on source vectors.
1553  *
1554  * @par Description
1555  * Performs an AND operation on the 8-bit values in source vectors @c b and @c c.
1556  * The resulting values are stored in @c a.
1557  *
1558  * @param a Destination-byte vector.
1559  * @param b Source-byte vector.
1560  * @param c Source-byte vector.
1561  * @param n Byte count.
1562  *
1563  * @return None.
1564  */
1565 
1566 static void
1567 and8(uint8_t *a, uint8_t *b, uint8_t *c, uint32_t n)
1568 {
1569 	uint32_t i;
1570 
1571 	for (i = 0; i < n; i ++) {
1572 		*a = *b & *c;
1573 		a++;
1574 		b++;
1575 		c++;
1576 	}
1577 }
1578 
1579 /**
1580  * @brief Service parameters mask data.
1581  */
1582 static fc_sparms_t sparms_cmp_mask = {
1583 	0,			/*uint32_t	command_code: 8, */
1584 	0,			/*		resv1: 24; */
1585 	{~0, ~0, ~0, ~0},	/* uint32_t	common_service_parameters[4]; */
1586 	0,			/* uint32_t	port_name_hi; */
1587 	0,			/* uint32_t	port_name_lo; */
1588 	0,			/* uint32_t	node_name_hi; */
1589 	0,			/* uint32_t	node_name_lo; */
1590 	{~0, ~0, ~0, ~0},	/* uint32_t	class1_service_parameters[4]; */
1591 	{~0, ~0, ~0, ~0},	/* uint32_t	class2_service_parameters[4]; */
1592 	{~0, ~0, ~0, ~0},	/* uint32_t	class3_service_parameters[4]; */
1593 	{~0, ~0, ~0, ~0},	/* uint32_t	class4_service_parameters[4]; */
1594 	{~0, ~0, ~0, ~0}};	/* uint32_t	vendor_version_level[4]; */
1595 
1596 /**
1597  * @brief Compare service parameters.
1598  *
1599  * @par Description
1600  * Returns 0 if the two service parameters are the same, excluding the port/node name
1601  * elements.
1602  *
1603  * @param sp1 Pointer to service parameters 1.
1604  * @param sp2 Pointer to service parameters 2.
1605  *
1606  * @return Returns 0 if parameters match; otherwise, returns a positive or negative value,
1607  * depending on the arithmetic magnitude of the first mismatching byte.
1608  */
1609 
1610 int
1611 ocs_sparm_cmp(uint8_t *sp1, uint8_t *sp2)
1612 {
1613 	int i;
1614 	int v;
1615 	uint8_t *sp3 = (uint8_t*) &sparms_cmp_mask;
1616 
1617 	for (i = 0; i < OCS_SERVICE_PARMS_LENGTH; i ++) {
1618 		v = ((int)(sp1[i] & sp3[i])) - ((int)(sp2[i] & sp3[i]));
1619 		if (v) {
1620 			break;
1621 		}
1622 	}
1623 	return v;
1624 }
1625 
1626 /**
1627  * @brief Allocate a node group directory entry.
1628  *
1629  * @par Description
1630  * A node group directory entry is allocated, initialized, and added to the sport's
1631  * node group directory list.
1632  *
1633  * @param sport Pointer to the sport object.
1634  * @param sparms Pointer to the service parameters.
1635  *
1636  * @return Returns a pointer to the allocated ocs_node_group_dir_t; or NULL.
1637  */
1638 
1639 ocs_node_group_dir_t *
1640 ocs_node_group_dir_alloc(ocs_sport_t *sport, uint8_t *sparms)
1641 {
1642 	ocs_node_group_dir_t *node_group_dir;
1643 
1644 	node_group_dir = ocs_malloc(sport->ocs, sizeof(*node_group_dir), OCS_M_ZERO | OCS_M_NOWAIT);
1645 	if (node_group_dir != NULL) {
1646 		node_group_dir->sport = sport;
1647 
1648 		ocs_lock(&sport->node_group_lock);
1649 			node_group_dir->instance_index = sport->node_group_dir_next_instance++;
1650 			and8(node_group_dir->service_params, sparms, (uint8_t*)&sparms_cmp_mask, OCS_SERVICE_PARMS_LENGTH);
1651 			ocs_list_init(&node_group_dir->node_group_list, ocs_remote_node_group_t, link);
1652 
1653 			node_group_dir->node_group_list_count = 0;
1654 			node_group_dir->next_idx = 0;
1655 			ocs_list_add_tail(&sport->node_group_dir_list, node_group_dir);
1656 		ocs_unlock(&sport->node_group_lock);
1657 
1658 		ocs_log_debug(sport->ocs, "[%s] [%d] allocating node group directory\n", sport->display_name,
1659 			node_group_dir->instance_index);
1660 	}
1661 	return node_group_dir;
1662 }
1663 
1664 /**
1665  * @brief Free a node group directory entry.
1666  *
1667  * @par Description
1668  * The node group directory entry @c node_group_dir is removed
1669  * from the sport's node group directory list and freed.
1670  *
1671  * @param node_group_dir Pointer to the node group directory entry.
1672  *
1673  * @return None.
1674  */
1675 
1676 void
1677 ocs_node_group_dir_free(ocs_node_group_dir_t *node_group_dir)
1678 {
1679 	ocs_sport_t *sport;
1680 	if (node_group_dir != NULL) {
1681 		sport = node_group_dir->sport;
1682 		ocs_log_debug(sport->ocs, "[%s] [%d] freeing node group directory\n", sport->display_name,
1683 			node_group_dir->instance_index);
1684 		ocs_lock(&sport->node_group_lock);
1685 			if (!ocs_list_empty(&node_group_dir->node_group_list)) {
1686 				ocs_log_test(sport->ocs, "[%s] WARNING: node group list not empty\n", sport->display_name);
1687 			}
1688 			ocs_list_remove(&sport->node_group_dir_list, node_group_dir);
1689 		ocs_unlock(&sport->node_group_lock);
1690 		ocs_free(sport->ocs, node_group_dir, sizeof(*node_group_dir));
1691 	}
1692 }
1693 
1694 /**
1695  * @brief Find a matching node group directory entry.
1696  *
1697  * @par Description
1698  * The sport's node group directory list is searched for a matching set of
1699  * service parameters. The first matching entry is returned; otherwise
1700  * NULL is returned.
1701  *
1702  * @param sport Pointer to the sport object.
1703  * @param sparms Pointer to the service parameters to match.
1704  *
1705  * @return Returns a pointer to the first matching entry found; or NULL.
1706  */
1707 
1708 ocs_node_group_dir_t *
1709 ocs_node_group_dir_find(ocs_sport_t *sport, uint8_t *sparms)
1710 {
1711 	ocs_node_group_dir_t *node_dir = NULL;
1712 
1713 	ocs_lock(&sport->node_group_lock);
1714 		ocs_list_foreach(&sport->node_group_dir_list, node_dir) {
1715 			if (ocs_sparm_cmp(sparms, node_dir->service_params) == 0) {
1716 				ocs_unlock(&sport->node_group_lock);
1717 				return node_dir;
1718 			}
1719 		}
1720 	ocs_unlock(&sport->node_group_lock);
1721 	return NULL;
1722 }
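
/*
 * Usage sketch (illustrative; mirrors the find-or-allocate pattern used by
 * ocs_node_group_init() below, with sport and node being the caller's sport
 * and node objects): look up a directory entry matching the node's service
 * parameters and allocate one only when no match exists.
 *
 *	node_group_dir = ocs_node_group_dir_find(sport, node->service_params);
 *	if (node_group_dir == NULL) {
 *		node_group_dir = ocs_node_group_dir_alloc(sport, node->service_params);
 *		if (node_group_dir == NULL) {
 *			// allocation failed; fall back to a normal (not shared) RPI
 *		}
 *	}
 */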
1723 
1724 /**
1725  * @brief Allocate a remote node group object.
1726  *
1727  * @par Description
1728  * A remote node group object is allocated, initialized, and placed on the node group
1729  * list of @c node_group_dir. The HW remote node group @b alloc function is called.
1730  *
1731  * @param node_group_dir Pointer to the node group directory.
1732  *
1733  * @return Returns a pointer to the allocated remote node group object; or NULL.
1734  */
1735 
1736 ocs_remote_node_group_t *
1737 ocs_remote_node_group_alloc(ocs_node_group_dir_t *node_group_dir)
1738 {
1739 	ocs_t *ocs;
1740 	ocs_sport_t *sport;
1741 	ocs_remote_node_group_t *node_group;
1742 	ocs_hw_rtn_e hrc;
1743 
1744 	ocs_assert(node_group_dir, NULL);
1745 	ocs_assert(node_group_dir->sport, NULL);
1746 	ocs_assert(node_group_dir->sport->ocs, NULL);
1747 
1748 	sport = node_group_dir->sport;
1749 	ocs = sport->ocs;
1750 
1751 	node_group = ocs_malloc(ocs, sizeof(*node_group), OCS_M_ZERO | OCS_M_NOWAIT);
1752 	if (node_group != NULL) {
1753 		/* set pointer to node group directory */
1754 		node_group->node_group_dir = node_group_dir;
1755 
1756 		ocs_lock(&node_group_dir->sport->node_group_lock);
1757 			node_group->instance_index = sport->node_group_next_instance++;
1758 		ocs_unlock(&node_group_dir->sport->node_group_lock);
1759 
1760 		/* invoke HW node group initialization */
1761 		hrc = ocs_hw_node_group_alloc(&ocs->hw, node_group);
1762 		if (hrc != OCS_HW_RTN_SUCCESS) {
1763 			ocs_log_err(ocs, "ocs_hw_node_group_alloc() failed: %d\n", hrc);
1764 			ocs_free(ocs, node_group, sizeof(*node_group));
1765 			return NULL;
1766 		}
1767 
1768 		ocs_log_debug(ocs, "[%s] [%d] indicator x%03x allocating node group\n", sport->display_name,
1769 			node_group->instance_index, node_group->indicator);
1770 
1771 		/* add to the node group directory entry's node group list */
1772 		ocs_lock(&node_group_dir->sport->node_group_lock);
1773 			ocs_list_add_tail(&node_group_dir->node_group_list, node_group);
1774 			node_group_dir->node_group_list_count ++;
1775 		ocs_unlock(&node_group_dir->sport->node_group_lock);
1776 	}
1777 	return node_group;
1778 }
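
/*
 * Callers should be prepared for a NULL return: on HW allocation failure the
 * partially constructed object is freed before returning, so no cleanup is
 * owed by the caller. A minimal sketch of the expected handling (mirroring
 * ocs_node_group_init()):
 *
 *	node_group = ocs_remote_node_group_alloc(node_group_dir);
 *	if (node_group == NULL) {
 *		ocs_log_err(ocs, "ocs_remote_node_group_alloc() failed\n");
 *		return -1;
 *	}
 */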
1779 
1780 /**
1781  * @brief Free a remote node group object.
1782  *
1783  * @par Description
1784  * The remote node group object @c node_group is removed from its
1785  * node group directory entry and freed.
1786  *
1787  * @param node_group Pointer to the remote node group object.
1788  *
1789  * @return None.
1790  */
1791 
1792 void
1793 ocs_remote_node_group_free(ocs_remote_node_group_t *node_group)
1794 {
1795 	ocs_sport_t *sport;
1796 	ocs_node_group_dir_t *node_group_dir;
1797 
1798 	if (node_group != NULL) {
1799 		ocs_assert(node_group->node_group_dir);
1800 		ocs_assert(node_group->node_group_dir->sport);
1801 		ocs_assert(node_group->node_group_dir->sport->ocs);
1802 
1803 		node_group_dir = node_group->node_group_dir;
1804 		sport = node_group_dir->sport;
1805 
1806 		ocs_log_debug(sport->ocs, "[%s] [%d] freeing node group\n", sport->display_name, node_group->instance_index);
1807 
1808 		/* Remove from node group directory node group list */
1809 		ocs_lock(&sport->node_group_lock);
1810 			ocs_list_remove(&node_group_dir->node_group_list, node_group);
1811 			node_group_dir->node_group_list_count --;
1812 		/* TODO: note that we're going to have the node_group_dir entry persist forever ... we could delete it
1813 		 * when the node_group_list_count goes to zero (or the linked list becomes empty) */
1814 		ocs_unlock(&sport->node_group_lock);
1815 		ocs_free(sport->ocs, node_group, sizeof(*node_group));
1816 	}
1817 }
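
/*
 * One possible shape of the TODO above (illustrative only, not implemented
 * here): release the directory entry once its node group list empties, e.g.
 *
 *	if (node_group_dir->node_group_list_count == 0) {
 *		ocs_node_group_dir_free(node_group_dir);
 *	}
 *
 * Note that this would have to run outside sport->node_group_lock, since
 * ocs_node_group_dir_free() acquires that lock itself.
 */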
1818 
1819 /**
1820  * @brief Initialize a node for high login mode.
1821  *
1822  * @par Description
1823  * The @c node is initialized for high login mode. The following steps are performed:
1824  * 1. The sport's node group directory is searched for a matching set of service parameters.
1825  * 2. If a matching set is not found, a node group directory entry is allocated.
1826  * 3. If fewer than @c hlm_group_size remote node group objects are present in the
1827  *    node group directory, a new remote node group object is allocated and added to the list.
1828  * 4. A remote node group object is selected, and the node is attached to the node group.
1829  *
1830  * @param node Pointer to the node.
1831  *
1832  * @return Returns 0 on success, or a negative error value on failure.
1833  */
1834 
1835 int
1836 ocs_node_group_init(ocs_node_t *node)
1837 {
1838 	ocs_t *ocs;
1839 	ocs_sport_t *sport;
1840 	ocs_node_group_dir_t *node_group_dir;
1841 	ocs_remote_node_group_t *node_group;
1842 	ocs_hw_rtn_e hrc;
1843 
1844 	ocs_assert(node, -1);
1845 	ocs_assert(node->sport, -1);
1846 	ocs_assert(node->ocs, -1);
1847 
1848 	ocs = node->ocs;
1849 	sport = node->sport;
1850 
1851 	ocs_assert(ocs->enable_hlm, -1);
1852 
1853 	/* see if there's a node group directory allocated for this service parameter set */
1854 	node_group_dir = ocs_node_group_dir_find(sport, node->service_params);
1855 	if (node_group_dir == NULL) {
1856 		/* not found, so allocate one */
1857 		node_group_dir = ocs_node_group_dir_alloc(sport, node->service_params);
1858 		if (node_group_dir == NULL) {
1859 			/* node group directory allocation failed ... can't continue here; however,
1860 			 * the node will be allocated with a normal (not shared) RPI
1861 			 */
1862 			ocs_log_err(ocs, "ocs_node_group_dir_alloc() failed\n");
1863 			return -1;
1864 		}
1865 	}
1866 
1867 	/* Check whether we've allocated hlm_group_size's worth of node group structures for this
1868 	 * directory entry; if not, allocate and use a new one, otherwise pick the next in round-robin order.
1869 	 */
1870 	ocs_lock(&node->sport->node_group_lock);
1871 		if (node_group_dir->node_group_list_count < ocs->hlm_group_size) {
1872 			ocs_unlock(&node->sport->node_group_lock);
1873 			node_group = ocs_remote_node_group_alloc(node_group_dir);
1874 			if (node_group == NULL) {
1875 				ocs_log_err(ocs, "ocs_remote_node_group_alloc() failed\n");
1876 				return -1;
1877 			}
1878 			ocs_lock(&node->sport->node_group_lock);
1879 		} else {
1880 			uint32_t idx = 0;
1881 
1882 			ocs_list_foreach(&node_group_dir->node_group_list, node_group) {
1883 				if (idx >= ocs->hlm_group_size) {
1884 					ocs_log_err(node->ocs, "assertion failed: idx >= ocs->hlm_group_size\n");
1885 					ocs_unlock(&node->sport->node_group_lock);
1886 					return -1;
1887 				}
1888 
1889 				if (idx == node_group_dir->next_idx) {
1890 					break;
1891 				}
1892 				idx ++;
1893 			}
1894 			if (idx == ocs->hlm_group_size) {
1895 				node_group = ocs_list_get_head(&node_group_dir->node_group_list);
1896 			}
1897 			if (++node_group_dir->next_idx >= node_group_dir->node_group_list_count) {
1898 				node_group_dir->next_idx = 0;
1899 			}
1900 		}
1901 	ocs_unlock(&node->sport->node_group_lock);
1902 
1903 	/* Initialize a pointer in the node back to the node group */
1904 	node->node_group = node_group;
1905 
1906 	/* Join this node into the group */
1907 	hrc = ocs_hw_node_group_attach(&ocs->hw, node_group, &node->rnode);
1908 
1909 	return (hrc == OCS_HW_RTN_SUCCESS) ? 0 : -1;
1910 }
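
/*
 * Caller sketch (illustrative; the exact call sites live elsewhere in the
 * driver): ocs_node_group_init() asserts that high login mode is enabled, so
 * callers are assumed to gate the call on the enable_hlm flag and to fall back
 * to a normal (not shared) RPI if it fails:
 *
 *	if (ocs->enable_hlm) {
 *		if (ocs_node_group_init(node) != 0) {
 *			// continue with a normal (not shared) RPI
 *		}
 *	}
 */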
1911