xref: /freebsd/sys/dev/ocs_fc/ocs_hw.c (revision 4928135658a9d0eaee37003df6137ab363fcb0b4)
1 /*-
2  * Copyright (c) 2017 Broadcom. All rights reserved.
3  * The term "Broadcom" refers to Broadcom Limited and/or its subsidiaries.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions are met:
7  *
8  * 1. Redistributions of source code must retain the above copyright notice,
9  *    this list of conditions and the following disclaimer.
10  *
11  * 2. Redistributions in binary form must reproduce the above copyright notice,
12  *    this list of conditions and the following disclaimer in the documentation
13  *    and/or other materials provided with the distribution.
14  *
15  * 3. Neither the name of the copyright holder nor the names of its contributors
16  *    may be used to endorse or promote products derived from this software
17  *    without specific prior written permission.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
23  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  *
31  * $FreeBSD$
32  */
33 
34 /**
35  * @file
36  * Defines and implements the Hardware Abstraction Layer (HW).
37  * All interaction with the hardware is performed through the HW, which abstracts
38  * the details of the underlying SLI-4 implementation.
39  */
40 
41 /**
42  * @defgroup devInitShutdown Device Initialization and Shutdown
43  * @defgroup domain Domain Functions
44  * @defgroup port Port Functions
45  * @defgroup node Remote Node Functions
46  * @defgroup io IO Functions
47  * @defgroup interrupt Interrupt handling
48  * @defgroup os OS Required Functions
49  */
50 
51 #include "ocs.h"
52 #include "ocs_os.h"
53 #include "ocs_hw.h"
54 #include "ocs_hw_queues.h"
55 
56 #define OCS_HW_MQ_DEPTH	128
57 #define OCS_HW_READ_FCF_SIZE	4096
58 #define OCS_HW_DEFAULT_AUTO_XFER_RDY_IOS	256
59 #define OCS_HW_WQ_TIMER_PERIOD_MS	500
60 
61 /* values used for setting the auto xfer rdy parameters */
62 #define OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT		0 /* 512 bytes */
63 #define OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT	TRUE
64 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT	FALSE
65 #define OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT	0
66 #define OCS_HW_REQUE_XRI_REGTAG			65534
67 /* max command and response buffer lengths -- arbitrary at the moment */
68 #define OCS_HW_DMTF_CLP_CMD_MAX	256
69 #define OCS_HW_DMTF_CLP_RSP_MAX	256
70 
71 /* HW global data */
72 ocs_hw_global_t hw_global;
73 
74 static void ocs_hw_queue_hash_add(ocs_queue_hash_t *, uint16_t, uint16_t);
75 static void ocs_hw_adjust_wqs(ocs_hw_t *hw);
76 static uint32_t ocs_hw_get_num_chutes(ocs_hw_t *hw);
77 static int32_t ocs_hw_cb_link(void *, void *);
78 static int32_t ocs_hw_cb_fip(void *, void *);
79 static int32_t ocs_hw_command_process(ocs_hw_t *, int32_t, uint8_t *, size_t);
80 static int32_t ocs_hw_mq_process(ocs_hw_t *, int32_t, sli4_queue_t *);
81 static int32_t ocs_hw_cb_read_fcf(ocs_hw_t *, int32_t, uint8_t *, void *);
82 static int32_t ocs_hw_cb_node_attach(ocs_hw_t *, int32_t, uint8_t *, void *);
83 static int32_t ocs_hw_cb_node_free(ocs_hw_t *, int32_t, uint8_t *, void *);
84 static int32_t ocs_hw_cb_node_free_all(ocs_hw_t *, int32_t, uint8_t *, void *);
85 static ocs_hw_rtn_e ocs_hw_setup_io(ocs_hw_t *);
86 static ocs_hw_rtn_e ocs_hw_init_io(ocs_hw_t *);
87 static int32_t ocs_hw_flush(ocs_hw_t *);
88 static int32_t ocs_hw_command_cancel(ocs_hw_t *);
89 static int32_t ocs_hw_io_cancel(ocs_hw_t *);
90 static void ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io);
91 static void ocs_hw_io_restore_sgl(ocs_hw_t *, ocs_hw_io_t *);
92 static int32_t ocs_hw_io_ini_sge(ocs_hw_t *, ocs_hw_io_t *, ocs_dma_t *, uint32_t, ocs_dma_t *);
93 static ocs_hw_rtn_e ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg);
94 static int32_t ocs_hw_cb_fw_write(ocs_hw_t *, int32_t, uint8_t *, void  *);
95 static int32_t ocs_hw_cb_sfp(ocs_hw_t *, int32_t, uint8_t *, void  *);
96 static int32_t ocs_hw_cb_temp(ocs_hw_t *, int32_t, uint8_t *, void  *);
97 static int32_t ocs_hw_cb_link_stat(ocs_hw_t *, int32_t, uint8_t *, void  *);
98 static int32_t ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg);
99 static void ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg);
100 static int32_t ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len);
101 typedef void (*ocs_hw_dmtf_clp_cb_t)(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
102 static ocs_hw_rtn_e ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg);
103 static void ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg);
104 
105 static int32_t __ocs_read_topology_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
106 static ocs_hw_rtn_e ocs_hw_get_linkcfg(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
107 static ocs_hw_rtn_e ocs_hw_get_linkcfg_lancer(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
108 static ocs_hw_rtn_e ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *, uint32_t, ocs_hw_port_control_cb_t, void *);
109 static ocs_hw_rtn_e ocs_hw_set_linkcfg(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
110 static ocs_hw_rtn_e ocs_hw_set_linkcfg_lancer(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
111 static ocs_hw_rtn_e ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *, ocs_hw_linkcfg_e, uint32_t, ocs_hw_port_control_cb_t, void *);
112 static void ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg);
113 static ocs_hw_rtn_e ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license);
114 static ocs_hw_rtn_e ocs_hw_set_dif_seed(ocs_hw_t *hw);
115 static ocs_hw_rtn_e ocs_hw_set_dif_mode(ocs_hw_t *hw);
116 static void ocs_hw_io_free_internal(void *arg);
117 static void ocs_hw_io_free_port_owned(void *arg);
118 static ocs_hw_rtn_e ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf);
119 static ocs_hw_rtn_e ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint);
120 static void ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status);
121 static int32_t ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t, uint16_t, uint16_t);
122 static ocs_hw_rtn_e ocs_hw_config_watchdog_timer(ocs_hw_t *hw);
123 static ocs_hw_rtn_e ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable);
124 
125 /* HW domain database operations */
126 static int32_t ocs_hw_domain_add(ocs_hw_t *, ocs_domain_t *);
127 static int32_t ocs_hw_domain_del(ocs_hw_t *, ocs_domain_t *);
128 
129 
130 /* Port state machine */
131 static void *__ocs_hw_port_alloc_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
132 static void *__ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
133 static void *__ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
134 static void *__ocs_hw_port_done(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
135 static void *__ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
136 
137 /* Domain state machine */
138 static void *__ocs_hw_domain_init(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
139 static void *__ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
140 static void * __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
141 static void *__ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *, ocs_sm_event_t, void *);
142 static void *__ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data);
143 static int32_t __ocs_hw_domain_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
144 static int32_t __ocs_hw_port_cb(ocs_hw_t *, int32_t, uint8_t *, void *);
145 static int32_t __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg);
146 
147 /* BZ 161832 */
148 static void ocs_hw_check_sec_hio_list(ocs_hw_t *hw);
149 
150 /* WQE timeouts */
151 static void target_wqe_timer_cb(void *arg);
152 static void shutdown_target_wqe_timer(ocs_hw_t *hw);
153 
154 static inline void
155 ocs_hw_add_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
156 {
157 	if (hw->config.emulate_tgt_wqe_timeout && io->tgt_wqe_timeout) {
158 		/*
159 		 * Active WQE list currently only used for
160 		 * target WQE timeouts.
161 		 */
162 		ocs_lock(&hw->io_lock);
163 			ocs_list_add_tail(&hw->io_timed_wqe, io);
164 			io->submit_ticks = ocs_get_os_ticks();
165 		ocs_unlock(&hw->io_lock);
166 	}
167 }
168 
169 static inline void
170 ocs_hw_remove_io_timed_wqe(ocs_hw_t *hw, ocs_hw_io_t *io)
171 {
172 	if (hw->config.emulate_tgt_wqe_timeout) {
173 		/*
174 		 * If target wqe timeouts are enabled,
175 		 * remove from active wqe list.
176 		 */
177 		ocs_lock(&hw->io_lock);
178 			if (ocs_list_on_list(&io->wqe_link)) {
179 				ocs_list_remove(&hw->io_timed_wqe, io);
180 			}
181 		ocs_unlock(&hw->io_lock);
182 	}
183 }
184 
185 static uint8_t ocs_hw_iotype_is_originator(uint16_t io_type)
186 {
187 	switch (io_type) {
188 	case OCS_HW_IO_INITIATOR_READ:
189 	case OCS_HW_IO_INITIATOR_WRITE:
190 	case OCS_HW_IO_INITIATOR_NODATA:
191 	case OCS_HW_FC_CT:
192 	case OCS_HW_ELS_REQ:
193 		return 1;
194 	default:
195 		return 0;
196 	}
197 }
198 
199 static uint8_t ocs_hw_wcqe_abort_needed(uint16_t status, uint8_t ext, uint8_t xb)
200 {
201 	/* if exchange not active, nothing to abort */
202 	if (!xb) {
203 		return FALSE;
204 	}
205 	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT) {
206 		switch (ext) {
207 		/* exceptions where abort is not needed */
208 		case SLI4_FC_LOCAL_REJECT_INVALID_RPI: /* lancer returns this after unreg_rpi */
209 		case SLI4_FC_LOCAL_REJECT_ABORT_REQUESTED: /* abort already in progress */
210 			return FALSE;
211 		default:
212 			break;
213 		}
214 	}
215 	return TRUE;
216 }
217 
218 /**
219  * @brief Determine the number of chutes on the device.
220  *
221  * @par Description
222  * Some devices require queue resources allocated per protocol processor
223  * (chute). This function returns the number of chutes on this device.
224  *
225  * @param hw Hardware context allocated by the caller.
226  *
227  * @return Returns the number of chutes on the device for protocol.
228  */
229 static uint32_t
230 ocs_hw_get_num_chutes(ocs_hw_t *hw)
231 {
232 	uint32_t num_chutes = 1;
233 
234 	if (sli_get_is_dual_ulp_capable(&hw->sli) &&
235 	    sli_get_is_ulp_enabled(&hw->sli, 0) &&
236 	    sli_get_is_ulp_enabled(&hw->sli, 1)) {
237 		num_chutes = 2;
238 	}
239 	return num_chutes;
240 }
241 
242 static ocs_hw_rtn_e
243 ocs_hw_link_event_init(ocs_hw_t *hw)
244 {
245 	if (hw == NULL) {
246 		ocs_log_err(hw->os, "bad parameter hw=%p\n", hw);
247 		return OCS_HW_RTN_ERROR;
248 	}
249 
250 	hw->link.status = SLI_LINK_STATUS_MAX;
251 	hw->link.topology = SLI_LINK_TOPO_NONE;
252 	hw->link.medium = SLI_LINK_MEDIUM_MAX;
253 	hw->link.speed = 0;
254 	hw->link.loop_map = NULL;
255 	hw->link.fc_id = UINT32_MAX;
256 
257 	return OCS_HW_RTN_SUCCESS;
258 }
259 
260 /**
261  * @ingroup devInitShutdown
262  * @brief If this is physical port 0, then read the max dump size.
263  *
264  * @par Description
265  * Queries the FW for the maximum dump size
266  *
267  * @param hw Hardware context allocated by the caller.
268  *
269  * @return Returns 0 on success, or a non-zero value on failure.
270  */
271 static ocs_hw_rtn_e
272 ocs_hw_read_max_dump_size(ocs_hw_t *hw)
273 {
274 	uint8_t	buf[SLI4_BMBX_SIZE];
275 	uint8_t bus, dev, func;
276 	int 	rc;
277 
278 	/* lancer only */
279 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
280 		ocs_log_debug(hw->os, "Function only supported for I/F type 2\n");
281 		return OCS_HW_RTN_ERROR;
282 	}
283 
284 	/*
285 	 * Make sure the FW is new enough to support this command. If the FW
286 	 * is too old, the FW will UE.
287 	 */
288 	if (hw->workaround.disable_dump_loc) {
289 		ocs_log_test(hw->os, "FW version is too old for this feature\n");
290 		return OCS_HW_RTN_ERROR;
291 	}
292 
293 	/* attempt to detemine the dump size for function 0 only. */
294 	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
295 	if (func == 0) {
296 		if (sli_cmd_common_set_dump_location(&hw->sli, buf,
297 							SLI4_BMBX_SIZE, 1, 0, NULL, 0)) {
298 			sli4_res_common_set_dump_location_t *rsp =
299 				(sli4_res_common_set_dump_location_t *)
300 				(buf + offsetof(sli4_cmd_sli_config_t,
301 						payload.embed));
302 
303 			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
304 			if (rc != OCS_HW_RTN_SUCCESS) {
305 				ocs_log_test(hw->os, "set dump location command failed\n");
306 				return rc;
307 			} else {
308 				hw->dump_size = rsp->buffer_length;
309 				ocs_log_debug(hw->os, "Dump size %x\n", rsp->buffer_length);
310 			}
311 		}
312 	}
313 	return OCS_HW_RTN_SUCCESS;
314 }
315 
316 /**
317  * @ingroup devInitShutdown
318  * @brief Set up the Hardware Abstraction Layer module.
319  *
320  * @par Description
321  * Calls set up to configure the hardware.
322  *
323  * @param hw Hardware context allocated by the caller.
324  * @param os Device abstraction.
325  * @param port_type Protocol type of port, such as FC and NIC.
326  *
327  * @todo Why is port_type a parameter?
328  *
329  * @return Returns 0 on success, or a non-zero value on failure.
330  */
331 ocs_hw_rtn_e
332 ocs_hw_setup(ocs_hw_t *hw, ocs_os_handle_t os, sli4_port_type_e port_type)
333 {
334 	uint32_t i;
335 	char prop_buf[32];
336 
337 	if (hw == NULL) {
338 		ocs_log_err(os, "bad parameter(s) hw=%p\n", hw);
339 		return OCS_HW_RTN_ERROR;
340 	}
341 
342 	if (hw->hw_setup_called) {
343 		/* Setup run-time workarounds.
344 		 * Call for each setup, to allow for hw_war_version
345 		 */
346 		ocs_hw_workaround_setup(hw);
347 		return OCS_HW_RTN_SUCCESS;
348 	}
349 
350 	/*
351 	 * ocs_hw_init() relies on NULL pointers indicating that a structure
352 	 * needs allocation. If a structure is non-NULL, ocs_hw_init() won't
353 	 * free/realloc that memory
354 	 */
355 	ocs_memset(hw, 0, sizeof(ocs_hw_t));
356 
357 	hw->hw_setup_called = TRUE;
358 
359 	hw->os = os;
360 
361 	ocs_lock_init(hw->os, &hw->cmd_lock, "HW_cmd_lock[%d]", ocs_instance(hw->os));
362 	ocs_list_init(&hw->cmd_head, ocs_command_ctx_t, link);
363 	ocs_list_init(&hw->cmd_pending, ocs_command_ctx_t, link);
364 	hw->cmd_head_count = 0;
365 
366 	ocs_lock_init(hw->os, &hw->io_lock, "HW_io_lock[%d]", ocs_instance(hw->os));
367 	ocs_lock_init(hw->os, &hw->io_abort_lock, "HW_io_abort_lock[%d]", ocs_instance(hw->os));
368 
369 	ocs_atomic_init(&hw->io_alloc_failed_count, 0);
370 
371 	hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
372 	hw->config.dif_seed = 0;
373 	hw->config.auto_xfer_rdy_blk_size_chip = OCS_HW_AUTO_XFER_RDY_BLK_SIZE_DEFAULT;
374 	hw->config.auto_xfer_rdy_ref_tag_is_lba = OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA_DEFAULT;
375 	hw->config.auto_xfer_rdy_app_tag_valid =  OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID_DEFAULT;
376 	hw->config.auto_xfer_rdy_app_tag_value = OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE_DEFAULT;
377 
378 
379 	if (sli_setup(&hw->sli, hw->os, port_type)) {
380 		ocs_log_err(hw->os, "SLI setup failed\n");
381 		return OCS_HW_RTN_ERROR;
382 	}
383 
384 	ocs_memset(hw->domains, 0, sizeof(hw->domains));
385 
386 	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
387 
388 	ocs_hw_link_event_init(hw);
389 
390 	sli_callback(&hw->sli, SLI4_CB_LINK, ocs_hw_cb_link, hw);
391 	sli_callback(&hw->sli, SLI4_CB_FIP, ocs_hw_cb_fip, hw);
392 
393 	/*
394 	 * Set all the queue sizes to the maximum allowed. These values may
395 	 * be changes later by the adjust and workaround functions.
396 	 */
397 	for (i = 0; i < ARRAY_SIZE(hw->num_qentries); i++) {
398 		hw->num_qentries[i] = sli_get_max_qentries(&hw->sli, i);
399 	}
400 
401 	/*
402 	 * The RQ assignment for RQ pair mode.
403 	 */
404 	hw->config.rq_default_buffer_size = OCS_HW_RQ_SIZE_PAYLOAD;
405 	hw->config.n_io = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
406 	if (ocs_get_property("auto_xfer_rdy_xri_cnt", prop_buf, sizeof(prop_buf)) == 0) {
407 		hw->config.auto_xfer_rdy_xri_cnt = ocs_strtoul(prop_buf, 0, 0);
408 	}
409 
410 	/* by default, enable initiator-only auto-ABTS emulation */
411 	hw->config.i_only_aab = TRUE;
412 
413 	/* Setup run-time workarounds */
414 	ocs_hw_workaround_setup(hw);
415 
416 	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
417 	if (hw->workaround.override_fcfi) {
418 		hw->first_domain_idx = -1;
419 	}
420 
421 	/* Must be done after the workaround setup */
422 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
423 		(void)ocs_hw_read_max_dump_size(hw);
424 	}
425 
426 	/* calculate the number of WQs required. */
427 	ocs_hw_adjust_wqs(hw);
428 
429 	/* Set the default dif mode */
430 	if (! sli_is_dif_inline_capable(&hw->sli)) {
431 		ocs_log_test(hw->os, "not inline capable, setting mode to separate\n");
432 		hw->config.dif_mode = OCS_HW_DIF_MODE_SEPARATE;
433 	}
434 	/* Workaround: BZ 161832 */
435 	if (hw->workaround.use_dif_sec_xri) {
436 		ocs_list_init(&hw->sec_hio_wait_list, ocs_hw_io_t, link);
437 	}
438 
439 	/*
440 	 * Figure out the starting and max ULP to spread the WQs across the
441 	 * ULPs.
442 	 */
443 	if (sli_get_is_dual_ulp_capable(&hw->sli)) {
444 		if (sli_get_is_ulp_enabled(&hw->sli, 0) &&
445 		    sli_get_is_ulp_enabled(&hw->sli, 1)) {
446 			hw->ulp_start = 0;
447 			hw->ulp_max   = 1;
448 		} else if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
449 			hw->ulp_start = 0;
450 			hw->ulp_max   = 0;
451 		} else {
452 			hw->ulp_start = 1;
453 			hw->ulp_max   = 1;
454 		}
455 	} else {
456 		if (sli_get_is_ulp_enabled(&hw->sli, 0)) {
457 			hw->ulp_start = 0;
458 			hw->ulp_max   = 0;
459 		} else {
460 			hw->ulp_start = 1;
461 			hw->ulp_max   = 1;
462 		}
463 	}
464 	ocs_log_debug(hw->os, "ulp_start %d, ulp_max %d\n",
465 		hw->ulp_start, hw->ulp_max);
466 	hw->config.queue_topology = hw_global.queue_topology_string;
467 
468 	hw->qtop = ocs_hw_qtop_parse(hw, hw->config.queue_topology);
469 
470 	hw->config.n_eq = hw->qtop->entry_counts[QTOP_EQ];
471 	hw->config.n_cq = hw->qtop->entry_counts[QTOP_CQ];
472 	hw->config.n_rq = hw->qtop->entry_counts[QTOP_RQ];
473 	hw->config.n_wq = hw->qtop->entry_counts[QTOP_WQ];
474 	hw->config.n_mq = hw->qtop->entry_counts[QTOP_MQ];
475 
476 	/* Verify qtop configuration against driver supported configuration */
477 	if (hw->config.n_rq > OCE_HW_MAX_NUM_MRQ_PAIRS) {
478 		ocs_log_crit(hw->os, "Max supported MRQ pairs = %d\n",
479 				OCE_HW_MAX_NUM_MRQ_PAIRS);
480 		return OCS_HW_RTN_ERROR;
481 	}
482 
483 	if (hw->config.n_eq > OCS_HW_MAX_NUM_EQ) {
484 		ocs_log_crit(hw->os, "Max supported EQs = %d\n",
485 				OCS_HW_MAX_NUM_EQ);
486 		return OCS_HW_RTN_ERROR;
487 	}
488 
489 	if (hw->config.n_cq > OCS_HW_MAX_NUM_CQ) {
490 		ocs_log_crit(hw->os, "Max supported CQs = %d\n",
491 				OCS_HW_MAX_NUM_CQ);
492 		return OCS_HW_RTN_ERROR;
493 	}
494 
495 	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
496 		ocs_log_crit(hw->os, "Max supported WQs = %d\n",
497 				OCS_HW_MAX_NUM_WQ);
498 		return OCS_HW_RTN_ERROR;
499 	}
500 
501 	if (hw->config.n_mq > OCS_HW_MAX_NUM_MQ) {
502 		ocs_log_crit(hw->os, "Max supported MQs = %d\n",
503 				OCS_HW_MAX_NUM_MQ);
504 		return OCS_HW_RTN_ERROR;
505 	}
506 
507 	return OCS_HW_RTN_SUCCESS;
508 }
509 
510 /**
511  * @ingroup devInitShutdown
512  * @brief Allocate memory structures to prepare for the device operation.
513  *
514  * @par Description
515  * Allocates memory structures needed by the device and prepares the device
516  * for operation.
517  * @n @n @b Note: This function may be called more than once (for example, at
518  * initialization and then after a reset), but the size of the internal resources
519  * may not be changed without tearing down the HW (ocs_hw_teardown()).
520  *
521  * @param hw Hardware context allocated by the caller.
522  *
523  * @return Returns 0 on success, or a non-zero value on failure.
524  */
525 ocs_hw_rtn_e
526 ocs_hw_init(ocs_hw_t *hw)
527 {
528 	ocs_hw_rtn_e	rc;
529 	uint32_t	i = 0;
530 	uint8_t		buf[SLI4_BMBX_SIZE];
531 	uint32_t	max_rpi;
532 	int		rem_count;
533 	int	        written_size = 0;
534 	uint32_t	count;
535 	char		prop_buf[32];
536 	uint32_t ramdisc_blocksize = 512;
537 	uint32_t q_count = 0;
538 	/*
539 	 * Make sure the command lists are empty. If this is start-of-day,
540 	 * they'll be empty since they were just initialized in ocs_hw_setup.
541 	 * If we've just gone through a reset, the command and command pending
542 	 * lists should have been cleaned up as part of the reset (ocs_hw_reset()).
543 	 */
544 	ocs_lock(&hw->cmd_lock);
545 		if (!ocs_list_empty(&hw->cmd_head)) {
546 			ocs_log_test(hw->os, "command found on cmd list\n");
547 			ocs_unlock(&hw->cmd_lock);
548 			return OCS_HW_RTN_ERROR;
549 		}
550 		if (!ocs_list_empty(&hw->cmd_pending)) {
551 			ocs_log_test(hw->os, "command found on pending list\n");
552 			ocs_unlock(&hw->cmd_lock);
553 			return OCS_HW_RTN_ERROR;
554 		}
555 	ocs_unlock(&hw->cmd_lock);
556 
557 	/* Free RQ buffers if prevously allocated */
558 	ocs_hw_rx_free(hw);
559 
560 	/*
561 	 * The IO queues must be initialized here for the reset case. The
562 	 * ocs_hw_init_io() function will re-add the IOs to the free list.
563 	 * The cmd_head list should be OK since we free all entries in
564 	 * ocs_hw_command_cancel() that is called in the ocs_hw_reset().
565 	 */
566 
567 	/* If we are in this function due to a reset, there may be stale items
568 	 * on lists that need to be removed.  Clean them up.
569 	 */
570 	rem_count=0;
571 	if (ocs_list_valid(&hw->io_wait_free)) {
572 		while ((!ocs_list_empty(&hw->io_wait_free))) {
573 			rem_count++;
574 			ocs_list_remove_head(&hw->io_wait_free);
575 		}
576 		if (rem_count > 0) {
577 			ocs_log_debug(hw->os, "removed %d items from io_wait_free list\n", rem_count);
578 		}
579 	}
580 	rem_count=0;
581 	if (ocs_list_valid(&hw->io_inuse)) {
582 		while ((!ocs_list_empty(&hw->io_inuse))) {
583 			rem_count++;
584 			ocs_list_remove_head(&hw->io_inuse);
585 		}
586 		if (rem_count > 0) {
587 			ocs_log_debug(hw->os, "removed %d items from io_inuse list\n", rem_count);
588 		}
589 	}
590 	rem_count=0;
591 	if (ocs_list_valid(&hw->io_free)) {
592 		while ((!ocs_list_empty(&hw->io_free))) {
593 			rem_count++;
594 			ocs_list_remove_head(&hw->io_free);
595 		}
596 		if (rem_count > 0) {
597 			ocs_log_debug(hw->os, "removed %d items from io_free list\n", rem_count);
598 		}
599 	}
600 	if (ocs_list_valid(&hw->io_port_owned)) {
601 		while ((!ocs_list_empty(&hw->io_port_owned))) {
602 			ocs_list_remove_head(&hw->io_port_owned);
603 		}
604 	}
605 	ocs_list_init(&hw->io_inuse, ocs_hw_io_t, link);
606 	ocs_list_init(&hw->io_free, ocs_hw_io_t, link);
607 	ocs_list_init(&hw->io_port_owned, ocs_hw_io_t, link);
608 	ocs_list_init(&hw->io_wait_free, ocs_hw_io_t, link);
609 	ocs_list_init(&hw->io_timed_wqe, ocs_hw_io_t, wqe_link);
610 	ocs_list_init(&hw->io_port_dnrx, ocs_hw_io_t, dnrx_link);
611 
612 	/* If MRQ not required, Make sure we dont request feature. */
613 	if (hw->config.n_rq == 1) {
614 		hw->sli.config.features.flag.mrqp = FALSE;
615 	}
616 
617 	if (sli_init(&hw->sli)) {
618 		ocs_log_err(hw->os, "SLI failed to initialize\n");
619 		return OCS_HW_RTN_ERROR;
620 	}
621 
622 	/*
623 	 * Enable the auto xfer rdy feature if requested.
624 	 */
625 	hw->auto_xfer_rdy_enabled = FALSE;
626 	if (sli_get_auto_xfer_rdy_capable(&hw->sli) &&
627 	    hw->config.auto_xfer_rdy_size > 0) {
628 		if (hw->config.esoc){
629 			if (ocs_get_property("ramdisc_blocksize", prop_buf, sizeof(prop_buf)) == 0) {
630 				ramdisc_blocksize = ocs_strtoul(prop_buf, 0, 0);
631 			}
632 			written_size = sli_cmd_config_auto_xfer_rdy_hp(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size, 1, ramdisc_blocksize);
633 		} else {
634 			written_size = sli_cmd_config_auto_xfer_rdy(&hw->sli, buf, SLI4_BMBX_SIZE, hw->config.auto_xfer_rdy_size);
635 		}
636 		if (written_size) {
637 			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
638 			if (rc != OCS_HW_RTN_SUCCESS) {
639 				ocs_log_err(hw->os, "config auto xfer rdy failed\n");
640 				return rc;
641 			}
642 		}
643 		hw->auto_xfer_rdy_enabled = TRUE;
644 
645 		if (hw->config.auto_xfer_rdy_t10_enable) {
646 			rc = ocs_hw_config_auto_xfer_rdy_t10pi(hw, buf);
647 			if (rc != OCS_HW_RTN_SUCCESS) {
648 				ocs_log_err(hw->os, "set parameters auto xfer rdy T10 PI failed\n");
649 				return rc;
650 			}
651 		}
652 	}
653 
654 	if(hw->sliport_healthcheck) {
655 		rc = ocs_hw_config_sli_port_health_check(hw, 0, 1);
656 		if (rc != OCS_HW_RTN_SUCCESS) {
657 			ocs_log_err(hw->os, "Enabling Sliport Health check failed \n");
658 			return rc;
659 		}
660 	}
661 
662 	/*
663 	 * Set FDT transfer hint, only works on Lancer
664 	 */
665 	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) && (OCS_HW_FDT_XFER_HINT != 0)) {
666 		/*
667 		 * Non-fatal error. In particular, we can disregard failure to set OCS_HW_FDT_XFER_HINT on
668 		 * devices with legacy firmware that do not support OCS_HW_FDT_XFER_HINT feature.
669 		 */
670 		ocs_hw_config_set_fdt_xfer_hint(hw, OCS_HW_FDT_XFER_HINT);
671 	}
672 
673 	/*
674 	 * Verify that we have not exceeded any queue sizes
675 	 */
676 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_EQ),
677 					OCS_HW_MAX_NUM_EQ);
678 	if (hw->config.n_eq > q_count) {
679 		ocs_log_err(hw->os, "requested %d EQ but %d allowed\n",
680 			    hw->config.n_eq, q_count);
681 		return OCS_HW_RTN_ERROR;
682 	}
683 
684 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_CQ),
685 					OCS_HW_MAX_NUM_CQ);
686 	if (hw->config.n_cq > q_count) {
687 		ocs_log_err(hw->os, "requested %d CQ but %d allowed\n",
688 			    hw->config.n_cq, q_count);
689 		return OCS_HW_RTN_ERROR;
690 	}
691 
692 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_MQ),
693 					OCS_HW_MAX_NUM_MQ);
694 	if (hw->config.n_mq > q_count) {
695 		ocs_log_err(hw->os, "requested %d MQ but %d allowed\n",
696 			    hw->config.n_mq, q_count);
697 		return OCS_HW_RTN_ERROR;
698 	}
699 
700 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_RQ),
701 					OCS_HW_MAX_NUM_RQ);
702 	if (hw->config.n_rq > q_count) {
703 		ocs_log_err(hw->os, "requested %d RQ but %d allowed\n",
704 			    hw->config.n_rq, q_count);
705 		return OCS_HW_RTN_ERROR;
706 	}
707 
708 	q_count = MIN(sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ),
709 					OCS_HW_MAX_NUM_WQ);
710 	if (hw->config.n_wq > q_count) {
711 		ocs_log_err(hw->os, "requested %d WQ but %d allowed\n",
712 			    hw->config.n_wq, q_count);
713 		return OCS_HW_RTN_ERROR;
714 	}
715 
716 	/* zero the hashes */
717 	ocs_memset(hw->cq_hash, 0, sizeof(hw->cq_hash));
718 	ocs_log_debug(hw->os, "Max CQs %d, hash size = %d\n",
719 			OCS_HW_MAX_NUM_CQ, OCS_HW_Q_HASH_SIZE);
720 
721 	ocs_memset(hw->rq_hash, 0, sizeof(hw->rq_hash));
722 	ocs_log_debug(hw->os, "Max RQs %d, hash size = %d\n",
723 			OCS_HW_MAX_NUM_RQ, OCS_HW_Q_HASH_SIZE);
724 
725 	ocs_memset(hw->wq_hash, 0, sizeof(hw->wq_hash));
726 	ocs_log_debug(hw->os, "Max WQs %d, hash size = %d\n",
727 			OCS_HW_MAX_NUM_WQ, OCS_HW_Q_HASH_SIZE);
728 
729 
730 	rc = ocs_hw_init_queues(hw, hw->qtop);
731 	if (rc != OCS_HW_RTN_SUCCESS) {
732 		return rc;
733 	}
734 
735 	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
736 	i = sli_fc_get_rpi_requirements(&hw->sli, max_rpi);
737 	if (i) {
738 		ocs_dma_t payload_memory;
739 
740 		rc = OCS_HW_RTN_ERROR;
741 
742 		if (hw->rnode_mem.size) {
743 			ocs_dma_free(hw->os, &hw->rnode_mem);
744 		}
745 
746 		if (ocs_dma_alloc(hw->os, &hw->rnode_mem, i, 4096)) {
747 			ocs_log_err(hw->os, "remote node memory allocation fail\n");
748 			return OCS_HW_RTN_NO_MEMORY;
749 		}
750 
751 		payload_memory.size = 0;
752 		if (sli_cmd_fcoe_post_hdr_templates(&hw->sli, buf, SLI4_BMBX_SIZE,
753 					&hw->rnode_mem, UINT16_MAX, &payload_memory)) {
754 			rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
755 
756 			if (payload_memory.size != 0) {
757 				/* The command was non-embedded - need to free the dma buffer */
758 				ocs_dma_free(hw->os, &payload_memory);
759 			}
760 		}
761 
762 		if (rc != OCS_HW_RTN_SUCCESS) {
763 			ocs_log_err(hw->os, "header template registration failed\n");
764 			return rc;
765 		}
766 	}
767 
768 	/* Allocate and post RQ buffers */
769 	rc = ocs_hw_rx_allocate(hw);
770 	if (rc) {
771 		ocs_log_err(hw->os, "rx_allocate failed\n");
772 		return rc;
773 	}
774 
775 	/* Populate hw->seq_free_list */
776 	if (hw->seq_pool == NULL) {
777 		uint32_t count = 0;
778 		uint32_t i;
779 
780 		/* Sum up the total number of RQ entries, to use to allocate the sequence object pool */
781 		for (i = 0; i < hw->hw_rq_count; i++) {
782 			count += hw->hw_rq[i]->entry_count;
783 		}
784 
785 		hw->seq_pool = ocs_array_alloc(hw->os, sizeof(ocs_hw_sequence_t), count);
786 		if (hw->seq_pool == NULL) {
787 			ocs_log_err(hw->os, "malloc seq_pool failed\n");
788 			return OCS_HW_RTN_NO_MEMORY;
789 		}
790 	}
791 
792 	if(ocs_hw_rx_post(hw)) {
793 		ocs_log_err(hw->os, "WARNING - error posting RQ buffers\n");
794 	}
795 
796 	/* Allocate rpi_ref if not previously allocated */
797 	if (hw->rpi_ref == NULL) {
798 		hw->rpi_ref = ocs_malloc(hw->os, max_rpi * sizeof(*hw->rpi_ref),
799 					  OCS_M_ZERO | OCS_M_NOWAIT);
800 		if (hw->rpi_ref == NULL) {
801 			ocs_log_err(hw->os, "rpi_ref allocation failure (%d)\n", i);
802 			return OCS_HW_RTN_NO_MEMORY;
803 		}
804 	}
805 
806 	for (i = 0; i < max_rpi; i ++) {
807 		ocs_atomic_init(&hw->rpi_ref[i].rpi_count, 0);
808 		ocs_atomic_init(&hw->rpi_ref[i].rpi_attached, 0);
809 	}
810 
811 	ocs_memset(hw->domains, 0, sizeof(hw->domains));
812 
813 	/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
814 	if (hw->workaround.override_fcfi) {
815 		hw->first_domain_idx = -1;
816 	}
817 
818 	ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
819 
820 	/* Register a FCFI to allow unsolicited frames to be routed to the driver */
821 	if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
822 
823 		if (hw->hw_mrq_count) {
824 			ocs_log_debug(hw->os, "using REG_FCFI MRQ\n");
825 
826 			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE, 0, 0);
827 			if (rc != OCS_HW_RTN_SUCCESS) {
828 				ocs_log_err(hw->os, "REG_FCFI_MRQ FCFI registration failed\n");
829 				return rc;
830 			}
831 
832 			rc = ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_MRQ_MODE, 0, 0);
833 			if (rc != OCS_HW_RTN_SUCCESS) {
834 				ocs_log_err(hw->os, "REG_FCFI_MRQ MRQ registration failed\n");
835 				return rc;
836 			}
837 		} else {
838 			sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
839 
840 			ocs_log_debug(hw->os, "using REG_FCFI standard\n");
841 
842 			/* Set the filter match/mask values from hw's filter_def values */
843 			for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
844 				rq_cfg[i].rq_id = 0xffff;
845 				rq_cfg[i].r_ctl_mask =	(uint8_t)  hw->config.filter_def[i];
846 				rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
847 				rq_cfg[i].type_mask =	(uint8_t) (hw->config.filter_def[i] >> 16);
848 				rq_cfg[i].type_match =	(uint8_t) (hw->config.filter_def[i] >> 24);
849 			}
850 
851 			/*
852 			 * Update the rq_id's of the FCF configuration (don't update more than the number
853 			 * of rq_cfg elements)
854 			 */
855 			for (i = 0; i < OCS_MIN(hw->hw_rq_count, SLI4_CMD_REG_FCFI_NUM_RQ_CFG); i++) {
856 				hw_rq_t *rq = hw->hw_rq[i];
857 				uint32_t j;
858 				for (j = 0; j < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; j++) {
859 					uint32_t mask = (rq->filter_mask != 0) ? rq->filter_mask : 1;
860 					if (mask & (1U << j)) {
861 						rq_cfg[j].rq_id = rq->hdr->id;
862 						ocs_log_debug(hw->os, "REG_FCFI: filter[%d] %08X -> RQ[%d] id=%d\n",
863 							j, hw->config.filter_def[j], i, rq->hdr->id);
864 					}
865 				}
866 			}
867 
868 			rc = OCS_HW_RTN_ERROR;
869 
870 			if (sli_cmd_reg_fcfi(&hw->sli, buf, SLI4_BMBX_SIZE, 0, rq_cfg, 0)) {
871 				rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
872 			}
873 
874 			if (rc != OCS_HW_RTN_SUCCESS) {
875 				ocs_log_err(hw->os, "FCFI registration failed\n");
876 				return rc;
877 			}
878 			hw->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)buf)->fcfi;
879 		}
880 
881 	}
882 
883 	/*
884 	 * Allocate the WQ request tag pool, if not previously allocated (the request tag value is 16 bits,
885 	 * thus the pool allocation size of 64k)
886 	 */
887 	rc = ocs_hw_reqtag_init(hw);
888 	if (rc) {
889 		ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed: %d\n", rc);
890 		return rc;
891 	}
892 
893 	rc = ocs_hw_setup_io(hw);
894 	if (rc) {
895 		ocs_log_err(hw->os, "IO allocation failure\n");
896 		return rc;
897 	}
898 
899 	rc = ocs_hw_init_io(hw);
900 	if (rc) {
901 		ocs_log_err(hw->os, "IO initialization failure\n");
902 		return rc;
903 	}
904 
905 	ocs_queue_history_init(hw->os, &hw->q_hist);
906 
907 	/* get hw link config; polling, so callback will be called immediately */
908 	hw->linkcfg = OCS_HW_LINKCFG_NA;
909 	ocs_hw_get_linkcfg(hw, OCS_CMD_POLL, ocs_hw_init_linkcfg_cb, hw);
910 
911 	/* if lancer ethernet, ethernet ports need to be enabled */
912 	if ((hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) &&
913 	    (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_ETHERNET)) {
914 		if (ocs_hw_set_eth_license(hw, hw->eth_license)) {
915 			/* log warning but continue */
916 			ocs_log_err(hw->os, "Failed to set ethernet license\n");
917 		}
918 	}
919 
920 	/* Set the DIF seed - only for lancer right now */
921 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli) &&
922 	    ocs_hw_set_dif_seed(hw) != OCS_HW_RTN_SUCCESS) {
923 		ocs_log_err(hw->os, "Failed to set DIF seed value\n");
924 		return rc;
925 	}
926 
927 	/* Set the DIF mode - skyhawk only */
928 	if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli) &&
929 	    sli_get_dif_capable(&hw->sli)) {
930 		rc = ocs_hw_set_dif_mode(hw);
931 		if (rc != OCS_HW_RTN_SUCCESS) {
932 			ocs_log_err(hw->os, "Failed to set DIF mode value\n");
933 			return rc;
934 		}
935 	}
936 
937 	/*
938 	 * Arming the EQ allows (e.g.) interrupts when CQ completions write EQ entries
939 	 */
940 	for (i = 0; i < hw->eq_count; i++) {
941 		sli_queue_arm(&hw->sli, &hw->eq[i], TRUE);
942 	}
943 
944 	/*
945 	 * Initialize RQ hash
946 	 */
947 	for (i = 0; i < hw->rq_count; i++) {
948 		ocs_hw_queue_hash_add(hw->rq_hash, hw->rq[i].id, i);
949 	}
950 
951 	/*
952 	 * Initialize WQ hash
953 	 */
954 	for (i = 0; i < hw->wq_count; i++) {
955 		ocs_hw_queue_hash_add(hw->wq_hash, hw->wq[i].id, i);
956 	}
957 
958 	/*
959 	 * Arming the CQ allows (e.g.) MQ completions to write CQ entries
960 	 */
961 	for (i = 0; i < hw->cq_count; i++) {
962 		ocs_hw_queue_hash_add(hw->cq_hash, hw->cq[i].id, i);
963 		sli_queue_arm(&hw->sli, &hw->cq[i], TRUE);
964 	}
965 
966 	/* record the fact that the queues are functional */
967 	hw->state = OCS_HW_STATE_ACTIVE;
968 
969 	/* Note: Must be after the IOs are setup and the state is active*/
970 	if (ocs_hw_rqpair_init(hw)) {
971 		ocs_log_err(hw->os, "WARNING - error initializing RQ pair\n");
972 	}
973 
974 	/* finally kick off periodic timer to check for timed out target WQEs */
975 	if (hw->config.emulate_tgt_wqe_timeout) {
976 		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw,
977 				OCS_HW_WQ_TIMER_PERIOD_MS);
978 	}
979 
980 	/*
981 	 * Allocate a HW IOs for send frame.  Allocate one for each Class 1 WQ, or if there
982 	 * are none of those, allocate one for WQ[0]
983 	 */
984 	if ((count = ocs_varray_get_count(hw->wq_class_array[1])) > 0) {
985 		for (i = 0; i < count; i++) {
986 			hw_wq_t *wq = ocs_varray_iter_next(hw->wq_class_array[1]);
987 			wq->send_frame_io = ocs_hw_io_alloc(hw);
988 			if (wq->send_frame_io == NULL) {
989 				ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
990 			}
991 		}
992 	} else {
993 		hw->hw_wq[0]->send_frame_io = ocs_hw_io_alloc(hw);
994 		if (hw->hw_wq[0]->send_frame_io == NULL) {
995 			ocs_log_err(hw->os, "ocs_hw_io_alloc for send_frame_io failed\n");
996 		}
997 	}
998 
999 	/* Initialize send frame frame sequence id */
1000 	ocs_atomic_init(&hw->send_frame_seq_id, 0);
1001 
1002 	/* Initialize watchdog timer if enabled by user */
1003 	hw->expiration_logged = 0;
1004 	if(hw->watchdog_timeout) {
1005 		if((hw->watchdog_timeout < 1) || (hw->watchdog_timeout > 65534)) {
1006 			ocs_log_err(hw->os, "watchdog_timeout out of range: Valid range is 1 - 65534\n");
1007 		}else if(!ocs_hw_config_watchdog_timer(hw)) {
1008 			ocs_log_info(hw->os, "watchdog timer configured with timeout = %d seconds \n", hw->watchdog_timeout);
1009 		}
1010 	}
1011 
1012 	if (ocs_dma_alloc(hw->os, &hw->domain_dmem, 112, 4)) {
1013 	   ocs_log_err(hw->os, "domain node memory allocation fail\n");
1014 	   return OCS_HW_RTN_NO_MEMORY;
1015 	}
1016 
1017 	if (ocs_dma_alloc(hw->os, &hw->fcf_dmem, OCS_HW_READ_FCF_SIZE, OCS_HW_READ_FCF_SIZE)) {
1018 	   ocs_log_err(hw->os, "domain fcf memory allocation fail\n");
1019 	   return OCS_HW_RTN_NO_MEMORY;
1020 	}
1021 
1022 	if ((0 == hw->loop_map.size) &&	ocs_dma_alloc(hw->os, &hw->loop_map,
1023 				SLI4_MIN_LOOP_MAP_BYTES, 4)) {
1024 		ocs_log_err(hw->os, "Loop dma alloc failed size:%d \n", hw->loop_map.size);
1025 	}
1026 
1027 	return OCS_HW_RTN_SUCCESS;
1028 }
1029 
1030 /**
1031  * @brief Configure Multi-RQ
1032  *
1033  * @param hw	Hardware context allocated by the caller.
1034  * @param mode	1 to set MRQ filters and 0 to set FCFI index
1035  * @param vlanid    valid in mode 0
1036  * @param fcf_index valid in mode 0
1037  *
1038  * @return Returns 0 on success, or a non-zero value on failure.
1039  */
1040 static int32_t
1041 ocs_hw_config_mrq(ocs_hw_t *hw, uint8_t mode, uint16_t vlanid, uint16_t fcf_index)
1042 {
1043 	uint8_t buf[SLI4_BMBX_SIZE], mrq_bitmask = 0;
1044 	hw_rq_t *rq;
1045 	sli4_cmd_reg_fcfi_mrq_t *rsp = NULL;
1046 	uint32_t i, j;
1047 	sli4_cmd_rq_cfg_t rq_filter[SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG];
1048 	int32_t rc;
1049 
1050 	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1051 		goto issue_cmd;
1052 	}
1053 
1054 	/* Set the filter match/mask values from hw's filter_def values */
1055 	for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
1056 		rq_filter[i].rq_id = 0xffff;
1057 		rq_filter[i].r_ctl_mask  = (uint8_t)  hw->config.filter_def[i];
1058 		rq_filter[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
1059 		rq_filter[i].type_mask   = (uint8_t) (hw->config.filter_def[i] >> 16);
1060 		rq_filter[i].type_match  = (uint8_t) (hw->config.filter_def[i] >> 24);
1061 	}
1062 
1063 	/* Accumulate counts for each filter type used, build rq_ids[] list */
1064 	for (i = 0; i < hw->hw_rq_count; i++) {
1065 		rq = hw->hw_rq[i];
1066 		for (j = 0; j < SLI4_CMD_REG_FCFI_MRQ_NUM_RQ_CFG; j++) {
1067 			if (rq->filter_mask & (1U << j)) {
1068 				if (rq_filter[j].rq_id != 0xffff) {
1069 					/* Already used. Bailout ifts not RQset case */
1070 					if (!rq->is_mrq || (rq_filter[j].rq_id != rq->base_mrq_id)) {
1071 						ocs_log_err(hw->os, "Wrong queue topology.\n");
1072 						return OCS_HW_RTN_ERROR;
1073 					}
1074 					continue;
1075 				}
1076 
1077 				if (rq->is_mrq) {
1078 					rq_filter[j].rq_id = rq->base_mrq_id;
1079 					mrq_bitmask |= (1U << j);
1080 				} else {
1081 					rq_filter[j].rq_id = rq->hdr->id;
1082 				}
1083 			}
1084 		}
1085 	}
1086 
1087 issue_cmd:
1088 	/* Invoke REG_FCFI_MRQ */
1089 	rc = sli_cmd_reg_fcfi_mrq(&hw->sli,
1090 				 buf,					/* buf */
1091 				 SLI4_BMBX_SIZE,			/* size */
1092 				 mode,					/* mode 1 */
1093 				 fcf_index,				/* fcf_index */
1094 				 vlanid,				/* vlan_id */
1095 				 hw->config.rq_selection_policy,	/* RQ selection policy*/
1096 				 mrq_bitmask,				/* MRQ bitmask */
1097 				 hw->hw_mrq_count,			/* num_mrqs */
1098 				 rq_filter);				/* RQ filter */
1099 	if (rc == 0) {
1100 		ocs_log_err(hw->os, "sli_cmd_reg_fcfi_mrq() failed: %d\n", rc);
1101 		return OCS_HW_RTN_ERROR;
1102 	}
1103 
1104 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
1105 
1106 	rsp = (sli4_cmd_reg_fcfi_mrq_t *)buf;
1107 
1108 	if ((rc != OCS_HW_RTN_SUCCESS) || (rsp->hdr.status)) {
1109 		ocs_log_err(hw->os, "FCFI MRQ registration failed. cmd = %x status = %x\n",
1110 			    rsp->hdr.command, rsp->hdr.status);
1111 		return OCS_HW_RTN_ERROR;
1112 	}
1113 
1114 	if (mode == SLI4_CMD_REG_FCFI_SET_FCFI_MODE) {
1115 		hw->fcf_indicator = rsp->fcfi;
1116 	}
1117 	return 0;
1118 }
1119 
1120 /**
1121  * @brief Callback function for getting linkcfg during HW initialization.
1122  *
1123  * @param status Status of the linkcfg get operation.
1124  * @param value Link configuration enum to which the link configuration is set.
1125  * @param arg Callback argument (ocs_hw_t *).
1126  *
1127  * @return None.
1128  */
1129 static void
1130 ocs_hw_init_linkcfg_cb(int32_t status, uintptr_t value, void *arg)
1131 {
1132 	ocs_hw_t *hw = (ocs_hw_t *)arg;
1133 	if (status == 0) {
1134 		hw->linkcfg = (ocs_hw_linkcfg_e)value;
1135 	} else {
1136 		hw->linkcfg = OCS_HW_LINKCFG_NA;
1137 	}
1138 	ocs_log_debug(hw->os, "linkcfg=%d\n", hw->linkcfg);
1139 }
1140 
1141 /**
1142  * @ingroup devInitShutdown
1143  * @brief Tear down the Hardware Abstraction Layer module.
1144  *
1145  * @par Description
1146  * Frees memory structures needed by the device, and shuts down the device. Does
1147  * not free the HW context memory (which is done by the caller).
1148  *
1149  * @param hw Hardware context allocated by the caller.
1150  *
1151  * @return Returns 0 on success, or a non-zero value on failure.
1152  */
1153 ocs_hw_rtn_e
1154 ocs_hw_teardown(ocs_hw_t *hw)
1155 {
1156 	uint32_t	i = 0;
1157 	uint32_t	iters = 10;/*XXX*/
1158 	uint32_t	max_rpi;
1159 	uint32_t destroy_queues;
1160 	uint32_t free_memory;
1161 
1162 	if (!hw) {
1163 		ocs_log_err(NULL, "bad parameter(s) hw=%p\n", hw);
1164 		return OCS_HW_RTN_ERROR;
1165 	}
1166 
1167 	destroy_queues = (hw->state == OCS_HW_STATE_ACTIVE);
1168 	free_memory = (hw->state != OCS_HW_STATE_UNINITIALIZED);
1169 
1170 	/* shutdown target wqe timer */
1171 	shutdown_target_wqe_timer(hw);
1172 
1173 	/* Cancel watchdog timer if enabled */
1174 	if(hw->watchdog_timeout) {
1175 		hw->watchdog_timeout = 0;
1176 		ocs_hw_config_watchdog_timer(hw);
1177 	}
1178 
1179 	/* Cancel Sliport Healthcheck */
1180 	if(hw->sliport_healthcheck) {
1181 		hw->sliport_healthcheck = 0;
1182 		ocs_hw_config_sli_port_health_check(hw, 0, 0);
1183 	}
1184 
1185 	if (hw->state != OCS_HW_STATE_QUEUES_ALLOCATED) {
1186 
1187 		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1188 
1189 		ocs_hw_flush(hw);
1190 
1191 		/* If there are outstanding commands, wait for them to complete */
1192 		while (!ocs_list_empty(&hw->cmd_head) && iters) {
1193 			ocs_udelay(10000);
1194 			ocs_hw_flush(hw);
1195 			iters--;
1196 		}
1197 
1198 		if (ocs_list_empty(&hw->cmd_head)) {
1199 			ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1200 		} else {
1201 			ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1202 		}
1203 
1204 		/* Cancel any remaining commands */
1205 		ocs_hw_command_cancel(hw);
1206 	} else {
1207 		hw->state = OCS_HW_STATE_TEARDOWN_IN_PROGRESS;
1208 	}
1209 
1210 	ocs_lock_free(&hw->cmd_lock);
1211 
1212 	/* Free unregistered RPI if workaround is in force */
1213 	if (hw->workaround.use_unregistered_rpi) {
1214 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, hw->workaround.unregistered_rid);
1215 	}
1216 
1217 	max_rpi = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1218 	if (hw->rpi_ref) {
1219 		for (i = 0; i < max_rpi; i++) {
1220 			if (ocs_atomic_read(&hw->rpi_ref[i].rpi_count)) {
1221 				ocs_log_debug(hw->os, "non-zero ref [%d]=%d\n",
1222 						i, ocs_atomic_read(&hw->rpi_ref[i].rpi_count));
1223 			}
1224 		}
1225 		ocs_free(hw->os, hw->rpi_ref, max_rpi * sizeof(*hw->rpi_ref));
1226 		hw->rpi_ref = NULL;
1227 	}
1228 
1229 	ocs_dma_free(hw->os, &hw->rnode_mem);
1230 
1231 	if (hw->io) {
1232 		for (i = 0; i < hw->config.n_io; i++) {
1233 			if (hw->io[i] && (hw->io[i]->sgl != NULL) &&
1234 			    (hw->io[i]->sgl->virt != NULL)) {
1235 				if(hw->io[i]->is_port_owned) {
1236 					ocs_lock_free(&hw->io[i]->axr_lock);
1237 				}
1238 				ocs_dma_free(hw->os, hw->io[i]->sgl);
1239 			}
1240 			ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
1241 			hw->io[i] = NULL;
1242 		}
1243 		ocs_free(hw->os, hw->wqe_buffs, hw->config.n_io * hw->sli.config.wqe_size);
1244 		hw->wqe_buffs = NULL;
1245 		ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t *));
1246 		hw->io = NULL;
1247 	}
1248 
1249 	ocs_dma_free(hw->os, &hw->xfer_rdy);
1250 	ocs_dma_free(hw->os, &hw->dump_sges);
1251 	ocs_dma_free(hw->os, &hw->loop_map);
1252 
1253 	ocs_lock_free(&hw->io_lock);
1254 	ocs_lock_free(&hw->io_abort_lock);
1255 
1256 
1257 	for (i = 0; i < hw->wq_count; i++) {
1258 		sli_queue_free(&hw->sli, &hw->wq[i], destroy_queues, free_memory);
1259 	}
1260 
1261 
1262 	for (i = 0; i < hw->rq_count; i++) {
1263 		sli_queue_free(&hw->sli, &hw->rq[i], destroy_queues, free_memory);
1264 	}
1265 
1266 	for (i = 0; i < hw->mq_count; i++) {
1267 		sli_queue_free(&hw->sli, &hw->mq[i], destroy_queues, free_memory);
1268 	}
1269 
1270 	for (i = 0; i < hw->cq_count; i++) {
1271 		sli_queue_free(&hw->sli, &hw->cq[i], destroy_queues, free_memory);
1272 	}
1273 
1274 	for (i = 0; i < hw->eq_count; i++) {
1275 		sli_queue_free(&hw->sli, &hw->eq[i], destroy_queues, free_memory);
1276 	}
1277 
1278 	ocs_hw_qtop_free(hw->qtop);
1279 
1280 	/* Free rq buffers */
1281 	ocs_hw_rx_free(hw);
1282 
1283 	hw_queue_teardown(hw);
1284 
1285 	ocs_hw_rqpair_teardown(hw);
1286 
1287 	if (sli_teardown(&hw->sli)) {
1288 		ocs_log_err(hw->os, "SLI teardown failed\n");
1289 	}
1290 
1291 	ocs_queue_history_free(&hw->q_hist);
1292 
1293 	/* record the fact that the queues are non-functional */
1294 	hw->state = OCS_HW_STATE_UNINITIALIZED;
1295 
1296 	/* free sequence free pool */
1297 	ocs_array_free(hw->seq_pool);
1298 	hw->seq_pool = NULL;
1299 
1300 	/* free hw_wq_callback pool */
1301 	ocs_pool_free(hw->wq_reqtag_pool);
1302 
1303 	ocs_dma_free(hw->os, &hw->domain_dmem);
1304 	ocs_dma_free(hw->os, &hw->fcf_dmem);
1305 	/* Mark HW setup as not having been called */
1306 	hw->hw_setup_called = FALSE;
1307 
1308 	return OCS_HW_RTN_SUCCESS;
1309 }
1310 
1311 ocs_hw_rtn_e
1312 ocs_hw_reset(ocs_hw_t *hw, ocs_hw_reset_e reset)
1313 {
1314 	uint32_t	i;
1315 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
1316 	uint32_t	iters;
1317 	ocs_hw_state_e prev_state = hw->state;
1318 
1319 	if (hw->state != OCS_HW_STATE_ACTIVE) {
1320 		ocs_log_test(hw->os, "HW state %d is not active\n", hw->state);
1321 	}
1322 
1323 	hw->state = OCS_HW_STATE_RESET_IN_PROGRESS;
1324 
1325 	/* shutdown target wqe timer */
1326 	shutdown_target_wqe_timer(hw);
1327 
1328 	ocs_hw_flush(hw);
1329 
1330 	/*
1331 	 * If an mailbox command requiring a DMA is outstanding (i.e. SFP/DDM),
1332 	 * then the FW will UE when the reset is issued. So attempt to complete
1333 	 * all mailbox commands.
1334 	 */
1335 	iters = 10;
1336 	while (!ocs_list_empty(&hw->cmd_head) && iters) {
1337 		ocs_udelay(10000);
1338 		ocs_hw_flush(hw);
1339 		iters--;
1340 	}
1341 
1342 	if (ocs_list_empty(&hw->cmd_head)) {
1343 		ocs_log_debug(hw->os, "All commands completed on MQ queue\n");
1344 	} else {
1345 		ocs_log_debug(hw->os, "Some commands still pending on MQ queue\n");
1346 	}
1347 
1348 	/* Reset the chip */
1349 	switch(reset) {
1350 	case OCS_HW_RESET_FUNCTION:
1351 		ocs_log_debug(hw->os, "issuing function level reset\n");
1352 		if (sli_reset(&hw->sli)) {
1353 			ocs_log_err(hw->os, "sli_reset failed\n");
1354 			rc = OCS_HW_RTN_ERROR;
1355 		}
1356 		break;
1357 	case OCS_HW_RESET_FIRMWARE:
1358 		ocs_log_debug(hw->os, "issuing firmware reset\n");
1359 		if (sli_fw_reset(&hw->sli)) {
1360 			ocs_log_err(hw->os, "sli_soft_reset failed\n");
1361 			rc = OCS_HW_RTN_ERROR;
1362 		}
1363 		/*
1364 		 * Because the FW reset leaves the FW in a non-running state,
1365 		 * follow that with a regular reset.
1366 		 */
1367 		ocs_log_debug(hw->os, "issuing function level reset\n");
1368 		if (sli_reset(&hw->sli)) {
1369 			ocs_log_err(hw->os, "sli_reset failed\n");
1370 			rc = OCS_HW_RTN_ERROR;
1371 		}
1372 		break;
1373 	default:
1374 		ocs_log_test(hw->os, "unknown reset type - no reset performed\n");
1375 		hw->state = prev_state;
1376 		return OCS_HW_RTN_ERROR;
1377 	}
1378 
1379 	/* Not safe to walk command/io lists unless they've been initialized */
1380 	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1381 		ocs_hw_command_cancel(hw);
1382 
1383 		/* Clean up the inuse list, the free list and the wait free list */
1384 		ocs_hw_io_cancel(hw);
1385 
1386 		ocs_memset(hw->domains, 0, sizeof(hw->domains));
1387 		ocs_memset(hw->fcf_index_fcfi, 0, sizeof(hw->fcf_index_fcfi));
1388 
1389 		ocs_hw_link_event_init(hw);
1390 
1391 		ocs_lock(&hw->io_lock);
1392 			/* The io lists should be empty, but remove any that didn't get cleaned up. */
1393 			while (!ocs_list_empty(&hw->io_timed_wqe)) {
1394 				ocs_list_remove_head(&hw->io_timed_wqe);
1395 			}
1396 			/* Don't clean up the io_inuse list, the backend will do that when it finishes the IO */
1397 
1398 			while (!ocs_list_empty(&hw->io_free)) {
1399 				ocs_list_remove_head(&hw->io_free);
1400 			}
1401 			while (!ocs_list_empty(&hw->io_wait_free)) {
1402 				ocs_list_remove_head(&hw->io_wait_free);
1403 			}
1404 
1405 			/* Reset the request tag pool, the HW IO request tags are reassigned in ocs_hw_setup_io() */
1406 			ocs_hw_reqtag_reset(hw);
1407 
1408 		ocs_unlock(&hw->io_lock);
1409 	}
1410 
1411 	if (prev_state != OCS_HW_STATE_UNINITIALIZED) {
1412 		for (i = 0; i < hw->wq_count; i++) {
1413 			sli_queue_reset(&hw->sli, &hw->wq[i]);
1414 		}
1415 
1416 		for (i = 0; i < hw->rq_count; i++) {
1417 			sli_queue_reset(&hw->sli, &hw->rq[i]);
1418 		}
1419 
1420 		for (i = 0; i < hw->hw_rq_count; i++) {
1421 			hw_rq_t *rq = hw->hw_rq[i];
1422 			if (rq->rq_tracker != NULL) {
1423 				uint32_t j;
1424 
1425 				for (j = 0; j < rq->entry_count; j++) {
1426 					rq->rq_tracker[j] = NULL;
1427 				}
1428 			}
1429 		}
1430 
1431 		for (i = 0; i < hw->mq_count; i++) {
1432 			sli_queue_reset(&hw->sli, &hw->mq[i]);
1433 		}
1434 
1435 		for (i = 0; i < hw->cq_count; i++) {
1436 			sli_queue_reset(&hw->sli, &hw->cq[i]);
1437 		}
1438 
1439 		for (i = 0; i < hw->eq_count; i++) {
1440 			sli_queue_reset(&hw->sli, &hw->eq[i]);
1441 		}
1442 
1443 		/* Free rq buffers */
1444 		ocs_hw_rx_free(hw);
1445 
1446 		/* Teardown the HW queue topology */
1447 		hw_queue_teardown(hw);
1448 	} else {
1449 
1450 		/* Free rq buffers */
1451 		ocs_hw_rx_free(hw);
1452 	}
1453 
1454 	/*
1455 	 * Re-apply the run-time workarounds after clearing the SLI config
1456 	 * fields in sli_reset.
1457 	 */
1458 	ocs_hw_workaround_setup(hw);
1459 	hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
1460 
1461 	return rc;
1462 }
1463 
1464 int32_t
1465 ocs_hw_get_num_eq(ocs_hw_t *hw)
1466 {
1467 	return hw->eq_count;
1468 }
1469 
1470 static int32_t
1471 ocs_hw_get_fw_timed_out(ocs_hw_t *hw)
1472 {
1473 	/* The error values below are taken from LOWLEVEL_SET_WATCHDOG_TIMER_rev1.pdf
1474 	* No further explanation is given in the document.
1475 	* */
1476 	return (sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1) == 0x2 &&
1477 		sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2) == 0x10);
1478 }
1479 
1480 
1481 ocs_hw_rtn_e
1482 ocs_hw_get(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t *value)
1483 {
1484 	ocs_hw_rtn_e		rc = OCS_HW_RTN_SUCCESS;
1485 	int32_t			tmp;
1486 
1487 	if (!value) {
1488 		return OCS_HW_RTN_ERROR;
1489 	}
1490 
1491 	*value = 0;
1492 
1493 	switch (prop) {
1494 	case OCS_HW_N_IO:
1495 		*value = hw->config.n_io;
1496 		break;
1497 	case OCS_HW_N_SGL:
1498 		*value = (hw->config.n_sgl - SLI4_SGE_MAX_RESERVED);
1499 		break;
1500 	case OCS_HW_MAX_IO:
1501 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI);
1502 		break;
1503 	case OCS_HW_MAX_NODES:
1504 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI);
1505 		break;
1506 	case OCS_HW_MAX_RQ_ENTRIES:
1507 		*value = hw->num_qentries[SLI_QTYPE_RQ];
1508 		break;
1509 	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1510 		*value = hw->config.rq_default_buffer_size;
1511 		break;
1512 	case OCS_HW_AUTO_XFER_RDY_CAPABLE:
1513 		*value = sli_get_auto_xfer_rdy_capable(&hw->sli);
1514 		break;
1515 	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1516 		*value = hw->config.auto_xfer_rdy_xri_cnt;
1517 		break;
1518 	case OCS_HW_AUTO_XFER_RDY_SIZE:
1519 		*value = hw->config.auto_xfer_rdy_size;
1520 		break;
1521 	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1522 		switch (hw->config.auto_xfer_rdy_blk_size_chip) {
1523 		case 0:
1524 			*value = 512;
1525 			break;
1526 		case 1:
1527 			*value = 1024;
1528 			break;
1529 		case 2:
1530 			*value = 2048;
1531 			break;
1532 		case 3:
1533 			*value = 4096;
1534 			break;
1535 		case 4:
1536 			*value = 520;
1537 			break;
1538 		default:
1539 			*value = 0;
1540 			rc = OCS_HW_RTN_ERROR;
1541 			break;
1542 		}
1543 		break;
1544 	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1545 		*value = hw->config.auto_xfer_rdy_t10_enable;
1546 		break;
1547 	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1548 		*value = hw->config.auto_xfer_rdy_p_type;
1549 		break;
1550 	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1551 		*value = hw->config.auto_xfer_rdy_ref_tag_is_lba;
1552 		break;
1553 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1554 		*value = hw->config.auto_xfer_rdy_app_tag_valid;
1555 		break;
1556 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1557 		*value = hw->config.auto_xfer_rdy_app_tag_value;
1558 		break;
1559 	case OCS_HW_MAX_SGE:
1560 		*value = sli_get_max_sge(&hw->sli);
1561 		break;
1562 	case OCS_HW_MAX_SGL:
1563 		*value = sli_get_max_sgl(&hw->sli);
1564 		break;
1565 	case OCS_HW_TOPOLOGY:
1566 		/*
1567 		 * Infer link.status based on link.speed.
1568 		 * Report OCS_HW_TOPOLOGY_NONE if the link is down.
1569 		 */
1570 		if (hw->link.speed == 0) {
1571 			*value = OCS_HW_TOPOLOGY_NONE;
1572 			break;
1573 		}
1574 		switch (hw->link.topology) {
1575 		case SLI_LINK_TOPO_NPORT:
1576 			*value = OCS_HW_TOPOLOGY_NPORT;
1577 			break;
1578 		case SLI_LINK_TOPO_LOOP:
1579 			*value = OCS_HW_TOPOLOGY_LOOP;
1580 			break;
1581 		case SLI_LINK_TOPO_NONE:
1582 			*value = OCS_HW_TOPOLOGY_NONE;
1583 			break;
1584 		default:
1585 			ocs_log_test(hw->os, "unsupported topology %#x\n", hw->link.topology);
1586 			rc = OCS_HW_RTN_ERROR;
1587 			break;
1588 		}
1589 		break;
1590 	case OCS_HW_CONFIG_TOPOLOGY:
1591 		*value = hw->config.topology;
1592 		break;
1593 	case OCS_HW_LINK_SPEED:
1594 		*value = hw->link.speed;
1595 		break;
1596 	case OCS_HW_LINK_CONFIG_SPEED:
1597 		switch (hw->config.speed) {
1598 		case FC_LINK_SPEED_10G:
1599 			*value = 10000;
1600 			break;
1601 		case FC_LINK_SPEED_AUTO_16_8_4:
1602 			*value = 0;
1603 			break;
1604 		case FC_LINK_SPEED_2G:
1605 			*value = 2000;
1606 			break;
1607 		case FC_LINK_SPEED_4G:
1608 			*value = 4000;
1609 			break;
1610 		case FC_LINK_SPEED_8G:
1611 			*value = 8000;
1612 			break;
1613 		case FC_LINK_SPEED_16G:
1614 			*value = 16000;
1615 			break;
1616 		case FC_LINK_SPEED_32G:
1617 			*value = 32000;
1618 			break;
1619 		default:
1620 			ocs_log_test(hw->os, "unsupported speed %#x\n", hw->config.speed);
1621 			rc = OCS_HW_RTN_ERROR;
1622 			break;
1623 		}
1624 		break;
1625 	case OCS_HW_IF_TYPE:
1626 		*value = sli_get_if_type(&hw->sli);
1627 		break;
1628 	case OCS_HW_SLI_REV:
1629 		*value = sli_get_sli_rev(&hw->sli);
1630 		break;
1631 	case OCS_HW_SLI_FAMILY:
1632 		*value = sli_get_sli_family(&hw->sli);
1633 		break;
1634 	case OCS_HW_DIF_CAPABLE:
1635 		*value = sli_get_dif_capable(&hw->sli);
1636 		break;
1637 	case OCS_HW_DIF_SEED:
1638 		*value = hw->config.dif_seed;
1639 		break;
1640 	case OCS_HW_DIF_MODE:
1641 		*value = hw->config.dif_mode;
1642 		break;
1643 	case OCS_HW_DIF_MULTI_SEPARATE:
1644 		/* Lancer supports multiple DIF separates */
1645 		if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
1646 			*value = TRUE;
1647 		} else {
1648 			*value = FALSE;
1649 		}
1650 		break;
1651 	case OCS_HW_DUMP_MAX_SIZE:
1652 		*value = hw->dump_size;
1653 		break;
1654 	case OCS_HW_DUMP_READY:
1655 		*value = sli_dump_is_ready(&hw->sli);
1656 		break;
1657 	case OCS_HW_DUMP_PRESENT:
1658 		*value = sli_dump_is_present(&hw->sli);
1659 		break;
1660 	case OCS_HW_RESET_REQUIRED:
1661 		tmp = sli_reset_required(&hw->sli);
1662 		if(tmp < 0) {
1663 			rc = OCS_HW_RTN_ERROR;
1664 		} else {
1665 			*value = tmp;
1666 		}
1667 		break;
1668 	case OCS_HW_FW_ERROR:
1669 		*value = sli_fw_error_status(&hw->sli);
1670 		break;
1671 	case OCS_HW_FW_READY:
1672 		*value = sli_fw_ready(&hw->sli);
1673 		break;
1674 	case OCS_HW_FW_TIMED_OUT:
1675 		*value = ocs_hw_get_fw_timed_out(hw);
1676 		break;
1677 	case OCS_HW_HIGH_LOGIN_MODE:
1678 		*value = sli_get_hlm_capable(&hw->sli);
1679 		break;
1680 	case OCS_HW_PREREGISTER_SGL:
1681 		*value = sli_get_sgl_preregister_required(&hw->sli);
1682 		break;
1683 	case OCS_HW_HW_REV1:
1684 		*value = sli_get_hw_revision(&hw->sli, 0);
1685 		break;
1686 	case OCS_HW_HW_REV2:
1687 		*value = sli_get_hw_revision(&hw->sli, 1);
1688 		break;
1689 	case OCS_HW_HW_REV3:
1690 		*value = sli_get_hw_revision(&hw->sli, 2);
1691 		break;
1692 	case OCS_HW_LINKCFG:
1693 		*value = hw->linkcfg;
1694 		break;
1695 	case OCS_HW_ETH_LICENSE:
1696 		*value = hw->eth_license;
1697 		break;
1698 	case OCS_HW_LINK_MODULE_TYPE:
1699 		*value = sli_get_link_module_type(&hw->sli);
1700 		break;
1701 	case OCS_HW_NUM_CHUTES:
1702 		*value = ocs_hw_get_num_chutes(hw);
1703 		break;
1704 	case OCS_HW_DISABLE_AR_TGT_DIF:
1705 		*value = hw->workaround.disable_ar_tgt_dif;
1706 		break;
1707 	case OCS_HW_EMULATE_I_ONLY_AAB:
1708 		*value = hw->config.i_only_aab;
1709 		break;
1710 	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
1711 		*value = hw->config.emulate_tgt_wqe_timeout;
1712 		break;
1713 	case OCS_HW_VPD_LEN:
1714 		*value = sli_get_vpd_len(&hw->sli);
1715 		break;
1716 	case OCS_HW_SGL_CHAINING_CAPABLE:
1717 		*value = sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported;
1718 		break;
1719 	case OCS_HW_SGL_CHAINING_ALLOWED:
1720 		/*
1721 		 * SGL Chaining is allowed in the following cases:
1722 		 *   1. Lancer with host SGL Lists
1723 		 *   2. Skyhawk with pre-registered SGL Lists
1724 		 */
1725 		*value = FALSE;
1726 		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1727 		    !sli_get_sgl_preregister(&hw->sli) &&
1728 		    SLI4_IF_TYPE_LANCER_FC_ETH  == sli_get_if_type(&hw->sli)) {
1729 			*value = TRUE;
1730 		}
1731 
1732 		if ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1733 		    sli_get_sgl_preregister(&hw->sli) &&
1734 		    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
1735 			(SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
1736 			*value = TRUE;
1737 		}
1738 		break;
1739 	case OCS_HW_SGL_CHAINING_HOST_ALLOCATED:
1740 		/* Only lancer supports host allocated SGL Chaining buffers. */
1741 		*value = ((sli_get_is_sgl_chaining_capable(&hw->sli) || hw->workaround.sglc_misreported) &&
1742 			  (SLI4_IF_TYPE_LANCER_FC_ETH  == sli_get_if_type(&hw->sli)));
1743 		break;
1744 	case OCS_HW_SEND_FRAME_CAPABLE:
1745 		if (hw->workaround.ignore_send_frame) {
1746 			*value = 0;
1747 		} else {
1748 			/* Only lancer is capable */
1749 			*value = sli_get_if_type(&hw->sli) == SLI4_IF_TYPE_LANCER_FC_ETH;
1750 		}
1751 		break;
1752 	case OCS_HW_RQ_SELECTION_POLICY:
1753 		*value = hw->config.rq_selection_policy;
1754 		break;
1755 	case OCS_HW_RR_QUANTA:
1756 		*value = hw->config.rr_quanta;
1757 		break;
1758 	case OCS_HW_MAX_VPORTS:
1759 		*value = sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_VPI);
1760 	default:
1761 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1762 		rc = OCS_HW_RTN_ERROR;
1763 	}
1764 
1765 	return rc;
1766 }
1767 
1768 void *
1769 ocs_hw_get_ptr(ocs_hw_t *hw, ocs_hw_property_e prop)
1770 {
1771 	void	*rc = NULL;
1772 
1773 	switch (prop) {
1774 	case OCS_HW_WWN_NODE:
1775 		rc = sli_get_wwn_node(&hw->sli);
1776 		break;
1777 	case OCS_HW_WWN_PORT:
1778 		rc = sli_get_wwn_port(&hw->sli);
1779 		break;
1780 	case OCS_HW_VPD:
1781 		/* make sure VPD length is non-zero */
1782 		if (sli_get_vpd_len(&hw->sli)) {
1783 			rc = sli_get_vpd(&hw->sli);
1784 		}
1785 		break;
1786 	case OCS_HW_FW_REV:
1787 		rc = sli_get_fw_name(&hw->sli, 0);
1788 		break;
1789 	case OCS_HW_FW_REV2:
1790 		rc = sli_get_fw_name(&hw->sli, 1);
1791 		break;
1792 	case OCS_HW_IPL:
1793 		rc = sli_get_ipl_name(&hw->sli);
1794 		break;
1795 	case OCS_HW_PORTNUM:
1796 		rc = sli_get_portnum(&hw->sli);
1797 		break;
1798 	case OCS_HW_BIOS_VERSION_STRING:
1799 		rc = sli_get_bios_version_string(&hw->sli);
1800 		break;
1801 	default:
1802 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
1803 	}
1804 
1805 	return rc;
1806 }
1807 
1808 
1809 
1810 ocs_hw_rtn_e
1811 ocs_hw_set(ocs_hw_t *hw, ocs_hw_property_e prop, uint32_t value)
1812 {
1813 	ocs_hw_rtn_e		rc = OCS_HW_RTN_SUCCESS;
1814 
1815 	switch (prop) {
1816 	case OCS_HW_N_IO:
1817 		if (value > sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI) ||
1818 		    value == 0) {
1819 			ocs_log_test(hw->os, "IO value out of range %d vs %d\n",
1820 					value, sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_XRI));
1821 			rc = OCS_HW_RTN_ERROR;
1822 		} else {
1823 			hw->config.n_io = value;
1824 		}
1825 		break;
1826 	case OCS_HW_N_SGL:
1827 		value += SLI4_SGE_MAX_RESERVED;
1828 		if (value > sli_get_max_sgl(&hw->sli)) {
1829 			ocs_log_test(hw->os, "SGL value out of range %d vs %d\n",
1830 					value, sli_get_max_sgl(&hw->sli));
1831 			rc = OCS_HW_RTN_ERROR;
1832 		} else {
1833 			hw->config.n_sgl = value;
1834 		}
1835 		break;
1836 	case OCS_HW_TOPOLOGY:
1837 		if ((sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) &&
1838 				(value != OCS_HW_TOPOLOGY_AUTO)) {
1839 			ocs_log_test(hw->os, "unsupported topology=%#x medium=%#x\n",
1840 					value, sli_get_medium(&hw->sli));
1841 			rc = OCS_HW_RTN_ERROR;
1842 			break;
1843 		}
1844 
1845 		switch (value) {
1846 		case OCS_HW_TOPOLOGY_AUTO:
1847 			if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
1848 				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC);
1849 			} else {
1850 				sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FCOE);
1851 			}
1852 			break;
1853 		case OCS_HW_TOPOLOGY_NPORT:
1854 			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_DA);
1855 			break;
1856 		case OCS_HW_TOPOLOGY_LOOP:
1857 			sli_set_topology(&hw->sli, SLI4_READ_CFG_TOPO_FC_AL);
1858 			break;
1859 		default:
1860 			ocs_log_test(hw->os, "unsupported topology %#x\n", value);
1861 			rc = OCS_HW_RTN_ERROR;
1862 		}
1863 		hw->config.topology = value;
1864 		break;
1865 	case OCS_HW_LINK_SPEED:
1866 		if (sli_get_medium(&hw->sli) != SLI_LINK_MEDIUM_FC) {
1867 			switch (value) {
1868 			case 0: 	/* Auto-speed negotiation */
1869 			case 10000:	/* FCoE speed */
1870 				hw->config.speed = FC_LINK_SPEED_10G;
1871 				break;
1872 			default:
1873 				ocs_log_test(hw->os, "unsupported speed=%#x medium=%#x\n",
1874 						value, sli_get_medium(&hw->sli));
1875 				rc = OCS_HW_RTN_ERROR;
1876 			}
1877 			break;
1878 		}
1879 
1880 		switch (value) {
1881 		case 0:		/* Auto-speed negotiation */
1882 			hw->config.speed = FC_LINK_SPEED_AUTO_16_8_4;
1883 			break;
1884 		case 2000:	/* FC speeds */
1885 			hw->config.speed = FC_LINK_SPEED_2G;
1886 			break;
1887 		case 4000:
1888 			hw->config.speed = FC_LINK_SPEED_4G;
1889 			break;
1890 		case 8000:
1891 			hw->config.speed = FC_LINK_SPEED_8G;
1892 			break;
1893 		case 16000:
1894 			hw->config.speed = FC_LINK_SPEED_16G;
1895 			break;
1896 		case 32000:
1897 			hw->config.speed = FC_LINK_SPEED_32G;
1898 			break;
1899 		default:
1900 			ocs_log_test(hw->os, "unsupported speed %d\n", value);
1901 			rc = OCS_HW_RTN_ERROR;
1902 		}
1903 		break;
1904 	case OCS_HW_DIF_SEED:
1905 		/* Set the DIF seed - only for lancer right now */
1906 		if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
1907 			ocs_log_test(hw->os, "DIF seed not supported for this device\n");
1908 			rc = OCS_HW_RTN_ERROR;
1909 		} else {
1910 			hw->config.dif_seed = value;
1911 		}
1912 		break;
1913 	case OCS_HW_DIF_MODE:
1914 		switch (value) {
1915 		case OCS_HW_DIF_MODE_INLINE:
1916 			/*
1917 			 *  Make sure we support inline DIF.
1918 			 *
1919 			 * Note: Having both bits clear means that we have old
1920 			 *	FW that doesn't set the bits.
1921 			 */
1922 			if (sli_is_dif_inline_capable(&hw->sli)) {
1923 				hw->config.dif_mode = value;
1924 			} else {
1925 				ocs_log_test(hw->os, "chip does not support DIF inline\n");
1926 				rc = OCS_HW_RTN_ERROR;
1927 			}
1928 			break;
1929 		case OCS_HW_DIF_MODE_SEPARATE:
1930 			/* Make sure we support DIF separates. */
1931 			if (sli_is_dif_separate_capable(&hw->sli)) {
1932 				hw->config.dif_mode = value;
1933 			} else {
1934 				ocs_log_test(hw->os, "chip does not support DIF separate\n");
1935 				rc = OCS_HW_RTN_ERROR;
1936 			}
1937 		}
1938 		break;
1939 	case OCS_HW_RQ_PROCESS_LIMIT: {
1940 		hw_rq_t *rq;
1941 		uint32_t i;
1942 
1943 		/* For each hw_rq object, set its parent CQ limit value */
1944 		for (i = 0; i < hw->hw_rq_count; i++) {
1945 			rq = hw->hw_rq[i];
1946 			hw->cq[rq->cq->instance].proc_limit = value;
1947 		}
1948 		break;
1949 	}
1950 	case OCS_HW_RQ_DEFAULT_BUFFER_SIZE:
1951 		hw->config.rq_default_buffer_size = value;
1952 		break;
1953 	case OCS_HW_AUTO_XFER_RDY_XRI_CNT:
1954 		hw->config.auto_xfer_rdy_xri_cnt = value;
1955 		break;
1956 	case OCS_HW_AUTO_XFER_RDY_SIZE:
1957 		hw->config.auto_xfer_rdy_size = value;
1958 		break;
1959 	case OCS_HW_AUTO_XFER_RDY_BLK_SIZE:
1960 		switch (value) {
1961 		case 512:
1962 			hw->config.auto_xfer_rdy_blk_size_chip = 0;
1963 			break;
1964 		case 1024:
1965 			hw->config.auto_xfer_rdy_blk_size_chip = 1;
1966 			break;
1967 		case 2048:
1968 			hw->config.auto_xfer_rdy_blk_size_chip = 2;
1969 			break;
1970 		case 4096:
1971 			hw->config.auto_xfer_rdy_blk_size_chip = 3;
1972 			break;
1973 		case 520:
1974 			hw->config.auto_xfer_rdy_blk_size_chip = 4;
1975 			break;
1976 		default:
1977 			ocs_log_err(hw->os, "Invalid block size %d\n",
1978 				    value);
1979 			rc = OCS_HW_RTN_ERROR;
1980 		}
1981 		break;
1982 	case OCS_HW_AUTO_XFER_RDY_T10_ENABLE:
1983 		hw->config.auto_xfer_rdy_t10_enable = value;
1984 		break;
1985 	case OCS_HW_AUTO_XFER_RDY_P_TYPE:
1986 		hw->config.auto_xfer_rdy_p_type = value;
1987 		break;
1988 	case OCS_HW_AUTO_XFER_RDY_REF_TAG_IS_LBA:
1989 		hw->config.auto_xfer_rdy_ref_tag_is_lba = value;
1990 		break;
1991 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALID:
1992 		hw->config.auto_xfer_rdy_app_tag_valid = value;
1993 		break;
1994 	case OCS_HW_AUTO_XFER_RDY_APP_TAG_VALUE:
1995 		hw->config.auto_xfer_rdy_app_tag_value = value;
1996 		break;
1997 	case OCS_ESOC:
1998 		hw->config.esoc = value;
1999 	case OCS_HW_HIGH_LOGIN_MODE:
2000 		rc = sli_set_hlm(&hw->sli, value);
2001 		break;
2002 	case OCS_HW_PREREGISTER_SGL:
2003 		rc = sli_set_sgl_preregister(&hw->sli, value);
2004 		break;
2005 	case OCS_HW_ETH_LICENSE:
2006 		hw->eth_license = value;
2007 		break;
2008 	case OCS_HW_EMULATE_I_ONLY_AAB:
2009 		hw->config.i_only_aab = value;
2010 		break;
2011 	case OCS_HW_EMULATE_TARGET_WQE_TIMEOUT:
2012 		hw->config.emulate_tgt_wqe_timeout = value;
2013 		break;
2014 	case OCS_HW_BOUNCE:
2015 		hw->config.bounce = value;
2016 		break;
2017 	case OCS_HW_RQ_SELECTION_POLICY:
2018 		hw->config.rq_selection_policy = value;
2019 		break;
2020 	case OCS_HW_RR_QUANTA:
2021 		hw->config.rr_quanta = value;
2022 		break;
2023 	default:
2024 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2025 		rc = OCS_HW_RTN_ERROR;
2026 	}
2027 
2028 	return rc;
2029 }
2030 
2031 
2032 ocs_hw_rtn_e
2033 ocs_hw_set_ptr(ocs_hw_t *hw, ocs_hw_property_e prop, void *value)
2034 {
2035 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2036 
2037 	switch (prop) {
2038 	case OCS_HW_WAR_VERSION:
2039 		hw->hw_war_version = value;
2040 		break;
2041 	case OCS_HW_FILTER_DEF: {
2042 		char *p = value;
2043 		uint32_t idx = 0;
2044 
2045 		for (idx = 0; idx < ARRAY_SIZE(hw->config.filter_def); idx++) {
2046 			hw->config.filter_def[idx] = 0;
2047 		}
2048 
2049 		for (idx = 0; (idx < ARRAY_SIZE(hw->config.filter_def)) && (p != NULL) && *p; ) {
2050 			hw->config.filter_def[idx++] = ocs_strtoul(p, 0, 0);
2051 			p = ocs_strchr(p, ',');
2052 			if (p != NULL) {
2053 				p++;
2054 			}
2055 		}
2056 
2057 		break;
2058 	}
2059 	default:
2060 		ocs_log_test(hw->os, "unsupported property %#x\n", prop);
2061 		rc = OCS_HW_RTN_ERROR;
2062 		break;
2063 	}
2064 	return rc;
2065 }
2066 /**
2067  * @ingroup interrupt
2068  * @brief Check for the events associated with the interrupt vector.
2069  *
2070  * @param hw Hardware context.
2071  * @param vector Zero-based interrupt vector number.
2072  *
2073  * @return Returns 0 on success, or a non-zero value on failure.
2074  */
2075 int32_t
2076 ocs_hw_event_check(ocs_hw_t *hw, uint32_t vector)
2077 {
2078 	int32_t rc = 0;
2079 
2080 	if (!hw) {
2081 		ocs_log_err(NULL, "HW context NULL?!?\n");
2082 		return -1;
2083 	}
2084 
2085 	if (vector > hw->eq_count) {
2086 		ocs_log_err(hw->os, "vector %d. max %d\n",
2087 				vector, hw->eq_count);
2088 		return -1;
2089 	}
2090 
2091 	/*
2092 	 * The caller should disable interrupts if they wish to prevent us
2093 	 * from processing during a shutdown. The following states are defined:
2094 	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
2095 	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2096 	 *                                    queues are cleared.
2097 	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
2098 	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2099 	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2100 	 *                                        completions.
2101 	 */
2102 	if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
2103 		rc = sli_queue_is_empty(&hw->sli, &hw->eq[vector]);
2104 
2105 		/* Re-arm queue if there are no entries */
2106 		if (rc != 0) {
2107 			sli_queue_arm(&hw->sli, &hw->eq[vector], TRUE);
2108 		}
2109 	}
2110 	return rc;
2111 }
2112 
2113 void
2114 ocs_hw_unsol_process_bounce(void *arg)
2115 {
2116 	ocs_hw_sequence_t *seq = arg;
2117 	ocs_hw_t *hw = seq->hw;
2118 
2119 	ocs_hw_assert(hw != NULL);
2120 	ocs_hw_assert(hw->callback.unsolicited != NULL);
2121 
2122 	hw->callback.unsolicited(hw->args.unsolicited, seq);
2123 }
2124 
2125 int32_t
2126 ocs_hw_process(ocs_hw_t *hw, uint32_t vector, uint32_t max_isr_time_msec)
2127 {
2128 	hw_eq_t *eq;
2129 	int32_t rc = 0;
2130 
2131 	CPUTRACE("");
2132 
2133 	/*
2134 	 * The caller should disable interrupts if they wish to prevent us
2135 	 * from processing during a shutdown. The following states are defined:
2136 	 *   OCS_HW_STATE_UNINITIALIZED - No queues allocated
2137 	 *   OCS_HW_STATE_QUEUES_ALLOCATED - The state after a chip reset,
2138 	 *                                    queues are cleared.
2139 	 *   OCS_HW_STATE_ACTIVE - Chip and queues are operational
2140 	 *   OCS_HW_STATE_RESET_IN_PROGRESS - reset, we still want completions
2141 	 *   OCS_HW_STATE_TEARDOWN_IN_PROGRESS - We still want mailbox
2142 	 *                                        completions.
2143 	 */
2144 	if (hw->state == OCS_HW_STATE_UNINITIALIZED) {
2145 		return 0;
2146 	}
2147 
2148 	/* Get pointer to hw_eq_t */
2149 	eq = hw->hw_eq[vector];
2150 
2151 	OCS_STAT(eq->use_count++);
2152 
2153 	rc = ocs_hw_eq_process(hw, eq, max_isr_time_msec);
2154 
2155 	return rc;
2156 }
2157 
2158 /**
2159  * @ingroup interrupt
2160  * @brief Process events associated with an EQ.
2161  *
2162  * @par Description
2163  * Loop termination:
2164  * @n @n Without a mechanism to terminate the completion processing loop, it
2165  * is possible under some workload conditions for the loop to never terminate
2166  * (or at least take longer than the OS is happy to have an interrupt handler
2167  * or kernel thread context hold a CPU without yielding).
2168  * @n @n The approach taken here is to periodically check how much time
2169  * we have been in this
2170  * processing loop, and if we exceed a predetermined time (multiple seconds), the
2171  * loop is terminated, and ocs_hw_process() returns.
2172  *
2173  * @param hw Hardware context.
2174  * @param eq Pointer to HW EQ object.
2175  * @param max_isr_time_msec Maximum time in msec to stay in this function.
2176  *
2177  * @return Returns 0 on success, or a non-zero value on failure.
2178  */
2179 int32_t
2180 ocs_hw_eq_process(ocs_hw_t *hw, hw_eq_t *eq, uint32_t max_isr_time_msec)
2181 {
2182 	uint8_t		eqe[sizeof(sli4_eqe_t)] = { 0 };
2183 	uint32_t	done = FALSE;
2184 	uint32_t	tcheck_count;
2185 	time_t		tstart;
2186 	time_t		telapsed;
2187 
2188 	tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2189 	tstart = ocs_msectime();
2190 
2191 	CPUTRACE("");
2192 
2193 	while (!done && !sli_queue_read(&hw->sli, eq->queue, eqe)) {
2194 		uint16_t	cq_id = 0;
2195 		int32_t		rc;
2196 
2197 		rc = sli_eq_parse(&hw->sli, eqe, &cq_id);
2198 		if (unlikely(rc)) {
2199 			if (rc > 0) {
2200 				uint32_t i;
2201 
2202 				/*
2203 				 * Received a sentinel EQE indicating the EQ is full.
2204 				 * Process all CQs
2205 				 */
2206 				for (i = 0; i < hw->cq_count; i++) {
2207 					ocs_hw_cq_process(hw, hw->hw_cq[i]);
2208 				}
2209 				continue;
2210 			} else {
2211 				return rc;
2212 			}
2213 		} else {
2214 			int32_t index = ocs_hw_queue_hash_find(hw->cq_hash, cq_id);
2215 			if (likely(index >= 0)) {
2216 				ocs_hw_cq_process(hw, hw->hw_cq[index]);
2217 			} else {
2218 				ocs_log_err(hw->os, "bad CQ_ID %#06x\n", cq_id);
2219 			}
2220 		}
2221 
2222 
2223 		if (eq->queue->n_posted > (eq->queue->posted_limit)) {
2224 			sli_queue_arm(&hw->sli, eq->queue, FALSE);
2225 		}
2226 
2227 		if (tcheck_count && (--tcheck_count == 0)) {
2228 			tcheck_count = OCS_HW_TIMECHECK_ITERATIONS;
2229 			telapsed = ocs_msectime() - tstart;
2230 			if (telapsed >= max_isr_time_msec) {
2231 				done = TRUE;
2232 			}
2233 		}
2234 	}
2235 	sli_queue_eq_arm(&hw->sli, eq->queue, TRUE);
2236 
2237 	return 0;
2238 }
2239 
2240 /**
2241  * @brief Submit queued (pending) mbx commands.
2242  *
2243  * @par Description
2244  * Submit queued mailbox commands.
2245  * --- Assumes that hw->cmd_lock is held ---
2246  *
2247  * @param hw Hardware context.
2248  *
2249  * @return Returns 0 on success, or a negative error code value on failure.
2250  */
2251 static int32_t
2252 ocs_hw_cmd_submit_pending(ocs_hw_t *hw)
2253 {
2254 	ocs_command_ctx_t *ctx;
2255 	int32_t rc = 0;
2256 
2257 	/* Assumes lock held */
2258 
2259 	/* Only submit MQE if there's room */
2260 	while (hw->cmd_head_count < (OCS_HW_MQ_DEPTH - 1)) {
2261 		ctx = ocs_list_remove_head(&hw->cmd_pending);
2262 		if (ctx == NULL) {
2263 			break;
2264 		}
2265 		ocs_list_add_tail(&hw->cmd_head, ctx);
2266 		hw->cmd_head_count++;
2267 		if (sli_queue_write(&hw->sli, hw->mq, ctx->buf) < 0) {
2268 			ocs_log_test(hw->os, "sli_queue_write failed: %d\n", rc);
2269 			rc = -1;
2270 			break;
2271 		}
2272 	}
2273 	return rc;
2274 }
2275 
2276 /**
2277  * @ingroup io
2278  * @brief Issue a SLI command.
2279  *
2280  * @par Description
2281  * Send a mailbox command to the hardware, and either wait for a completion
2282  * (OCS_CMD_POLL) or get an optional asynchronous completion (OCS_CMD_NOWAIT).
2283  *
2284  * @param hw Hardware context.
2285  * @param cmd Buffer containing a formatted command and results.
2286  * @param opts Command options:
2287  *  - OCS_CMD_POLL - Command executes synchronously and busy-waits for the completion.
2288  *  - OCS_CMD_NOWAIT - Command executes asynchronously. Uses callback.
2289  * @param cb Function callback used for asynchronous mode. May be NULL.
2290  * @n Prototype is <tt>(*cb)(void *arg, uint8_t *cmd)</tt>.
2291  * @n @n @b Note: If the
2292  * callback function pointer is NULL, the results of the command are silently
2293  * discarded, allowing this pointer to exist solely on the stack.
2294  * @param arg Argument passed to an asynchronous callback.
2295  *
2296  * @return Returns 0 on success, or a non-zero value on failure.
2297  */
2298 ocs_hw_rtn_e
2299 ocs_hw_command(ocs_hw_t *hw, uint8_t *cmd, uint32_t opts, void *cb, void *arg)
2300 {
2301 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2302 
2303 	/*
2304 	 * If the chip is in an error state (UE'd) then reject this mailbox
2305 	 *  command.
2306 	 */
2307 	if (sli_fw_error_status(&hw->sli) > 0) {
2308 		uint32_t err1 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR1);
2309 		uint32_t err2 = sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_ERROR2);
2310 		if (hw->expiration_logged == 0 && err1 == 0x2 && err2 == 0x10) {
2311 			hw->expiration_logged = 1;
2312 			ocs_log_crit(hw->os,"Emulex: Heartbeat expired after %d seconds\n",
2313 					hw->watchdog_timeout);
2314 		}
2315 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2316 		ocs_log_crit(hw->os, "status=%#x error1=%#x error2=%#x\n",
2317 			sli_reg_read(&hw->sli, SLI4_REG_SLIPORT_STATUS),
2318 			err1, err2);
2319 
2320 		return OCS_HW_RTN_ERROR;
2321 	}
2322 
2323 	if (OCS_CMD_POLL == opts) {
2324 
2325 		ocs_lock(&hw->cmd_lock);
2326 		if (hw->mq->length && !sli_queue_is_empty(&hw->sli, hw->mq)) {
2327 			/*
2328 			 * Can't issue Boot-strap mailbox command with other
2329 			 * mail-queue commands pending as this interaction is
2330 			 * undefined
2331 			 */
2332 			rc = OCS_HW_RTN_ERROR;
2333 		} else {
2334 			void *bmbx = hw->sli.bmbx.virt;
2335 
2336 			ocs_memset(bmbx, 0, SLI4_BMBX_SIZE);
2337 			ocs_memcpy(bmbx, cmd, SLI4_BMBX_SIZE);
2338 
2339 			if (sli_bmbx_command(&hw->sli) == 0) {
2340 				rc = OCS_HW_RTN_SUCCESS;
2341 				ocs_memcpy(cmd, bmbx, SLI4_BMBX_SIZE);
2342 			}
2343 		}
2344 		ocs_unlock(&hw->cmd_lock);
2345 	} else if (OCS_CMD_NOWAIT == opts) {
2346 		ocs_command_ctx_t	*ctx = NULL;
2347 
2348 		ctx = ocs_malloc(hw->os, sizeof(ocs_command_ctx_t), OCS_M_ZERO | OCS_M_NOWAIT);
2349 		if (!ctx) {
2350 			ocs_log_err(hw->os, "can't allocate command context\n");
2351 			return OCS_HW_RTN_NO_RESOURCES;
2352 		}
2353 
2354 		if (hw->state != OCS_HW_STATE_ACTIVE) {
2355 			ocs_log_err(hw->os, "Can't send command, HW state=%d\n", hw->state);
2356 			ocs_free(hw->os, ctx, sizeof(*ctx));
2357 			return OCS_HW_RTN_ERROR;
2358 		}
2359 
2360 		if (cb) {
2361 			ctx->cb = cb;
2362 			ctx->arg = arg;
2363 		}
2364 		ctx->buf = cmd;
2365 		ctx->ctx = hw;
2366 
2367 		ocs_lock(&hw->cmd_lock);
2368 
2369 			/* Add to pending list */
2370 			ocs_list_add_tail(&hw->cmd_pending, ctx);
2371 
2372 			/* Submit as much of the pending list as we can */
2373 			if (ocs_hw_cmd_submit_pending(hw) == 0) {
2374 				rc = OCS_HW_RTN_SUCCESS;
2375 			}
2376 
2377 		ocs_unlock(&hw->cmd_lock);
2378 	}
2379 
2380 	return rc;
2381 }
2382 
2383 /**
2384  * @ingroup devInitShutdown
2385  * @brief Register a callback for the given event.
2386  *
2387  * @param hw Hardware context.
2388  * @param which Event of interest.
2389  * @param func Function to call when the event occurs.
2390  * @param arg Argument passed to the callback function.
2391  *
2392  * @return Returns 0 on success, or a non-zero value on failure.
2393  */
2394 ocs_hw_rtn_e
2395 ocs_hw_callback(ocs_hw_t *hw, ocs_hw_callback_e which, void *func, void *arg)
2396 {
2397 
2398 	if (!hw || !func || (which >= OCS_HW_CB_MAX)) {
2399 		ocs_log_err(NULL, "bad parameter hw=%p which=%#x func=%p\n",
2400 			    hw, which, func);
2401 		return OCS_HW_RTN_ERROR;
2402 	}
2403 
2404 	switch (which) {
2405 	case OCS_HW_CB_DOMAIN:
2406 		hw->callback.domain = func;
2407 		hw->args.domain = arg;
2408 		break;
2409 	case OCS_HW_CB_PORT:
2410 		hw->callback.port = func;
2411 		hw->args.port = arg;
2412 		break;
2413 	case OCS_HW_CB_UNSOLICITED:
2414 		hw->callback.unsolicited = func;
2415 		hw->args.unsolicited = arg;
2416 		break;
2417 	case OCS_HW_CB_REMOTE_NODE:
2418 		hw->callback.rnode = func;
2419 		hw->args.rnode = arg;
2420 		break;
2421 	case OCS_HW_CB_BOUNCE:
2422 		hw->callback.bounce = func;
2423 		hw->args.bounce = arg;
2424 		break;
2425 	default:
2426 		ocs_log_test(hw->os, "unknown callback %#x\n", which);
2427 		return OCS_HW_RTN_ERROR;
2428 	}
2429 
2430 	return OCS_HW_RTN_SUCCESS;
2431 }
2432 
2433 /**
2434  * @ingroup port
2435  * @brief Allocate a port object.
2436  *
2437  * @par Description
2438  * This function allocates a VPI object for the port and stores it in the
2439  * indicator field of the port object.
2440  *
2441  * @param hw Hardware context.
2442  * @param sport SLI port object used to connect to the domain.
2443  * @param domain Domain object associated with this port (may be NULL).
2444  * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
2445  *
2446  * @return Returns 0 on success, or a non-zero value on failure.
2447  */
2448 ocs_hw_rtn_e
2449 ocs_hw_port_alloc(ocs_hw_t *hw, ocs_sli_port_t *sport, ocs_domain_t *domain,
2450 		uint8_t *wwpn)
2451 {
2452 	uint8_t	*cmd = NULL;
2453 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2454 	uint32_t index;
2455 
2456 	sport->indicator = UINT32_MAX;
2457 	sport->hw = hw;
2458 	sport->ctx.app = sport;
2459 	sport->sm_free_req_pending = 0;
2460 
2461 	/*
2462 	 * Check if the chip is in an error state (UE'd) before proceeding.
2463 	 */
2464 	if (sli_fw_error_status(&hw->sli) > 0) {
2465 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2466 		return OCS_HW_RTN_ERROR;
2467 	}
2468 
2469 	if (wwpn) {
2470 		ocs_memcpy(&sport->sli_wwpn, wwpn, sizeof(sport->sli_wwpn));
2471 	}
2472 
2473 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VPI, &sport->indicator, &index)) {
2474 		ocs_log_err(hw->os, "FCOE_VPI allocation failure\n");
2475 		return OCS_HW_RTN_ERROR;
2476 	}
2477 
2478 	if (domain != NULL) {
2479 		ocs_sm_function_t	next = NULL;
2480 
2481 		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2482 		if (!cmd) {
2483 			ocs_log_err(hw->os, "command memory allocation failed\n");
2484 			rc = OCS_HW_RTN_NO_MEMORY;
2485 			goto ocs_hw_port_alloc_out;
2486 		}
2487 
2488 		/* If the WWPN is NULL, fetch the default WWPN and WWNN before
2489 		 * initializing the VPI
2490 		 */
2491 		if (!wwpn) {
2492 			next = __ocs_hw_port_alloc_read_sparm64;
2493 		} else {
2494 			next = __ocs_hw_port_alloc_init_vpi;
2495 		}
2496 
2497 		ocs_sm_transition(&sport->ctx, next, cmd);
2498 	} else if (!wwpn) {
2499 		/* This is the convention for the HW, not SLI */
2500 		ocs_log_test(hw->os, "need WWN for physical port\n");
2501 		rc = OCS_HW_RTN_ERROR;
2502 	} else {
2503 		/* domain NULL and wwpn non-NULL */
2504 		ocs_sm_transition(&sport->ctx, __ocs_hw_port_alloc_init, NULL);
2505 	}
2506 
2507 ocs_hw_port_alloc_out:
2508 	if (rc != OCS_HW_RTN_SUCCESS) {
2509 		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2510 
2511 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
2512 	}
2513 
2514 	return rc;
2515 }
2516 
2517 /**
2518  * @ingroup port
2519  * @brief Attach a physical/virtual SLI port to a domain.
2520  *
2521  * @par Description
2522  * This function registers a previously-allocated VPI with the
2523  * device.
2524  *
2525  * @param hw Hardware context.
2526  * @param sport Pointer to the SLI port object.
2527  * @param fc_id Fibre Channel ID to associate with this port.
2528  *
2529  * @return Returns OCS_HW_RTN_SUCCESS on success, or an error code on failure.
2530  */
2531 ocs_hw_rtn_e
2532 ocs_hw_port_attach(ocs_hw_t *hw, ocs_sli_port_t *sport, uint32_t fc_id)
2533 {
2534 	uint8_t	*buf = NULL;
2535 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2536 
2537 	if (!hw || !sport) {
2538 		ocs_log_err(hw ? hw->os : NULL,
2539 			"bad parameter(s) hw=%p sport=%p\n", hw,
2540 			sport);
2541 		return OCS_HW_RTN_ERROR;
2542 	}
2543 
2544 	/*
2545 	 * Check if the chip is in an error state (UE'd) before proceeding.
2546 	 */
2547 	if (sli_fw_error_status(&hw->sli) > 0) {
2548 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2549 		return OCS_HW_RTN_ERROR;
2550 	}
2551 
2552 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2553 	if (!buf) {
2554 		ocs_log_err(hw->os, "no buffer for command\n");
2555 		return OCS_HW_RTN_NO_MEMORY;
2556 	}
2557 
2558 	sport->fc_id = fc_id;
2559 	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_ATTACH, buf);
2560 	return rc;
2561 }
2562 
2563 /**
2564  * @brief Called when the port control command completes.
2565  *
2566  * @par Description
2567  * We only need to free the mailbox command buffer.
2568  *
2569  * @param hw Hardware context.
2570  * @param status Status field from the mbox completion.
2571  * @param mqe Mailbox response structure.
2572  * @param arg Pointer to a callback function that signals the caller that the command is done.
2573  *
2574  * @return Returns 0.
2575  */
2576 static int32_t
2577 ocs_hw_cb_port_control(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
2578 {
2579 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
2580 	return 0;
2581 }
2582 
2583 /**
2584  * @ingroup port
2585  * @brief Control a port (initialize, shutdown, or set link configuration).
2586  *
2587  * @par Description
2588  * This function controls a port depending on the @c ctrl parameter:
2589  * - @b OCS_HW_PORT_INIT -
2590  * Issues the CONFIG_LINK and INIT_LINK commands for the specified port.
2591  * The HW generates an OCS_HW_DOMAIN_FOUND event when the link comes up.
2592  * .
2593  * - @b OCS_HW_PORT_SHUTDOWN -
2594  * Issues the DOWN_LINK command for the specified port.
2595  * The HW generates an OCS_HW_DOMAIN_LOST event when the link is down.
2596  * .
2597  * - @b OCS_HW_PORT_SET_LINK_CONFIG -
2598  * Sets the link configuration.
2599  *
2600  * @param hw Hardware context.
2601  * @param ctrl Specifies the operation:
2602  * - OCS_HW_PORT_INIT
2603  * - OCS_HW_PORT_SHUTDOWN
2604  * - OCS_HW_PORT_SET_LINK_CONFIG
2605  *
2606  * @param value Operation-specific value.
2607  * - OCS_HW_PORT_INIT - Selective reset AL_PA
2608  * - OCS_HW_PORT_SHUTDOWN - N/A
2609  * - OCS_HW_PORT_SET_LINK_CONFIG - An enum #ocs_hw_linkcfg_e value.
2610  *
2611  * @param cb Callback function to invoke the following operation.
2612  * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2613  * are handled by the OCS_HW_CB_DOMAIN callbacks).
2614  * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2615  * completes.
2616  *
2617  * @param arg Callback argument invoked after the command completes.
2618  * - OCS_HW_PORT_INIT/OCS_HW_PORT_SHUTDOWN - NULL (link events
2619  * are handled by the OCS_HW_CB_DOMAIN callbacks).
2620  * - OCS_HW_PORT_SET_LINK_CONFIG - Invoked after linkcfg mailbox command
2621  * completes.
2622  *
2623  * @return Returns 0 on success, or a non-zero value on failure.
2624  */
2625 ocs_hw_rtn_e
2626 ocs_hw_port_control(ocs_hw_t *hw, ocs_hw_port_e ctrl, uintptr_t value, ocs_hw_port_control_cb_t cb, void *arg)
2627 {
2628 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
2629 
2630 	switch (ctrl) {
2631 	case OCS_HW_PORT_INIT:
2632 	{
2633 		uint8_t	*init_link;
2634 		uint32_t speed = 0;
2635 		uint8_t reset_alpa = 0;
2636 
2637 		if (SLI_LINK_MEDIUM_FC == sli_get_medium(&hw->sli)) {
2638 			uint8_t	*cfg_link;
2639 
2640 			cfg_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2641 			if (cfg_link == NULL) {
2642 				ocs_log_err(hw->os, "no buffer for command\n");
2643 				return OCS_HW_RTN_NO_MEMORY;
2644 			}
2645 
2646 			if (sli_cmd_config_link(&hw->sli, cfg_link, SLI4_BMBX_SIZE)) {
2647 				rc = ocs_hw_command(hw, cfg_link, OCS_CMD_NOWAIT,
2648 							ocs_hw_cb_port_control, NULL);
2649 			}
2650 
2651 			if (rc != OCS_HW_RTN_SUCCESS) {
2652 				ocs_free(hw->os, cfg_link, SLI4_BMBX_SIZE);
2653 				ocs_log_err(hw->os, "CONFIG_LINK failed\n");
2654 				break;
2655 			}
2656 			speed = hw->config.speed;
2657 			reset_alpa = (uint8_t)(value & 0xff);
2658 		} else {
2659 			speed = FC_LINK_SPEED_10G;
2660 		}
2661 
2662 		/*
2663 		 * Bring link up, unless FW version is not supported
2664 		 */
2665 		if (hw->workaround.fw_version_too_low) {
2666 			if (SLI4_IF_TYPE_LANCER_FC_ETH == hw->sli.if_type) {
2667 				ocs_log_err(hw->os, "Cannot bring up link.  Please update firmware to %s or later (current version is %s)\n",
2668 					OCS_FW_VER_STR(OCS_MIN_FW_VER_LANCER), (char *) sli_get_fw_name(&hw->sli,0));
2669 			} else {
2670 				ocs_log_err(hw->os, "Cannot bring up link.  Please update firmware to %s or later (current version is %s)\n",
2671 					OCS_FW_VER_STR(OCS_MIN_FW_VER_SKYHAWK), (char *) sli_get_fw_name(&hw->sli, 0));
2672 			}
2673 
2674 			return OCS_HW_RTN_ERROR;
2675 		}
2676 
2677 		rc = OCS_HW_RTN_ERROR;
2678 
2679 		/* Allocate a new buffer for the init_link command */
2680 		init_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2681 		if (init_link == NULL) {
2682 			ocs_log_err(hw->os, "no buffer for command\n");
2683 			return OCS_HW_RTN_NO_MEMORY;
2684 		}
2685 
2686 
2687 		if (sli_cmd_init_link(&hw->sli, init_link, SLI4_BMBX_SIZE, speed, reset_alpa)) {
2688 			rc = ocs_hw_command(hw, init_link, OCS_CMD_NOWAIT,
2689 						ocs_hw_cb_port_control, NULL);
2690 		}
2691 		/* Free buffer on error, since no callback is coming */
2692 		if (rc != OCS_HW_RTN_SUCCESS) {
2693 			ocs_free(hw->os, init_link, SLI4_BMBX_SIZE);
2694 			ocs_log_err(hw->os, "INIT_LINK failed\n");
2695 		}
2696 		break;
2697 	}
2698 	case OCS_HW_PORT_SHUTDOWN:
2699 	{
2700 		uint8_t	*down_link;
2701 
2702 		down_link = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2703 		if (down_link == NULL) {
2704 			ocs_log_err(hw->os, "no buffer for command\n");
2705 			return OCS_HW_RTN_NO_MEMORY;
2706 		}
2707 		if (sli_cmd_down_link(&hw->sli, down_link, SLI4_BMBX_SIZE)) {
2708 			rc = ocs_hw_command(hw, down_link, OCS_CMD_NOWAIT,
2709 						ocs_hw_cb_port_control, NULL);
2710 		}
2711 		/* Free buffer on error, since no callback is coming */
2712 		if (rc != OCS_HW_RTN_SUCCESS) {
2713 			ocs_free(hw->os, down_link, SLI4_BMBX_SIZE);
2714 			ocs_log_err(hw->os, "DOWN_LINK failed\n");
2715 		}
2716 		break;
2717 	}
2718 	case OCS_HW_PORT_SET_LINK_CONFIG:
2719 		rc = ocs_hw_set_linkcfg(hw, (ocs_hw_linkcfg_e)value, OCS_CMD_NOWAIT, cb, arg);
2720 		break;
2721 	default:
2722 		ocs_log_test(hw->os, "unhandled control %#x\n", ctrl);
2723 		break;
2724 	}
2725 
2726 	return rc;
2727 }
2728 
2729 
2730 /**
2731  * @ingroup port
2732  * @brief Free port resources.
2733  *
2734  * @par Description
2735  * Issue the UNREG_VPI command to free the assigned VPI context.
2736  *
2737  * @param hw Hardware context.
2738  * @param sport SLI port object used to connect to the domain.
2739  *
2740  * @return Returns 0 on success, or a non-zero value on failure.
2741  */
2742 ocs_hw_rtn_e
2743 ocs_hw_port_free(ocs_hw_t *hw, ocs_sli_port_t *sport)
2744 {
2745 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
2746 
2747 	if (!hw || !sport) {
2748 		ocs_log_err(hw ? hw->os : NULL,
2749 			"bad parameter(s) hw=%p sport=%p\n", hw,
2750 			sport);
2751 		return OCS_HW_RTN_ERROR;
2752 	}
2753 
2754 	/*
2755 	 * Check if the chip is in an error state (UE'd) before proceeding.
2756 	 */
2757 	if (sli_fw_error_status(&hw->sli) > 0) {
2758 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2759 		return OCS_HW_RTN_ERROR;
2760 	}
2761 
2762 	ocs_sm_post_event(&sport->ctx, OCS_EVT_HW_PORT_REQ_FREE, NULL);
2763 	return rc;
2764 }
2765 
2766 /**
2767  * @ingroup domain
2768  * @brief Allocate a fabric domain object.
2769  *
2770  * @par Description
2771  * This function starts a series of commands needed to connect to the domain, including
2772  *   - REG_FCFI
2773  *   - INIT_VFI
2774  *   - READ_SPARMS
2775  *   .
2776  * @b Note: Not all SLI interface types use all of the above commands.
2777  * @n @n Upon successful allocation, the HW generates a OCS_HW_DOMAIN_ALLOC_OK
2778  * event. On failure, it generates a OCS_HW_DOMAIN_ALLOC_FAIL event.
2779  *
2780  * @param hw Hardware context.
2781  * @param domain Pointer to the domain object.
2782  * @param fcf FCF index.
2783  * @param vlan VLAN ID.
2784  *
2785  * @return Returns 0 on success, or a non-zero value on failure.
2786  */
2787 ocs_hw_rtn_e
2788 ocs_hw_domain_alloc(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fcf, uint32_t vlan)
2789 {
2790 	uint8_t		*cmd = NULL;
2791 	uint32_t	index;
2792 
2793 	if (!hw || !domain || !domain->sport) {
2794 		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p sport=%p\n",
2795 				hw, domain, domain ? domain->sport : NULL);
2796 		return OCS_HW_RTN_ERROR;
2797 	}
2798 
2799 	/*
2800 	 * Check if the chip is in an error state (UE'd) before proceeding.
2801 	 */
2802 	if (sli_fw_error_status(&hw->sli) > 0) {
2803 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2804 		return OCS_HW_RTN_ERROR;
2805 	}
2806 
2807 	cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
2808 	if (!cmd) {
2809 		ocs_log_err(hw->os, "command memory allocation failed\n");
2810 		return OCS_HW_RTN_NO_MEMORY;
2811 	}
2812 
2813 	domain->dma = hw->domain_dmem;
2814 
2815 	domain->hw = hw;
2816 	domain->sm.app = domain;
2817 	domain->fcf = fcf;
2818 	domain->fcf_indicator = UINT32_MAX;
2819 	domain->vlan_id = vlan;
2820 	domain->indicator = UINT32_MAX;
2821 
2822 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_VFI, &domain->indicator, &index)) {
2823 		ocs_log_err(hw->os, "FCOE_VFI allocation failure\n");
2824 
2825 		ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
2826 
2827 		return OCS_HW_RTN_ERROR;
2828 	}
2829 
2830 	ocs_sm_transition(&domain->sm, __ocs_hw_domain_init, cmd);
2831 	return OCS_HW_RTN_SUCCESS;
2832 }
2833 
2834 /**
2835  * @ingroup domain
2836  * @brief Attach a SLI port to a domain.
2837  *
2838  * @param hw Hardware context.
2839  * @param domain Pointer to the domain object.
2840  * @param fc_id Fibre Channel ID to associate with this port.
2841  *
2842  * @return Returns 0 on success, or a non-zero value on failure.
2843  */
2844 ocs_hw_rtn_e
2845 ocs_hw_domain_attach(ocs_hw_t *hw, ocs_domain_t *domain, uint32_t fc_id)
2846 {
2847 	uint8_t	*buf = NULL;
2848 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
2849 
2850 	if (!hw || !domain) {
2851 		ocs_log_err(hw ? hw->os : NULL,
2852 			"bad parameter(s) hw=%p domain=%p\n",
2853 			hw, domain);
2854 		return OCS_HW_RTN_ERROR;
2855 	}
2856 
2857 	/*
2858 	 * Check if the chip is in an error state (UE'd) before proceeding.
2859 	 */
2860 	if (sli_fw_error_status(&hw->sli) > 0) {
2861 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2862 		return OCS_HW_RTN_ERROR;
2863 	}
2864 
2865 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
2866 	if (!buf) {
2867 		ocs_log_err(hw->os, "no buffer for command\n");
2868 		return OCS_HW_RTN_NO_MEMORY;
2869 	}
2870 
2871 	domain->sport->fc_id = fc_id;
2872 	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_ATTACH, buf);
2873 	return rc;
2874 }
2875 
2876 /**
2877  * @ingroup domain
2878  * @brief Free a fabric domain object.
2879  *
2880  * @par Description
2881  * Free both the driver and SLI port resources associated with the domain.
2882  *
2883  * @param hw Hardware context.
2884  * @param domain Pointer to the domain object.
2885  *
2886  * @return Returns 0 on success, or a non-zero value on failure.
2887  */
2888 ocs_hw_rtn_e
2889 ocs_hw_domain_free(ocs_hw_t *hw, ocs_domain_t *domain)
2890 {
2891 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
2892 
2893 	if (!hw || !domain) {
2894 		ocs_log_err(hw ? hw->os : NULL,
2895 			"bad parameter(s) hw=%p domain=%p\n",
2896 			hw, domain);
2897 		return OCS_HW_RTN_ERROR;
2898 	}
2899 
2900 	/*
2901 	 * Check if the chip is in an error state (UE'd) before proceeding.
2902 	 */
2903 	if (sli_fw_error_status(&hw->sli) > 0) {
2904 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2905 		return OCS_HW_RTN_ERROR;
2906 	}
2907 
2908 	ocs_sm_post_event(&domain->sm, OCS_EVT_HW_DOMAIN_REQ_FREE, NULL);
2909 	return rc;
2910 }
2911 
2912 /**
2913  * @ingroup domain
2914  * @brief Free a fabric domain object.
2915  *
2916  * @par Description
2917  * Free the driver resources associated with the domain. The difference between
2918  * this call and ocs_hw_domain_free() is that this call assumes resources no longer
2919  * exist on the SLI port, due to a reset or after some error conditions.
2920  *
2921  * @param hw Hardware context.
2922  * @param domain Pointer to the domain object.
2923  *
2924  * @return Returns 0 on success, or a non-zero value on failure.
2925  */
2926 ocs_hw_rtn_e
2927 ocs_hw_domain_force_free(ocs_hw_t *hw, ocs_domain_t *domain)
2928 {
2929 	if (!hw || !domain) {
2930 		ocs_log_err(NULL, "bad parameter(s) hw=%p domain=%p\n", hw, domain);
2931 		return OCS_HW_RTN_ERROR;
2932 	}
2933 
2934 	sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
2935 
2936 	return OCS_HW_RTN_SUCCESS;
2937 }
2938 
2939 /**
2940  * @ingroup node
2941  * @brief Allocate a remote node object.
2942  *
2943  * @param hw Hardware context.
2944  * @param rnode Allocated remote node object to initialize.
2945  * @param fc_addr FC address of the remote node.
2946  * @param sport SLI port used to connect to remote node.
2947  *
2948  * @return Returns 0 on success, or a non-zero value on failure.
2949  */
2950 ocs_hw_rtn_e
2951 ocs_hw_node_alloc(ocs_hw_t *hw, ocs_remote_node_t *rnode, uint32_t fc_addr,
2952 		ocs_sli_port_t *sport)
2953 {
2954 	/* Check for invalid indicator */
2955 	if (UINT32_MAX != rnode->indicator) {
2956 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x rpi=%#x\n",
2957 				fc_addr, rnode->indicator);
2958 		return OCS_HW_RTN_ERROR;
2959 	}
2960 
2961 	/*
2962 	 * Check if the chip is in an error state (UE'd) before proceeding.
2963 	 */
2964 	if (sli_fw_error_status(&hw->sli) > 0) {
2965 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
2966 		return OCS_HW_RTN_ERROR;
2967 	}
2968 
2969 	/* NULL SLI port indicates an unallocated remote node */
2970 	rnode->sport = NULL;
2971 
2972 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &rnode->indicator, &rnode->index)) {
2973 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
2974 				fc_addr);
2975 		return OCS_HW_RTN_ERROR;
2976 	}
2977 
2978 	rnode->fc_id = fc_addr;
2979 	rnode->sport = sport;
2980 
2981 	return OCS_HW_RTN_SUCCESS;
2982 }
2983 
2984 /**
2985  * @ingroup node
2986  * @brief Update a remote node object with the remote port's service parameters.
2987  *
2988  * @param hw Hardware context.
2989  * @param rnode Allocated remote node object to initialize.
2990  * @param sparms DMA buffer containing the remote port's service parameters.
2991  *
2992  * @return Returns 0 on success, or a non-zero value on failure.
2993  */
2994 ocs_hw_rtn_e
2995 ocs_hw_node_attach(ocs_hw_t *hw, ocs_remote_node_t *rnode, ocs_dma_t *sparms)
2996 {
2997 	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
2998 	uint8_t		*buf = NULL;
2999 	uint32_t	count = 0;
3000 
3001 	if (!hw || !rnode || !sparms) {
3002 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p sparms=%p\n",
3003 			    hw, rnode, sparms);
3004 		return OCS_HW_RTN_ERROR;
3005 	}
3006 
3007 	/*
3008 	 * Check if the chip is in an error state (UE'd) before proceeding.
3009 	 */
3010 	if (sli_fw_error_status(&hw->sli) > 0) {
3011 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3012 		return OCS_HW_RTN_ERROR;
3013 	}
3014 
3015 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3016 	if (!buf) {
3017 		ocs_log_err(hw->os, "no buffer for command\n");
3018 		return OCS_HW_RTN_NO_MEMORY;
3019 	}
3020 
3021 	/*
3022 	 * If the attach count is non-zero, this RPI has already been registered.
3023 	 * Otherwise, register the RPI
3024 	 */
3025 	if (rnode->index == UINT32_MAX) {
3026 		ocs_log_err(NULL, "bad parameter rnode->index invalid\n");
3027 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3028 		return OCS_HW_RTN_ERROR;
3029 	}
3030 	count = ocs_atomic_add_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3031 	if (count) {
3032 		/*
3033 		 * Can't attach multiple FC_ID's to a node unless High Login
3034 		 * Mode is enabled
3035 		 */
3036 		if (sli_get_hlm(&hw->sli) == FALSE) {
3037 			ocs_log_test(hw->os, "attach to already attached node HLM=%d count=%d\n",
3038 					sli_get_hlm(&hw->sli), count);
3039 			rc = OCS_HW_RTN_SUCCESS;
3040 		} else {
3041 			rnode->node_group = TRUE;
3042 			rnode->attached = ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_attached);
3043 			rc = rnode->attached  ? OCS_HW_RTN_SUCCESS_SYNC : OCS_HW_RTN_SUCCESS;
3044 		}
3045 	} else {
3046 		rnode->node_group = FALSE;
3047 
3048 		ocs_display_sparams("", "reg rpi", 0, NULL, sparms->virt);
3049 		if (sli_cmd_reg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->fc_id,
3050 					rnode->indicator, rnode->sport->indicator,
3051 					sparms, 0, (hw->auto_xfer_rdy_enabled && hw->config.auto_xfer_rdy_t10_enable))) {
3052 			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT,
3053 					ocs_hw_cb_node_attach, rnode);
3054 		}
3055 	}
3056 
3057 	if (count || rc) {
3058 		if (rc < OCS_HW_RTN_SUCCESS) {
3059 			ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
3060 			ocs_log_err(hw->os, "%s error\n", count ? "HLM" : "REG_RPI");
3061 		}
3062 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3063 	}
3064 
3065 	return rc;
3066 }
3067 
3068 /**
3069  * @ingroup node
3070  * @brief Free a remote node resource.
3071  *
3072  * @param hw Hardware context.
3073  * @param rnode Remote node object to free.
3074  *
3075  * @return Returns 0 on success, or a non-zero value on failure.
3076  */
3077 ocs_hw_rtn_e
3078 ocs_hw_node_free_resources(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3079 {
3080 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
3081 
3082 	if (!hw || !rnode) {
3083 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3084 			    hw, rnode);
3085 		return OCS_HW_RTN_ERROR;
3086 	}
3087 
3088 	if (rnode->sport) {
3089 		if (!rnode->attached) {
3090 			if (rnode->indicator != UINT32_MAX) {
3091 				if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3092 					ocs_log_err(hw->os, "FCOE_RPI free failure RPI %d addr=%#x\n",
3093 						    rnode->indicator, rnode->fc_id);
3094 					rc = OCS_HW_RTN_ERROR;
3095 				} else {
3096 					rnode->node_group = FALSE;
3097 					rnode->indicator = UINT32_MAX;
3098 					rnode->index = UINT32_MAX;
3099 					rnode->free_group = FALSE;
3100 				}
3101 			}
3102 		} else {
3103 			ocs_log_err(hw->os, "Error: rnode is still attached\n");
3104 			rc = OCS_HW_RTN_ERROR;
3105 		}
3106 	}
3107 
3108 	return rc;
3109 }
3110 
3111 
3112 /**
3113  * @ingroup node
3114  * @brief Free a remote node object.
3115  *
3116  * @param hw Hardware context.
3117  * @param rnode Remote node object to free.
3118  *
3119  * @return Returns 0 on success, or a non-zero value on failure.
3120  */
3121 ocs_hw_rtn_e
3122 ocs_hw_node_detach(ocs_hw_t *hw, ocs_remote_node_t *rnode)
3123 {
3124 	uint8_t	*buf = NULL;
3125 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS_SYNC;
3126 	uint32_t	index = UINT32_MAX;
3127 
3128 	if (!hw || !rnode) {
3129 		ocs_log_err(NULL, "bad parameter(s) hw=%p rnode=%p\n",
3130 			    hw, rnode);
3131 		return OCS_HW_RTN_ERROR;
3132 	}
3133 
3134 	/*
3135 	 * Check if the chip is in an error state (UE'd) before proceeding.
3136 	 */
3137 	if (sli_fw_error_status(&hw->sli) > 0) {
3138 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3139 		return OCS_HW_RTN_ERROR;
3140 	}
3141 
3142 	index = rnode->index;
3143 
3144 	if (rnode->sport) {
3145 		uint32_t	count = 0;
3146 		uint32_t	fc_id;
3147 
3148 		if (!rnode->attached) {
3149 			return OCS_HW_RTN_SUCCESS_SYNC;
3150 		}
3151 
3152 		buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3153 		if (!buf) {
3154 			ocs_log_err(hw->os, "no buffer for command\n");
3155 			return OCS_HW_RTN_NO_MEMORY;
3156 		}
3157 
3158 		count = ocs_atomic_sub_return(&hw->rpi_ref[index].rpi_count, 1);
3159 
3160 		if (count <= 1) {
3161 			/* There are no other references to this RPI
3162 			 * so unregister it and free the resource. */
3163 			fc_id = UINT32_MAX;
3164 			rnode->node_group = FALSE;
3165 			rnode->free_group = TRUE;
3166 		} else {
3167 			if (sli_get_hlm(&hw->sli) == FALSE) {
3168 				ocs_log_test(hw->os, "Invalid count with HLM disabled, count=%d\n",
3169 						count);
3170 			}
3171 			fc_id = rnode->fc_id & 0x00ffffff;
3172 		}
3173 
3174 		rc = OCS_HW_RTN_ERROR;
3175 
3176 		if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, rnode->indicator,
3177 					SLI_RSRC_FCOE_RPI, fc_id)) {
3178 			rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free, rnode);
3179 		}
3180 
3181 		if (rc != OCS_HW_RTN_SUCCESS) {
3182 			ocs_log_err(hw->os, "UNREG_RPI failed\n");
3183 			ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3184 			rc = OCS_HW_RTN_ERROR;
3185 		}
3186 	}
3187 
3188 	return rc;
3189 }
3190 
3191 /**
3192  * @ingroup node
3193  * @brief Free all remote node objects.
3194  *
3195  * @param hw Hardware context.
3196  *
3197  * @return Returns 0 on success, or a non-zero value on failure.
3198  */
3199 ocs_hw_rtn_e
3200 ocs_hw_node_free_all(ocs_hw_t *hw)
3201 {
3202 	uint8_t	*buf = NULL;
3203 	ocs_hw_rtn_e	rc = OCS_HW_RTN_ERROR;
3204 
3205 	if (!hw) {
3206 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
3207 		return OCS_HW_RTN_ERROR;
3208 	}
3209 
3210 	/*
3211 	 * Check if the chip is in an error state (UE'd) before proceeding.
3212 	 */
3213 	if (sli_fw_error_status(&hw->sli) > 0) {
3214 		ocs_log_crit(hw->os, "Chip is in an error state - reset needed\n");
3215 		return OCS_HW_RTN_ERROR;
3216 	}
3217 
3218 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
3219 	if (!buf) {
3220 		ocs_log_err(hw->os, "no buffer for command\n");
3221 		return OCS_HW_RTN_NO_MEMORY;
3222 	}
3223 
3224 	if (sli_cmd_unreg_rpi(&hw->sli, buf, SLI4_BMBX_SIZE, 0xffff,
3225 				SLI_RSRC_FCOE_FCFI, UINT32_MAX)) {
3226 		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_node_free_all,
3227 				NULL);
3228 	}
3229 
3230 	if (rc != OCS_HW_RTN_SUCCESS) {
3231 		ocs_log_err(hw->os, "UNREG_RPI failed\n");
3232 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
3233 		rc = OCS_HW_RTN_ERROR;
3234 	}
3235 
3236 	return rc;
3237 }
3238 
3239 ocs_hw_rtn_e
3240 ocs_hw_node_group_alloc(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3241 {
3242 
3243 	if (!hw || !ngroup) {
3244 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3245 				hw, ngroup);
3246 		return OCS_HW_RTN_ERROR;
3247 	}
3248 
3249 	if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &ngroup->indicator,
3250 				&ngroup->index)) {
3251 		ocs_log_err(hw->os, "FCOE_RPI allocation failure addr=%#x\n",
3252 				ngroup->indicator);
3253 		return OCS_HW_RTN_ERROR;
3254 	}
3255 
3256 	return OCS_HW_RTN_SUCCESS;
3257 }
3258 
3259 ocs_hw_rtn_e
3260 ocs_hw_node_group_attach(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup, ocs_remote_node_t *rnode)
3261 {
3262 
3263 	if (!hw || !ngroup || !rnode) {
3264 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p rnode=%p\n",
3265 			    hw, ngroup, rnode);
3266 		return OCS_HW_RTN_ERROR;
3267 	}
3268 
3269 	if (rnode->attached) {
3270 		ocs_log_err(hw->os, "node already attached RPI=%#x addr=%#x\n",
3271 			    rnode->indicator, rnode->fc_id);
3272 		return OCS_HW_RTN_ERROR;
3273 	}
3274 
3275 	if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, rnode->indicator)) {
3276 		ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3277 				rnode->indicator);
3278 		return OCS_HW_RTN_ERROR;
3279 	}
3280 
3281 	rnode->indicator = ngroup->indicator;
3282 	rnode->index = ngroup->index;
3283 
3284 	return OCS_HW_RTN_SUCCESS;
3285 }
3286 
3287 ocs_hw_rtn_e
3288 ocs_hw_node_group_free(ocs_hw_t *hw, ocs_remote_node_group_t *ngroup)
3289 {
3290 	int	ref;
3291 
3292 	if (!hw || !ngroup) {
3293 		ocs_log_err(NULL, "bad parameter hw=%p ngroup=%p\n",
3294 				hw, ngroup);
3295 		return OCS_HW_RTN_ERROR;
3296 	}
3297 
3298 	ref = ocs_atomic_read(&hw->rpi_ref[ngroup->index].rpi_count);
3299 	if (ref) {
3300 		/* Hmmm, the reference count is non-zero */
3301 		ocs_log_debug(hw->os, "node group reference=%d (RPI=%#x)\n",
3302 				ref, ngroup->indicator);
3303 
3304 		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_RPI, ngroup->indicator)) {
3305 			ocs_log_err(hw->os, "FCOE_RPI free failure RPI=%#x\n",
3306 				    ngroup->indicator);
3307 			return OCS_HW_RTN_ERROR;
3308 		}
3309 
3310 		ocs_atomic_set(&hw->rpi_ref[ngroup->index].rpi_count, 0);
3311 	}
3312 
3313 	ngroup->indicator = UINT32_MAX;
3314 	ngroup->index = UINT32_MAX;
3315 
3316 	return OCS_HW_RTN_SUCCESS;
3317 }
3318 
3319 /**
3320  * @brief Initialize IO fields on each free call.
3321  *
3322  * @n @b Note: This is done on each free call (as opposed to each
3323  * alloc call) because port-owned XRIs are not
3324  * allocated with ocs_hw_io_alloc() but are freed with this
3325  * function.
3326  *
3327  * @param io Pointer to HW IO.
3328  */
3329 static inline void
3330 ocs_hw_init_free_io(ocs_hw_io_t *io)
3331 {
3332 	/*
3333 	 * Set io->done to NULL, to avoid any callbacks, should
3334 	 * a completion be received for one of these IOs
3335 	 */
3336 	io->done = NULL;
3337 	io->abort_done = NULL;
3338 	io->status_saved = 0;
3339 	io->abort_in_progress = FALSE;
3340 	io->port_owned_abort_count = 0;
3341 	io->rnode = NULL;
3342 	io->type = 0xFFFF;
3343 	io->wq = NULL;
3344 	io->ul_io = NULL;
3345 	io->tgt_wqe_timeout = 0;
3346 }
3347 
3348 /**
3349  * @ingroup io
3350  * @brief Lockless allocate a HW IO object.
3351  *
3352  * @par Description
3353  * Assume that hw->ocs_lock is held. This function is only used if
3354  * use_dif_sec_xri workaround is being used.
3355  *
3356  * @param hw Hardware context.
3357  *
3358  * @return Returns a pointer to an object on success, or NULL on failure.
3359  */
3360 static inline ocs_hw_io_t *
3361 _ocs_hw_io_alloc(ocs_hw_t *hw)
3362 {
3363 	ocs_hw_io_t	*io = NULL;
3364 
3365 	if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
3366 		ocs_list_add_tail(&hw->io_inuse, io);
3367 		io->state = OCS_HW_IO_STATE_INUSE;
3368 		io->quarantine = FALSE;
3369 		io->quarantine_first_phase = TRUE;
3370 		io->abort_reqtag = UINT32_MAX;
3371 		ocs_ref_init(&io->ref, ocs_hw_io_free_internal, io);
3372 	} else {
3373 		ocs_atomic_add_return(&hw->io_alloc_failed_count, 1);
3374 	}
3375 
3376 	return io;
3377 }
3378 /**
3379  * @ingroup io
3380  * @brief Allocate a HW IO object.
3381  *
3382  * @par Description
3383  * @n @b Note: This function applies to non-port owned XRIs
3384  * only.
3385  *
3386  * @param hw Hardware context.
3387  *
3388  * @return Returns a pointer to an object on success, or NULL on failure.
3389  */
3390 ocs_hw_io_t *
3391 ocs_hw_io_alloc(ocs_hw_t *hw)
3392 {
3393 	ocs_hw_io_t	*io = NULL;
3394 
3395 	ocs_lock(&hw->io_lock);
3396 		io = _ocs_hw_io_alloc(hw);
3397 	ocs_unlock(&hw->io_lock);
3398 
3399 	return io;
3400 }
3401 
3402 /**
3403  * @ingroup io
3404  * @brief Allocate/Activate a port owned HW IO object.
3405  *
3406  * @par Description
3407  * This function is called by the transport layer when an XRI is
3408  * allocated by the SLI-Port. This will "activate" the HW IO
3409  * associated with the XRI received from the SLI-Port to mirror
3410  * the state of the XRI.
3411  * @n @n @b Note: This function applies to port owned XRIs only.
3412  *
3413  * @param hw Hardware context.
3414  * @param io Pointer HW IO to activate/allocate.
3415  *
3416  * @return Returns a pointer to an object on success, or NULL on failure.
3417  */
3418 ocs_hw_io_t *
3419 ocs_hw_io_activate_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
3420 {
3421 	if (ocs_ref_read_count(&io->ref) > 0) {
3422 		ocs_log_err(hw->os, "Bad parameter: refcount > 0\n");
3423 		return NULL;
3424 	}
3425 
3426 	if (io->wq != NULL) {
3427 		ocs_log_err(hw->os, "XRI %x already in use\n", io->indicator);
3428 		return NULL;
3429 	}
3430 
3431 	ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3432 	io->xbusy = TRUE;
3433 
3434 	return io;
3435 }
3436 
3437 /**
3438  * @ingroup io
3439  * @brief When an IO is freed, depending on the exchange busy flag, and other
3440  * workarounds, move it to the correct list.
3441  *
3442  * @par Description
3443  * @n @b Note: Assumes that the hw->io_lock is held and the item has been removed
3444  * from the busy or wait_free list.
3445  *
3446  * @param hw Hardware context.
3447  * @param io Pointer to the IO object to move.
3448  */
3449 static void
3450 ocs_hw_io_free_move_correct_list(ocs_hw_t *hw, ocs_hw_io_t *io)
3451 {
3452 	if (io->xbusy) {
3453 		/* add to wait_free list and wait for XRI_ABORTED CQEs to clean up */
3454 		ocs_list_add_tail(&hw->io_wait_free, io);
3455 		io->state = OCS_HW_IO_STATE_WAIT_FREE;
3456 	} else {
3457 		/* IO not busy, add to free list */
3458 		ocs_list_add_tail(&hw->io_free, io);
3459 		io->state = OCS_HW_IO_STATE_FREE;
3460 	}
3461 
3462 	/* BZ 161832 workaround */
3463 	if (hw->workaround.use_dif_sec_xri) {
3464 		ocs_hw_check_sec_hio_list(hw);
3465 	}
3466 }
3467 
3468 /**
3469  * @ingroup io
3470  * @brief Free a HW IO object. Perform cleanup common to
3471  * port and host-owned IOs.
3472  *
3473  * @param hw Hardware context.
3474  * @param io Pointer to the HW IO object.
3475  */
3476 static inline void
3477 ocs_hw_io_free_common(ocs_hw_t *hw, ocs_hw_io_t *io)
3478 {
3479 	/* initialize IO fields */
3480 	ocs_hw_init_free_io(io);
3481 
3482 	/* Restore default SGL */
3483 	ocs_hw_io_restore_sgl(hw, io);
3484 }
3485 
3486 /**
3487  * @ingroup io
3488  * @brief Free a HW IO object associated with a port-owned XRI.
3489  *
3490  * @param arg Pointer to the HW IO object.
3491  */
3492 static void
3493 ocs_hw_io_free_port_owned(void *arg)
3494 {
3495 	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3496 	ocs_hw_t *hw = io->hw;
3497 
3498 	/*
3499 	 * For auto xfer rdy, if the dnrx bit is set, then add it to the list of XRIs
3500 	 * waiting for buffers.
3501 	 */
3502 	if (io->auto_xfer_rdy_dnrx) {
3503 		ocs_lock(&hw->io_lock);
3504 			/* take a reference count because we still own the IO until the buffer is posted */
3505 			ocs_ref_init(&io->ref, ocs_hw_io_free_port_owned, io);
3506 			ocs_list_add_tail(&hw->io_port_dnrx, io);
3507 		ocs_unlock(&hw->io_lock);
3508 	}
3509 
3510 	/* perform common cleanup */
3511 	ocs_hw_io_free_common(hw, io);
3512 }
3513 
3514 /**
3515  * @ingroup io
3516  * @brief Free a previously-allocated HW IO object. Called when
3517  * IO refcount goes to zero (host-owned IOs only).
3518  *
3519  * @param arg Pointer to the HW IO object.
3520  */
3521 static void
3522 ocs_hw_io_free_internal(void *arg)
3523 {
3524 	ocs_hw_io_t *io = (ocs_hw_io_t *)arg;
3525 	ocs_hw_t *hw = io->hw;
3526 
3527 	/* perform common cleanup */
3528 	ocs_hw_io_free_common(hw, io);
3529 
3530 	ocs_lock(&hw->io_lock);
3531 		/* remove from in-use list */
3532 		ocs_list_remove(&hw->io_inuse, io);
3533 		ocs_hw_io_free_move_correct_list(hw, io);
3534 	ocs_unlock(&hw->io_lock);
3535 }
3536 
3537 /**
3538  * @ingroup io
3539  * @brief Free a previously-allocated HW IO object.
3540  *
3541  * @par Description
3542  * @n @b Note: This function applies to port and host owned XRIs.
3543  *
3544  * @param hw Hardware context.
3545  * @param io Pointer to the HW IO object.
3546  *
3547  * @return Returns a non-zero value if HW IO was freed, 0 if references
3548  * on the IO still exist, or a negative value if an error occurred.
3549  */
3550 int32_t
3551 ocs_hw_io_free(ocs_hw_t *hw, ocs_hw_io_t *io)
3552 {
3553 	/* just put refcount */
3554 	if (ocs_ref_read_count(&io->ref) <= 0) {
3555 		ocs_log_err(hw->os, "Bad parameter: refcount <= 0 xri=%x tag=%x\n",
3556 			    io->indicator, io->reqtag);
3557 		return -1;
3558 	}
3559 
3560 	return ocs_ref_put(&io->ref); /* ocs_ref_get(): ocs_hw_io_alloc() */
3561 }
3562 
3563 /**
3564  * @ingroup io
3565  * @brief Check if given HW IO is in-use
3566  *
3567  * @par Description
3568  * This function returns TRUE if the given HW IO has been
3569  * allocated and is in-use, and FALSE otherwise. It applies to
3570  * port and host owned XRIs.
3571  *
3572  * @param hw Hardware context.
3573  * @param io Pointer to the HW IO object.
3574  *
3575  * @return TRUE if an IO is in use, or FALSE otherwise.
3576  */
3577 uint8_t
3578 ocs_hw_io_inuse(ocs_hw_t *hw, ocs_hw_io_t *io)
3579 {
3580 	return (ocs_ref_read_count(&io->ref) > 0);
3581 }
3582 
3583 /**
3584  * @brief Write a HW IO to a work queue.
3585  *
3586  * @par Description
3587  * A HW IO is written to a work queue.
3588  *
3589  * @param wq Pointer to work queue.
3590  * @param wqe Pointer to WQ entry.
3591  *
3592  * @n @b Note: Assumes the SLI-4 queue lock is held.
3593  *
3594  * @return Returns 0 on success, or a negative error code value on failure.
3595  */
3596 static int32_t
3597 _hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3598 {
3599 	int32_t rc;
3600 	int32_t queue_rc;
3601 
3602 	/* Every so often, set the wqec bit to generate comsummed completions */
3603 	if (wq->wqec_count) {
3604 		wq->wqec_count--;
3605 	}
3606 	if (wq->wqec_count == 0) {
3607 		sli4_generic_wqe_t *genwqe = (void*)wqe->wqebuf;
3608 		genwqe->wqec = 1;
3609 		wq->wqec_count = wq->wqec_set_count;
3610 	}
3611 
3612 	/* Decrement WQ free count */
3613 	wq->free_count--;
3614 
3615 	queue_rc = _sli_queue_write(&wq->hw->sli, wq->queue, wqe->wqebuf);
3616 
3617 	if (queue_rc < 0) {
3618 		rc = -1;
3619 	} else {
3620 		rc = 0;
3621 		ocs_queue_history_wq(&wq->hw->q_hist, (void *) wqe->wqebuf, wq->queue->id, queue_rc);
3622 	}
3623 
3624 	return rc;
3625 }
3626 
3627 /**
3628  * @brief Write a HW IO to a work queue.
3629  *
3630  * @par Description
3631  * A HW IO is written to a work queue.
3632  *
3633  * @param wq Pointer to work queue.
3634  * @param wqe Pointer to WQE entry.
3635  *
3636  * @n @b Note: Takes the SLI-4 queue lock.
3637  *
3638  * @return Returns 0 on success, or a negative error code value on failure.
3639  */
3640 int32_t
3641 hw_wq_write(hw_wq_t *wq, ocs_hw_wqe_t *wqe)
3642 {
3643 	int32_t rc = 0;
3644 
3645 	sli_queue_lock(wq->queue);
3646 		if ( ! ocs_list_empty(&wq->pending_list)) {
3647 			ocs_list_add_tail(&wq->pending_list, wqe);
3648 			OCS_STAT(wq->wq_pending_count++;)
3649 			while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3650 				rc = _hw_wq_write(wq, wqe);
3651 				if (rc < 0) {
3652 					break;
3653 				}
3654 				if (wqe->abort_wqe_submit_needed) {
3655 					wqe->abort_wqe_submit_needed = 0;
3656 					sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3657 							wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT );
3658 					ocs_list_add_tail(&wq->pending_list, wqe);
3659 					OCS_STAT(wq->wq_pending_count++;)
3660 				}
3661 			}
3662 		} else {
3663 			if (wq->free_count > 0) {
3664 				rc = _hw_wq_write(wq, wqe);
3665 			} else {
3666 				ocs_list_add_tail(&wq->pending_list, wqe);
3667 				OCS_STAT(wq->wq_pending_count++;)
3668 			}
3669 		}
3670 
3671 	sli_queue_unlock(wq->queue);
3672 
3673 	return rc;
3674 
3675 }
3676 
3677 /**
3678  * @brief Update free count and submit any pending HW IOs
3679  *
3680  * @par Description
3681  * The WQ free count is updated, and any pending HW IOs are submitted that
3682  * will fit in the queue.
3683  *
3684  * @param wq Pointer to work queue.
3685  * @param update_free_count Value added to WQs free count.
3686  *
3687  * @return None.
3688  */
3689 static void
3690 hw_wq_submit_pending(hw_wq_t *wq, uint32_t update_free_count)
3691 {
3692 	ocs_hw_wqe_t *wqe;
3693 
3694 	sli_queue_lock(wq->queue);
3695 
3696 		/* Update free count with value passed in */
3697 		wq->free_count += update_free_count;
3698 
3699 		while ((wq->free_count > 0) && ((wqe = ocs_list_remove_head(&wq->pending_list)) != NULL)) {
3700 			_hw_wq_write(wq, wqe);
3701 
3702 			if (wqe->abort_wqe_submit_needed) {
3703 				wqe->abort_wqe_submit_needed = 0;
3704 				sli_abort_wqe(&wq->hw->sli, wqe->wqebuf, wq->hw->sli.config.wqe_size, SLI_ABORT_XRI,
3705 						wqe->send_abts, wqe->id, 0, wqe->abort_reqtag, SLI4_CQ_DEFAULT);
3706 				ocs_list_add_tail(&wq->pending_list, wqe);
3707 				OCS_STAT(wq->wq_pending_count++;)
3708 			}
3709 		}
3710 
3711 	sli_queue_unlock(wq->queue);
3712 }
3713 
3714 /**
3715  * @brief Check to see if there are any BZ 161832 workaround waiting IOs
3716  *
3717  * @par Description
3718  * Checks hw->sec_hio_wait_list, if an IO is waiting for a HW IO, then try
3719  * to allocate a secondary HW io, and dispatch it.
3720  *
3721  * @n @b Note: hw->io_lock MUST be taken when called.
3722  *
3723  * @param hw pointer to HW object
3724  *
3725  * @return none
3726  */
3727 static void
3728 ocs_hw_check_sec_hio_list(ocs_hw_t *hw)
3729 {
3730 	ocs_hw_io_t *io;
3731 	ocs_hw_io_t *sec_io;
3732 	int rc = 0;
3733 
3734 	while (!ocs_list_empty(&hw->sec_hio_wait_list)) {
3735 		uint16_t flags;
3736 
3737 		sec_io = _ocs_hw_io_alloc(hw);
3738 		if (sec_io == NULL) {
3739 			break;
3740 		}
3741 
3742 		io = ocs_list_remove_head(&hw->sec_hio_wait_list);
3743 		ocs_list_add_tail(&hw->io_inuse, io);
3744 		io->state = OCS_HW_IO_STATE_INUSE;
3745 		io->sec_hio = sec_io;
3746 
3747 		/* mark secondary XRI for second and subsequent data phase as quarantine */
3748 		if (io->xbusy) {
3749 			sec_io->quarantine = TRUE;
3750 		}
3751 
3752 		flags = io->sec_iparam.fcp_tgt.flags;
3753 		if (io->xbusy) {
3754 			flags |= SLI4_IO_CONTINUATION;
3755 		} else {
3756 			flags &= ~SLI4_IO_CONTINUATION;
3757 		}
3758 
3759 		io->tgt_wqe_timeout = io->sec_iparam.fcp_tgt.timeout;
3760 
3761 		/* Complete (continue) TRECV IO */
3762 		if (io->xbusy) {
3763 			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3764 				io->first_data_sge,
3765 				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator, io->sec_hio->indicator,
3766 				io->reqtag, SLI4_CQ_DEFAULT,
3767 				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3768 				flags,
3769 				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size, io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3770 					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3771 					break;
3772 			}
3773 		} else {
3774 			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
3775 				io->first_data_sge,
3776 				io->sec_iparam.fcp_tgt.offset, io->sec_len, io->indicator,
3777 				io->reqtag, SLI4_CQ_DEFAULT,
3778 				io->sec_iparam.fcp_tgt.ox_id, io->rnode->indicator, io->rnode,
3779 				flags,
3780 				io->sec_iparam.fcp_tgt.dif_oper, io->sec_iparam.fcp_tgt.blk_size,
3781 				io->sec_iparam.fcp_tgt.cs_ctl, io->sec_iparam.fcp_tgt.app_id)) {
3782 					ocs_log_test(hw->os, "TRECEIVE WQE error\n");
3783 					break;
3784 			}
3785 		}
3786 
3787 		if (io->wq == NULL) {
3788 			io->wq = ocs_hw_queue_next_wq(hw, io);
3789 			ocs_hw_assert(io->wq != NULL);
3790 		}
3791 		io->xbusy = TRUE;
3792 
3793 		/*
3794 		 * Add IO to active io wqe list before submitting, in case the
3795 		 * wcqe processing preempts this thread.
3796 		 */
3797 		ocs_hw_add_io_timed_wqe(hw, io);
3798 		rc = hw_wq_write(io->wq, &io->wqe);
3799 		if (rc >= 0) {
3800 			/* non-negative return is success */
3801 			rc = 0;
3802 		} else {
3803 			/* failed to write wqe, remove from active wqe list */
3804 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
3805 			io->xbusy = FALSE;
3806 			ocs_hw_remove_io_timed_wqe(hw, io);
3807 		}
3808 	}
3809 }
3810 
3811 /**
3812  * @ingroup io
3813  * @brief Send a Single Request/Response Sequence (SRRS).
3814  *
3815  * @par Description
3816  * This routine supports communication sequences consisting of a single
3817  * request and single response between two endpoints. Examples include:
3818  *  - Sending an ELS request.
3819  *  - Sending an ELS response - To send an ELS reponse, the caller must provide
3820  * the OX_ID from the received request.
3821  *  - Sending a FC Common Transport (FC-CT) request - To send a FC-CT request,
3822  * the caller must provide the R_CTL, TYPE, and DF_CTL
3823  * values to place in the FC frame header.
3824  *  .
3825  * @n @b Note: The caller is expected to provide both send and receive
3826  * buffers for requests. In the case of sending a response, no receive buffer
3827  * is necessary and the caller may pass in a NULL pointer.
3828  *
3829  * @param hw Hardware context.
3830  * @param type Type of sequence (ELS request/response, FC-CT).
3831  * @param io Previously-allocated HW IO object.
3832  * @param send DMA memory holding data to send (for example, ELS request, BLS response).
3833  * @param len Length, in bytes, of data to send.
3834  * @param receive Optional DMA memory to hold a response.
3835  * @param rnode Destination of data (that is, a remote node).
3836  * @param iparam IO parameters (ELS response and FC-CT).
3837  * @param cb Function call upon completion of sending the data (may be NULL).
3838  * @param arg Argument to pass to IO completion function.
3839  *
3840  * @return Returns 0 on success, or a non-zero on failure.
3841  */
3842 ocs_hw_rtn_e
3843 ocs_hw_srrs_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
3844 		  ocs_dma_t *send, uint32_t len, ocs_dma_t *receive,
3845 		  ocs_remote_node_t *rnode, ocs_hw_io_param_t *iparam,
3846 		  ocs_hw_srrs_cb_t cb, void *arg)
3847 {
3848 	sli4_sge_t	*sge = NULL;
3849 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
3850 	uint16_t	local_flags = 0;
3851 
3852 	if (!hw || !io || !rnode || !iparam) {
3853 		ocs_log_err(NULL, "bad parm hw=%p io=%p send=%p receive=%p rnode=%p iparam=%p\n",
3854 			    hw, io, send, receive, rnode, iparam);
3855 		return OCS_HW_RTN_ERROR;
3856 	}
3857 
3858 	if (hw->state != OCS_HW_STATE_ACTIVE) {
3859 		ocs_log_test(hw->os, "cannot send SRRS, HW state=%d\n", hw->state);
3860 		return OCS_HW_RTN_ERROR;
3861 	}
3862 
3863 	if (ocs_hw_is_xri_port_owned(hw, io->indicator)) {
3864 		/* We must set the XC bit for port owned XRIs */
3865 		local_flags |= SLI4_IO_CONTINUATION;
3866 	}
3867 	io->rnode = rnode;
3868 	io->type  = type;
3869 	io->done = cb;
3870 	io->arg  = arg;
3871 
3872 	sge = io->sgl->virt;
3873 
3874 	/* clear both SGE */
3875 	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
3876 
3877 	if (send) {
3878 		sge[0].buffer_address_high = ocs_addr32_hi(send->phys);
3879 		sge[0].buffer_address_low  = ocs_addr32_lo(send->phys);
3880 		sge[0].sge_type = SLI4_SGE_TYPE_DATA;
3881 		sge[0].buffer_length = len;
3882 	}
3883 
3884 	if ((OCS_HW_ELS_REQ == type) || (OCS_HW_FC_CT == type)) {
3885 		sge[1].buffer_address_high = ocs_addr32_hi(receive->phys);
3886 		sge[1].buffer_address_low  = ocs_addr32_lo(receive->phys);
3887 		sge[1].sge_type = SLI4_SGE_TYPE_DATA;
3888 		sge[1].buffer_length = receive->size;
3889 		sge[1].last = TRUE;
3890 	} else {
3891 		sge[0].last = TRUE;
3892 	}
3893 
3894 	switch (type) {
3895 	case OCS_HW_ELS_REQ:
3896 		if ( (!send) || sli_els_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl,
3897 							*((uint8_t *)(send->virt)), /* req_type */
3898 							len, receive->size,
3899 							iparam->els.timeout, io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rnode)) {
3900 			ocs_log_err(hw->os, "REQ WQE error\n");
3901 			rc = OCS_HW_RTN_ERROR;
3902 		}
3903 		break;
3904 	case OCS_HW_ELS_RSP:
3905 		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3906 					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3907 					   iparam->els.ox_id,
3908 							rnode, local_flags, UINT32_MAX)) {
3909 			ocs_log_err(hw->os, "RSP WQE error\n");
3910 			rc = OCS_HW_RTN_ERROR;
3911 		}
3912 		break;
3913 	case OCS_HW_ELS_RSP_SID:
3914 		if ( (!send) || sli_xmit_els_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3915 					   io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
3916 					   iparam->els_sid.ox_id,
3917 							rnode, local_flags, iparam->els_sid.s_id)) {
3918 			ocs_log_err(hw->os, "RSP (SID) WQE error\n");
3919 			rc = OCS_HW_RTN_ERROR;
3920 		}
3921 		break;
3922 	case OCS_HW_FC_CT:
3923 		if ( (!send) || sli_gen_request64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3924 					  receive->size, iparam->fc_ct.timeout, io->indicator,
3925 					  io->reqtag, SLI4_CQ_DEFAULT, rnode, iparam->fc_ct.r_ctl,
3926 					  iparam->fc_ct.type, iparam->fc_ct.df_ctl)) {
3927 			ocs_log_err(hw->os, "GEN WQE error\n");
3928 			rc = OCS_HW_RTN_ERROR;
3929 		}
3930 		break;
3931 	case OCS_HW_FC_CT_RSP:
3932 		if ( (!send) || sli_xmit_sequence64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->sgl, len,
3933 					  iparam->fc_ct_rsp.timeout, iparam->fc_ct_rsp.ox_id, io->indicator,
3934 					  io->reqtag, rnode, iparam->fc_ct_rsp.r_ctl,
3935 					  iparam->fc_ct_rsp.type, iparam->fc_ct_rsp.df_ctl)) {
3936 			ocs_log_err(hw->os, "XMIT SEQ WQE error\n");
3937 			rc = OCS_HW_RTN_ERROR;
3938 		}
3939 		break;
3940 	case OCS_HW_BLS_ACC:
3941 	case OCS_HW_BLS_RJT:
3942 	{
3943 		sli_bls_payload_t	bls;
3944 
3945 		if (OCS_HW_BLS_ACC == type) {
3946 			bls.type = SLI_BLS_ACC;
3947 			ocs_memcpy(&bls.u.acc, iparam->bls.payload, sizeof(bls.u.acc));
3948 		} else {
3949 			bls.type = SLI_BLS_RJT;
3950 			ocs_memcpy(&bls.u.rjt, iparam->bls.payload, sizeof(bls.u.rjt));
3951 		}
3952 
3953 		bls.ox_id = iparam->bls.ox_id;
3954 		bls.rx_id = iparam->bls.rx_id;
3955 
3956 		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3957 					   io->indicator, io->reqtag,
3958 					   SLI4_CQ_DEFAULT,
3959 					   rnode, UINT32_MAX)) {
3960 			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE error\n");
3961 			rc = OCS_HW_RTN_ERROR;
3962 		}
3963 		break;
3964 	}
3965 	case OCS_HW_BLS_ACC_SID:
3966 	{
3967 		sli_bls_payload_t	bls;
3968 
3969 		bls.type = SLI_BLS_ACC;
3970 		ocs_memcpy(&bls.u.acc, iparam->bls_sid.payload, sizeof(bls.u.acc));
3971 
3972 		bls.ox_id = iparam->bls_sid.ox_id;
3973 		bls.rx_id = iparam->bls_sid.rx_id;
3974 
3975 		if (sli_xmit_bls_rsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &bls,
3976 					   io->indicator, io->reqtag,
3977 					   SLI4_CQ_DEFAULT,
3978 					   rnode, iparam->bls_sid.s_id)) {
3979 			ocs_log_err(hw->os, "XMIT_BLS_RSP64 WQE SID error\n");
3980 			rc = OCS_HW_RTN_ERROR;
3981 		}
3982 		break;
3983 	}
3984 	case OCS_HW_BCAST:
3985 		if ( (!send) || sli_xmit_bcast64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, send, len,
3986 					iparam->bcast.timeout, io->indicator, io->reqtag,
3987 					SLI4_CQ_DEFAULT, rnode,
3988 					iparam->bcast.r_ctl, iparam->bcast.type, iparam->bcast.df_ctl)) {
3989 			ocs_log_err(hw->os, "XMIT_BCAST64 WQE error\n");
3990 			rc = OCS_HW_RTN_ERROR;
3991 		}
3992 		break;
3993 	default:
3994 		ocs_log_err(hw->os, "bad SRRS type %#x\n", type);
3995 		rc = OCS_HW_RTN_ERROR;
3996 	}
3997 
3998 	if (OCS_HW_RTN_SUCCESS == rc) {
3999 		if (io->wq == NULL) {
4000 			io->wq = ocs_hw_queue_next_wq(hw, io);
4001 			ocs_hw_assert(io->wq != NULL);
4002 		}
4003 		io->xbusy = TRUE;
4004 
4005 		/*
4006 		 * Add IO to active io wqe list before submitting, in case the
4007 		 * wcqe processing preempts this thread.
4008 		 */
4009 		OCS_STAT(io->wq->use_count++);
4010 		ocs_hw_add_io_timed_wqe(hw, io);
4011 		rc = hw_wq_write(io->wq, &io->wqe);
4012 		if (rc >= 0) {
4013 			/* non-negative return is success */
4014 			rc = 0;
4015 		} else {
4016 			/* failed to write wqe, remove from active wqe list */
4017 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4018 			io->xbusy = FALSE;
4019 			ocs_hw_remove_io_timed_wqe(hw, io);
4020 		}
4021 	}
4022 
4023 	return rc;
4024 }
4025 
4026 /**
4027  * @ingroup io
4028  * @brief Send a read, write, or response IO.
4029  *
4030  * @par Description
4031  * This routine supports sending a higher-level IO (for example, FCP) between two endpoints
4032  * as a target or initiator. Examples include:
4033  *  - Sending read data and good response (target).
4034  *  - Sending a response (target with no data or after receiving write data).
4035  *  .
4036  * This routine assumes all IOs use the SGL associated with the HW IO. Prior to
4037  * calling this routine, the data should be loaded using ocs_hw_io_add_sge().
4038  *
4039  * @param hw Hardware context.
4040  * @param type Type of IO (target read, target response, and so on).
4041  * @param io Previously-allocated HW IO object.
4042  * @param len Length, in bytes, of data to send.
4043  * @param iparam IO parameters.
4044  * @param rnode Destination of data (that is, a remote node).
4045  * @param cb Function call upon completion of sending data (may be NULL).
4046  * @param arg Argument to pass to IO completion function.
4047  *
4048  * @return Returns 0 on success, or a non-zero value on failure.
4049  *
4050  * @todo
4051  *  - Support specifiying relative offset.
4052  *  - Use a WQ other than 0.
4053  */
4054 ocs_hw_rtn_e
4055 ocs_hw_io_send(ocs_hw_t *hw, ocs_hw_io_type_e type, ocs_hw_io_t *io,
4056 		uint32_t len, ocs_hw_io_param_t *iparam, ocs_remote_node_t *rnode,
4057 		void *cb, void *arg)
4058 {
4059 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
4060 	uint32_t	rpi;
4061 	uint8_t		send_wqe = TRUE;
4062 
4063 	CPUTRACE("");
4064 
4065 	if (!hw || !io || !rnode || !iparam) {
4066 		ocs_log_err(NULL, "bad parm hw=%p io=%p iparam=%p rnode=%p\n",
4067 			    hw, io, iparam, rnode);
4068 		return OCS_HW_RTN_ERROR;
4069 	}
4070 
4071 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4072 		ocs_log_err(hw->os, "cannot send IO, HW state=%d\n", hw->state);
4073 		return OCS_HW_RTN_ERROR;
4074 	}
4075 
4076 	rpi = rnode->indicator;
4077 
4078 	if (hw->workaround.use_unregistered_rpi && (rpi == UINT32_MAX)) {
4079 		rpi = hw->workaround.unregistered_rid;
4080 		ocs_log_test(hw->os, "using unregistered RPI: %d\n", rpi);
4081 	}
4082 
4083 	/*
4084 	 * Save state needed during later stages
4085 	 */
4086 	io->rnode = rnode;
4087 	io->type  = type;
4088 	io->done  = cb;
4089 	io->arg   = arg;
4090 
4091 	/*
4092 	 * Format the work queue entry used to send the IO
4093 	 */
4094 	switch (type) {
4095 	case OCS_HW_IO_INITIATOR_READ:
4096 		/*
4097 		 * If use_dif_quarantine workaround is in effect, and dif_separates then mark the
4098 		 * initiator read IO for quarantine
4099 		 */
4100 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4101 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4102 			io->quarantine = TRUE;
4103 		}
4104 
4105 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4106 				iparam->fcp_ini.rsp);
4107 
4108 		if (sli_fcp_iread64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge, len,
4109 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT, rpi, rnode,
4110 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4111 					iparam->fcp_ini.timeout)) {
4112 			ocs_log_err(hw->os, "IREAD WQE error\n");
4113 			rc = OCS_HW_RTN_ERROR;
4114 		}
4115 		break;
4116 	case OCS_HW_IO_INITIATOR_WRITE:
4117 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4118 				iparam->fcp_ini.rsp);
4119 
4120 		if (sli_fcp_iwrite64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4121 					 len, iparam->fcp_ini.first_burst,
4122 					 io->indicator, io->reqtag,
4123 					SLI4_CQ_DEFAULT, rpi, rnode,
4124 					iparam->fcp_ini.dif_oper, iparam->fcp_ini.blk_size,
4125 					iparam->fcp_ini.timeout)) {
4126 			ocs_log_err(hw->os, "IWRITE WQE error\n");
4127 			rc = OCS_HW_RTN_ERROR;
4128 		}
4129 		break;
4130 	case OCS_HW_IO_INITIATOR_NODATA:
4131 		ocs_hw_io_ini_sge(hw, io, iparam->fcp_ini.cmnd, iparam->fcp_ini.cmnd_size,
4132 				iparam->fcp_ini.rsp);
4133 
4134 		if (sli_fcp_icmnd64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl,
4135 					io->indicator, io->reqtag, SLI4_CQ_DEFAULT,
4136 					rpi, rnode, iparam->fcp_ini.timeout)) {
4137 			ocs_log_err(hw->os, "ICMND WQE error\n");
4138 			rc = OCS_HW_RTN_ERROR;
4139 		}
4140 		break;
4141 	case OCS_HW_IO_TARGET_WRITE: {
4142 		uint16_t flags = iparam->fcp_tgt.flags;
4143 		fcp_xfer_rdy_iu_t *xfer = io->xfer_rdy.virt;
4144 
4145 		/*
4146 		 * Fill in the XFER_RDY for IF_TYPE 0 devices
4147 		 */
4148 		*((uint32_t *)xfer->fcp_data_ro) = ocs_htobe32(iparam->fcp_tgt.offset);
4149 		*((uint32_t *)xfer->fcp_burst_len) = ocs_htobe32(len);
4150 		*((uint32_t *)xfer->rsvd) = 0;
4151 
4152 		if (io->xbusy) {
4153 			flags |= SLI4_IO_CONTINUATION;
4154 		} else {
4155 			flags &= ~SLI4_IO_CONTINUATION;
4156 		}
4157 
4158 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4159 
4160 		/*
4161 		 * If use_dif_quarantine workaround is in effect, and this is a DIF enabled IO
4162 		 * then mark the target write IO for quarantine
4163 		 */
4164 		if (hw->workaround.use_dif_quarantine && (hw->config.dif_mode == OCS_HW_DIF_MODE_SEPARATE) &&
4165 		    (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4166 			io->quarantine = TRUE;
4167 		}
4168 
4169 		/*
4170 		 * BZ 161832 Workaround:
4171 		 * Check for use_dif_sec_xri workaround.  Note, even though the first dataphase
4172 		 * doesn't really need a secondary XRI, we allocate one anyway, as this avoids the
4173 		 * potential for deadlock where all XRI's are allocated as primaries to IOs that
4174 		 * are on hw->sec_hio_wait_list.   If this secondary XRI is not for the first
4175 		 * data phase, it is marked for quarantine.
4176 		 */
4177 		if (hw->workaround.use_dif_sec_xri && (iparam->fcp_tgt.dif_oper != OCS_HW_DIF_OPER_DISABLED)) {
4178 
4179 			/*
4180 			 * If we have allocated a chained SGL for skyhawk, then
4181 			 * we can re-use this for the sec_hio.
4182 			 */
4183 			if (io->ovfl_io != NULL) {
4184 				io->sec_hio = io->ovfl_io;
4185 				io->sec_hio->quarantine = TRUE;
4186 			} else {
4187 				io->sec_hio = ocs_hw_io_alloc(hw);
4188 			}
4189 			if (io->sec_hio == NULL) {
4190 				/* Failed to allocate, so save full request context and put
4191 				 * this IO on the wait list
4192 				 */
4193 				io->sec_iparam = *iparam;
4194 				io->sec_len = len;
4195 				ocs_lock(&hw->io_lock);
4196 					ocs_list_remove(&hw->io_inuse,  io);
4197 					ocs_list_add_tail(&hw->sec_hio_wait_list, io);
4198 					io->state = OCS_HW_IO_STATE_WAIT_SEC_HIO;
4199 					hw->sec_hio_wait_count++;
4200 				ocs_unlock(&hw->io_lock);
4201 				send_wqe = FALSE;
4202 				/* Done */
4203 				break;
4204 			}
4205 			/* We quarantine the secondary IO if this is the second or subsequent data phase */
4206 			if (io->xbusy) {
4207 				io->sec_hio->quarantine = TRUE;
4208 			}
4209 		}
4210 
4211 		/*
4212 		 * If not the first data phase, and io->sec_hio has been allocated, then issue
4213 		 * FCP_CONT_TRECEIVE64 WQE, otherwise use the usual FCP_TRECEIVE64 WQE
4214 		 */
4215 		if (io->xbusy && (io->sec_hio != NULL)) {
4216 			if (sli_fcp_cont_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4217 						   iparam->fcp_tgt.offset, len, io->indicator, io->sec_hio->indicator,
4218 						   io->reqtag, SLI4_CQ_DEFAULT,
4219 						   iparam->fcp_tgt.ox_id, rpi, rnode,
4220 						   flags,
4221 						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4222 						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4223 				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4224 				rc = OCS_HW_RTN_ERROR;
4225 			}
4226 		} else {
4227 			if (sli_fcp_treceive64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4228 						   iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4229 						   SLI4_CQ_DEFAULT,
4230 						   iparam->fcp_tgt.ox_id, rpi, rnode,
4231 						   flags,
4232 						   iparam->fcp_tgt.dif_oper, iparam->fcp_tgt.blk_size,
4233 						   iparam->fcp_tgt.cs_ctl, iparam->fcp_tgt.app_id)) {
4234 				ocs_log_err(hw->os, "TRECEIVE WQE error\n");
4235 				rc = OCS_HW_RTN_ERROR;
4236 			}
4237 		}
4238 		break;
4239 	}
4240 	case OCS_HW_IO_TARGET_READ: {
4241 		uint16_t flags = iparam->fcp_tgt.flags;
4242 
4243 		if (io->xbusy) {
4244 			flags |= SLI4_IO_CONTINUATION;
4245 		} else {
4246 			flags &= ~SLI4_IO_CONTINUATION;
4247 		}
4248 
4249 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4250 		if (sli_fcp_tsend64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, &io->def_sgl, io->first_data_sge,
4251 					iparam->fcp_tgt.offset, len, io->indicator, io->reqtag,
4252 					SLI4_CQ_DEFAULT,
4253 					iparam->fcp_tgt.ox_id, rpi, rnode,
4254 					flags,
4255 					iparam->fcp_tgt.dif_oper,
4256 					iparam->fcp_tgt.blk_size,
4257 					iparam->fcp_tgt.cs_ctl,
4258 					iparam->fcp_tgt.app_id)) {
4259 			ocs_log_err(hw->os, "TSEND WQE error\n");
4260 			rc = OCS_HW_RTN_ERROR;
4261 		} else if (hw->workaround.retain_tsend_io_length) {
4262 			io->length = len;
4263 		}
4264 		break;
4265 	}
4266 	case OCS_HW_IO_TARGET_RSP: {
4267 		uint16_t flags = iparam->fcp_tgt.flags;
4268 
4269 		if (io->xbusy) {
4270 			flags |= SLI4_IO_CONTINUATION;
4271 		} else {
4272 			flags &= ~SLI4_IO_CONTINUATION;
4273 		}
4274 
4275 		/* post a new auto xfer ready buffer */
4276 		if (hw->auto_xfer_rdy_enabled && io->is_port_owned) {
4277 			if ((io->auto_xfer_rdy_dnrx = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1))) {
4278 				flags |= SLI4_IO_DNRX;
4279 			}
4280 		}
4281 
4282 		io->tgt_wqe_timeout = iparam->fcp_tgt.timeout;
4283 		if (sli_fcp_trsp64_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size,
4284 					&io->def_sgl,
4285 					len,
4286 					io->indicator, io->reqtag,
4287 					SLI4_CQ_DEFAULT,
4288 					iparam->fcp_tgt.ox_id,
4289 					rpi, rnode,
4290 					flags, iparam->fcp_tgt.cs_ctl,
4291 					io->is_port_owned,
4292 					iparam->fcp_tgt.app_id)) {
4293 			ocs_log_err(hw->os, "TRSP WQE error\n");
4294 			rc = OCS_HW_RTN_ERROR;
4295 		}
4296 
4297 		break;
4298 	}
4299 	default:
4300 		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4301 		rc = OCS_HW_RTN_ERROR;
4302 	}
4303 
4304 	if (send_wqe && (OCS_HW_RTN_SUCCESS == rc)) {
4305 		if (io->wq == NULL) {
4306 			io->wq = ocs_hw_queue_next_wq(hw, io);
4307 			ocs_hw_assert(io->wq != NULL);
4308 		}
4309 
4310 		io->xbusy = TRUE;
4311 
4312 		/*
4313 		 * Add IO to active io wqe list before submitting, in case the
4314 		 * wcqe processing preempts this thread.
4315 		 */
4316 		OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
4317 		OCS_STAT(io->wq->use_count++);
4318 		ocs_hw_add_io_timed_wqe(hw, io);
4319 		rc = hw_wq_write(io->wq, &io->wqe);
4320 		if (rc >= 0) {
4321 			/* non-negative return is success */
4322 			rc = 0;
4323 		} else {
4324 			/* failed to write wqe, remove from active wqe list */
4325 			ocs_log_err(hw->os, "sli_queue_write failed: %d\n", rc);
4326 			io->xbusy = FALSE;
4327 			ocs_hw_remove_io_timed_wqe(hw, io);
4328 		}
4329 	}
4330 
4331 	return rc;
4332 }
4333 
4334 /**
4335  * @brief Send a raw frame
4336  *
4337  * @par Description
4338  * Using the SEND_FRAME_WQE, a frame consisting of header and payload is sent.
4339  *
4340  * @param hw Pointer to HW object.
4341  * @param hdr Pointer to a little endian formatted FC header.
4342  * @param sof Value to use as the frame SOF.
4343  * @param eof Value to use as the frame EOF.
4344  * @param payload Pointer to payload DMA buffer.
4345  * @param ctx Pointer to caller provided send frame context.
4346  * @param callback Callback function.
4347  * @param arg Callback function argument.
4348  *
4349  * @return Returns 0 on success, or a negative error code value on failure.
4350  */
4351 ocs_hw_rtn_e
4352 ocs_hw_send_frame(ocs_hw_t *hw, fc_header_le_t *hdr, uint8_t sof, uint8_t eof, ocs_dma_t *payload,
4353 		   ocs_hw_send_frame_context_t *ctx, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
4354 {
4355 	int32_t rc;
4356 	ocs_hw_wqe_t *wqe;
4357 	uint32_t xri;
4358 	hw_wq_t *wq;
4359 
4360 	wqe = &ctx->wqe;
4361 
4362 	/* populate the callback object */
4363 	ctx->hw = hw;
4364 
4365 	/* Fetch and populate request tag */
4366 	ctx->wqcb = ocs_hw_reqtag_alloc(hw, callback, arg);
4367 	if (ctx->wqcb == NULL) {
4368 		ocs_log_err(hw->os, "can't allocate request tag\n");
4369 		return OCS_HW_RTN_NO_RESOURCES;
4370 	}
4371 
4372 	/* Choose a work queue, first look for a class[1] wq, otherwise just use wq[0] */
4373 	wq = ocs_varray_iter_next(hw->wq_class_array[1]);
4374 	if (wq == NULL) {
4375 		wq = hw->hw_wq[0];
4376 	}
4377 
4378 	/* Set XRI and RX_ID in the header based on which WQ, and which send_frame_io we are using */
4379 	xri = wq->send_frame_io->indicator;
4380 
4381 	/* Build the send frame WQE */
4382 	rc = sli_send_frame_wqe(&hw->sli, wqe->wqebuf, hw->sli.config.wqe_size, sof, eof, (uint32_t*) hdr, payload,
4383 				payload->len, OCS_HW_SEND_FRAME_TIMEOUT, xri, ctx->wqcb->instance_index);
4384 	if (rc) {
4385 		ocs_log_err(hw->os, "sli_send_frame_wqe failed: %d\n", rc);
4386 		return OCS_HW_RTN_ERROR;
4387 	}
4388 
4389 	/* Write to WQ */
4390 	rc = hw_wq_write(wq, wqe);
4391 	if (rc) {
4392 		ocs_log_err(hw->os, "hw_wq_write failed: %d\n", rc);
4393 		return OCS_HW_RTN_ERROR;
4394 	}
4395 
4396 	OCS_STAT(wq->use_count++);
4397 
4398 	return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
4399 }
4400 
4401 ocs_hw_rtn_e
4402 ocs_hw_io_register_sgl(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *sgl, uint32_t sgl_count)
4403 {
4404 	if (sli_get_sgl_preregister(&hw->sli)) {
4405 		ocs_log_err(hw->os, "can't use temporary SGL with pre-registered SGLs\n");
4406 		return OCS_HW_RTN_ERROR;
4407 	}
4408 	io->ovfl_sgl = sgl;
4409 	io->ovfl_sgl_count = sgl_count;
4410 	io->ovfl_io = NULL;
4411 
4412 	return OCS_HW_RTN_SUCCESS;
4413 }
4414 
4415 static void
4416 ocs_hw_io_restore_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4417 {
4418 	/* Restore the default */
4419 	io->sgl = &io->def_sgl;
4420 	io->sgl_count = io->def_sgl_count;
4421 
4422 	/*
4423 	 * For skyhawk, we need to free the IO allocated for the chained
4424 	 * SGL. For all devices, clear the overflow fields on the IO.
4425 	 *
4426 	 * Note: For DIF IOs, we may be using the same XRI for the sec_hio and
4427 	 *       the chained SGLs. If so, then we clear the ovfl_io field
4428 	 *       when the sec_hio is freed.
4429 	 */
4430 	if (io->ovfl_io != NULL) {
4431 		ocs_hw_io_free(hw, io->ovfl_io);
4432 		io->ovfl_io = NULL;
4433 	}
4434 
4435 	/* Clear the overflow SGL */
4436 	io->ovfl_sgl = NULL;
4437 	io->ovfl_sgl_count = 0;
4438 	io->ovfl_lsp = NULL;
4439 }
4440 
4441 /**
4442  * @ingroup io
4443  * @brief Initialize the scatter gather list entries of an IO.
4444  *
4445  * @param hw Hardware context.
4446  * @param io Previously-allocated HW IO object.
4447  * @param type Type of IO (target read, target response, and so on).
4448  *
4449  * @return Returns 0 on success, or a non-zero value on failure.
4450  */
4451 ocs_hw_rtn_e
4452 ocs_hw_io_init_sges(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_io_type_e type)
4453 {
4454 	sli4_sge_t	*data = NULL;
4455 	uint32_t	i = 0;
4456 	uint32_t	skips = 0;
4457 
4458 	if (!hw || !io) {
4459 		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p\n",
4460 			    hw, io);
4461 		return OCS_HW_RTN_ERROR;
4462 	}
4463 
4464 	/* Clear / reset the scatter-gather list */
4465 	io->sgl = &io->def_sgl;
4466 	io->sgl_count = io->def_sgl_count;
4467 	io->first_data_sge = 0;
4468 
4469 	ocs_memset(io->sgl->virt, 0, 2 * sizeof(sli4_sge_t));
4470 	io->n_sge = 0;
4471 	io->sge_offset = 0;
4472 
4473 	io->type = type;
4474 
4475 	data = io->sgl->virt;
4476 
4477 	/*
4478 	 * Some IO types have underlying hardware requirements on the order
4479 	 * of SGEs. Process all special entries here.
4480 	 */
4481 	switch (type) {
4482 	case OCS_HW_IO_INITIATOR_READ:
4483 	case OCS_HW_IO_INITIATOR_WRITE:
4484 	case OCS_HW_IO_INITIATOR_NODATA:
4485 		/*
4486 		 * No skips, 2 special for initiator I/Os
4487 		 * The addresses and length are written later
4488 		 */
4489 		/* setup command pointer */
4490 		data->sge_type = SLI4_SGE_TYPE_DATA;
4491 		data++;
4492 
4493 		/* setup response pointer */
4494 		data->sge_type = SLI4_SGE_TYPE_DATA;
4495 
4496 		if (OCS_HW_IO_INITIATOR_NODATA == type) {
4497 			data->last = TRUE;
4498 		}
4499 		data++;
4500 
4501 		io->n_sge = 2;
4502 		break;
4503 	case OCS_HW_IO_TARGET_WRITE:
4504 #define OCS_TARGET_WRITE_SKIPS	2
4505 		skips = OCS_TARGET_WRITE_SKIPS;
4506 
4507 		/* populate host resident XFER_RDY buffer */
4508 		data->sge_type = SLI4_SGE_TYPE_DATA;
4509 		data->buffer_address_high = ocs_addr32_hi(io->xfer_rdy.phys);
4510 		data->buffer_address_low  = ocs_addr32_lo(io->xfer_rdy.phys);
4511 		data->buffer_length = io->xfer_rdy.size;
4512 		data++;
4513 
4514 		skips--;
4515 
4516 		io->n_sge = 1;
4517 		break;
4518 	case OCS_HW_IO_TARGET_READ:
4519 		/*
4520 		 * For FCP_TSEND64, the first 2 entries are SKIP SGE's
4521 		 */
4522 #define OCS_TARGET_READ_SKIPS	2
4523 		skips = OCS_TARGET_READ_SKIPS;
4524 		break;
4525 	case OCS_HW_IO_TARGET_RSP:
4526 		/*
4527 		 * No skips, etc. for FCP_TRSP64
4528 		 */
4529 		break;
4530 	default:
4531 		ocs_log_err(hw->os, "unsupported IO type %#x\n", type);
4532 		return OCS_HW_RTN_ERROR;
4533 	}
4534 
4535 	/*
4536 	 * Write skip entries
4537 	 */
4538 	for (i = 0; i < skips; i++) {
4539 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4540 		data++;
4541 	}
4542 
4543 	io->n_sge += skips;
4544 
4545 	/*
4546 	 * Set last
4547 	 */
4548 	data->last = TRUE;
4549 
4550 	return OCS_HW_RTN_SUCCESS;
4551 }
4552 
4553 /**
4554  * @ingroup io
4555  * @brief Add a T10 PI seed scatter gather list entry.
4556  *
4557  * @param hw Hardware context.
4558  * @param io Previously-allocated HW IO object.
4559  * @param dif_info Pointer to T10 DIF fields, or NULL if no DIF.
4560  *
4561  * @return Returns 0 on success, or a non-zero value on failure.
4562  */
4563 ocs_hw_rtn_e
4564 ocs_hw_io_add_seed_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_hw_dif_info_t *dif_info)
4565 {
4566 	sli4_sge_t	*data = NULL;
4567 	sli4_diseed_sge_t *dif_seed;
4568 
4569 	/* If no dif_info, or dif_oper is disabled, then just return success */
4570 	if ((dif_info == NULL) || (dif_info->dif_oper == OCS_HW_DIF_OPER_DISABLED)) {
4571 		return OCS_HW_RTN_SUCCESS;
4572 	}
4573 
4574 	if (!hw || !io) {
4575 		ocs_log_err(hw ? hw->os : NULL, "bad parameter hw=%p io=%p dif_info=%p\n",
4576 			    hw, io, dif_info);
4577 		return OCS_HW_RTN_ERROR;
4578 	}
4579 
4580 	data = io->sgl->virt;
4581 	data += io->n_sge;
4582 
4583 	/* If we are doing T10 DIF add the DIF Seed SGE */
4584 	ocs_memset(data, 0, sizeof(sli4_diseed_sge_t));
4585 	dif_seed = (sli4_diseed_sge_t *)data;
4586 	dif_seed->ref_tag_cmp = dif_info->ref_tag_cmp;
4587 	dif_seed->ref_tag_repl = dif_info->ref_tag_repl;
4588 	dif_seed->app_tag_repl = dif_info->app_tag_repl;
4589 	dif_seed->repl_app_tag = dif_info->repl_app_tag;
4590 	if (SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) {
4591 		dif_seed->atrt = dif_info->disable_app_ref_ffff;
4592 		dif_seed->at = dif_info->disable_app_ffff;
4593 	}
4594 	dif_seed->sge_type = SLI4_SGE_TYPE_DISEED;
4595 	/* Workaround for SKH (BZ157233) */
4596 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4597 		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type) && dif_info->dif_separate) {
4598 		dif_seed->sge_type = SLI4_SGE_TYPE_SKIP;
4599 	}
4600 
4601 	dif_seed->app_tag_cmp = dif_info->app_tag_cmp;
4602 	dif_seed->dif_blk_size = dif_info->blk_size;
4603 	dif_seed->auto_incr_ref_tag = dif_info->auto_incr_ref_tag;
4604 	dif_seed->check_app_tag = dif_info->check_app_tag;
4605 	dif_seed->check_ref_tag = dif_info->check_ref_tag;
4606 	dif_seed->check_crc = dif_info->check_guard;
4607 	dif_seed->new_ref_tag = dif_info->repl_ref_tag;
4608 
4609 	switch(dif_info->dif_oper) {
4610 	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CRC:
4611 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4612 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CRC;
4613 		break;
4614 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_NODIF:
4615 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4616 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_NODIF;
4617 		break;
4618 	case OCS_HW_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM:
4619 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4620 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_NODIF_OUT_CHKSUM;
4621 		break;
4622 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF:
4623 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4624 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_NODIF;
4625 		break;
4626 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CRC:
4627 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4628 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CRC;
4629 		break;
4630 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM:
4631 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4632 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CHKSUM;
4633 		break;
4634 	case OCS_HW_SGE_DIF_OP_IN_CRC_OUT_CHKSUM:
4635 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4636 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CRC_OUT_CHKSUM;
4637 		break;
4638 	case OCS_HW_SGE_DIF_OP_IN_CHKSUM_OUT_CRC:
4639 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4640 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_CHKSUM_OUT_CRC;
4641 		break;
4642 	case OCS_HW_SGE_DIF_OP_IN_RAW_OUT_RAW:
4643 		dif_seed->dif_op_rx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4644 		dif_seed->dif_op_tx = SLI4_SGE_DIF_OP_IN_RAW_OUT_RAW;
4645 		break;
4646 	default:
4647 		ocs_log_err(hw->os, "unsupported DIF operation %#x\n",
4648 			    dif_info->dif_oper);
4649 		return OCS_HW_RTN_ERROR;
4650 	}
4651 
4652 	/*
4653 	 * Set last, clear previous last
4654 	 */
4655 	data->last = TRUE;
4656 	if (io->n_sge) {
4657 		data[-1].last = FALSE;
4658 	}
4659 
4660 	io->n_sge++;
4661 
4662 	return OCS_HW_RTN_SUCCESS;
4663 }
4664 
4665 static ocs_hw_rtn_e
4666 ocs_hw_io_overflow_sgl(ocs_hw_t *hw, ocs_hw_io_t *io)
4667 {
4668 	sli4_lsp_sge_t *lsp;
4669 
4670 	/* fail if we're already pointing to the overflow SGL */
4671 	if (io->sgl == io->ovfl_sgl) {
4672 		return OCS_HW_RTN_ERROR;
4673 	}
4674 
4675 	/*
4676 	 * For skyhawk, we can use another SGL to extend the SGL list. The
4677 	 * Chained entry must not be in the first 4 entries.
4678 	 *
4679 	 * Note: For DIF enabled IOs, we will use the ovfl_io for the sec_hio.
4680 	 */
4681 	if (sli_get_sgl_preregister(&hw->sli) &&
4682 	    io->def_sgl_count > 4 &&
4683 	    io->ovfl_io == NULL &&
4684 	    ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4685 		(SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli)))) {
4686 		io->ovfl_io = ocs_hw_io_alloc(hw);
4687 		if (io->ovfl_io != NULL) {
4688 			/*
4689 			 * Note: We can't call ocs_hw_io_register_sgl() here
4690 			 * because it checks that SGLs are not pre-registered
4691 			 * and for shyhawk, preregistered SGLs are required.
4692 			 */
4693 			io->ovfl_sgl = &io->ovfl_io->def_sgl;
4694 			io->ovfl_sgl_count = io->ovfl_io->def_sgl_count;
4695 		}
4696 	}
4697 
4698 	/* fail if we don't have an overflow SGL registered */
4699 	if (io->ovfl_sgl == NULL) {
4700 		return OCS_HW_RTN_ERROR;
4701 	}
4702 
4703 	/*
4704 	 * Overflow, we need to put a link SGE in the last location of the current SGL, after
4705 	 * copying the the last SGE to the overflow SGL
4706 	 */
4707 
4708 	((sli4_sge_t*)io->ovfl_sgl->virt)[0] = ((sli4_sge_t*)io->sgl->virt)[io->n_sge - 1];
4709 
4710 	lsp = &((sli4_lsp_sge_t*)io->sgl->virt)[io->n_sge - 1];
4711 	ocs_memset(lsp, 0, sizeof(*lsp));
4712 
4713 	if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
4714 	    (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
4715 		sli_skh_chain_sge_build(&hw->sli,
4716 					(sli4_sge_t*)lsp,
4717 					io->ovfl_io->indicator,
4718 					0, /* frag_num */
4719 					0); /* offset */
4720 	} else {
4721 		lsp->buffer_address_high = ocs_addr32_hi(io->ovfl_sgl->phys);
4722 		lsp->buffer_address_low  = ocs_addr32_lo(io->ovfl_sgl->phys);
4723 		lsp->sge_type = SLI4_SGE_TYPE_LSP;
4724 		lsp->last = 0;
4725 		io->ovfl_lsp = lsp;
4726 		io->ovfl_lsp->segment_length = sizeof(sli4_sge_t);
4727 	}
4728 
4729 	/* Update the current SGL pointer, and n_sgl */
4730 	io->sgl = io->ovfl_sgl;
4731 	io->sgl_count = io->ovfl_sgl_count;
4732 	io->n_sge = 1;
4733 
4734 	return OCS_HW_RTN_SUCCESS;
4735 }
4736 
4737 /**
4738  * @ingroup io
4739  * @brief Add a scatter gather list entry to an IO.
4740  *
4741  * @param hw Hardware context.
4742  * @param io Previously-allocated HW IO object.
4743  * @param addr Physical address.
4744  * @param length Length of memory pointed to by @c addr.
4745  *
4746  * @return Returns 0 on success, or a non-zero value on failure.
4747  */
4748 ocs_hw_rtn_e
4749 ocs_hw_io_add_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr, uint32_t length)
4750 {
4751 	sli4_sge_t	*data = NULL;
4752 
4753 	if (!hw || !io || !addr || !length) {
4754 		ocs_log_err(hw ? hw->os : NULL,
4755 			    "bad parameter hw=%p io=%p addr=%lx length=%u\n",
4756 			    hw, io, addr, length);
4757 		return OCS_HW_RTN_ERROR;
4758 	}
4759 
4760 	if ((length != 0) && (io->n_sge + 1) > io->sgl_count) {
4761 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_SUCCESS) {
4762 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4763 			return OCS_HW_RTN_ERROR;
4764 		}
4765 	}
4766 
4767 	if (length > sli_get_max_sge(&hw->sli)) {
4768 		ocs_log_err(hw->os, "length of SGE %d bigger than allowed %d\n",
4769 			    length, sli_get_max_sge(&hw->sli));
4770 		return OCS_HW_RTN_ERROR;
4771 	}
4772 
4773 	data = io->sgl->virt;
4774 	data += io->n_sge;
4775 
4776 	data->sge_type = SLI4_SGE_TYPE_DATA;
4777 	data->buffer_address_high = ocs_addr32_hi(addr);
4778 	data->buffer_address_low  = ocs_addr32_lo(addr);
4779 	data->buffer_length = length;
4780 	data->data_offset = io->sge_offset;
4781 	/*
4782 	 * Always assume this is the last entry and mark as such.
4783 	 * If this is not the first entry unset the "last SGE"
4784 	 * indication for the previous entry
4785 	 */
4786 	data->last = TRUE;
4787 	if (io->n_sge) {
4788 		data[-1].last = FALSE;
4789 	}
4790 
4791 	/* Set first_data_bde if not previously set */
4792 	if (io->first_data_sge == 0) {
4793 		io->first_data_sge = io->n_sge;
4794 	}
4795 
4796 	io->sge_offset += length;
4797 	io->n_sge++;
4798 
4799 	/* Update the linked segment length (only executed after overflow has begun) */
4800 	if (io->ovfl_lsp != NULL) {
4801 		io->ovfl_lsp->segment_length = io->n_sge * sizeof(sli4_sge_t);
4802 	}
4803 
4804 	return OCS_HW_RTN_SUCCESS;
4805 }
4806 
4807 /**
4808  * @ingroup io
4809  * @brief Add a T10 DIF scatter gather list entry to an IO.
4810  *
4811  * @param hw Hardware context.
4812  * @param io Previously-allocated HW IO object.
4813  * @param addr DIF physical address.
4814  *
4815  * @return Returns 0 on success, or a non-zero value on failure.
4816  */
4817 ocs_hw_rtn_e
4818 ocs_hw_io_add_dif_sge(ocs_hw_t *hw, ocs_hw_io_t *io, uintptr_t addr)
4819 {
4820 	sli4_dif_sge_t	*data = NULL;
4821 
4822 	if (!hw || !io || !addr) {
4823 		ocs_log_err(hw ? hw->os : NULL,
4824 			    "bad parameter hw=%p io=%p addr=%lx\n",
4825 			    hw, io, addr);
4826 		return OCS_HW_RTN_ERROR;
4827 	}
4828 
4829 	if ((io->n_sge + 1) > hw->config.n_sgl) {
4830 		if (ocs_hw_io_overflow_sgl(hw, io) != OCS_HW_RTN_ERROR) {
4831 			ocs_log_err(hw->os, "SGL full (%d)\n", io->n_sge);
4832 			return OCS_HW_RTN_ERROR;
4833 		}
4834 	}
4835 
4836 	data = io->sgl->virt;
4837 	data += io->n_sge;
4838 
4839 	data->sge_type = SLI4_SGE_TYPE_DIF;
4840 	/* Workaround for SKH (BZ157233) */
4841 	if (((io->type == OCS_HW_IO_TARGET_WRITE) || (io->type == OCS_HW_IO_INITIATOR_READ)) &&
4842 		(SLI4_IF_TYPE_LANCER_FC_ETH != hw->sli.if_type)) {
4843 		data->sge_type = SLI4_SGE_TYPE_SKIP;
4844 	}
4845 
4846 	data->buffer_address_high = ocs_addr32_hi(addr);
4847 	data->buffer_address_low  = ocs_addr32_lo(addr);
4848 
4849 	/*
4850 	 * Always assume this is the last entry and mark as such.
4851 	 * If this is not the first entry unset the "last SGE"
4852 	 * indication for the previous entry
4853 	 */
4854 	data->last = TRUE;
4855 	if (io->n_sge) {
4856 		data[-1].last = FALSE;
4857 	}
4858 
4859 	io->n_sge++;
4860 
4861 	return OCS_HW_RTN_SUCCESS;
4862 }
4863 
4864 /**
4865  * @ingroup io
4866  * @brief Abort a previously-started IO.
4867  *
4868  * @param hw Hardware context.
4869  * @param io_to_abort The IO to abort.
4870  * @param send_abts Boolean to have the hardware automatically
4871  * generate an ABTS.
4872  * @param cb Function call upon completion of the abort (may be NULL).
4873  * @param arg Argument to pass to abort completion function.
4874  *
4875  * @return Returns 0 on success, or a non-zero value on failure.
4876  */
4877 ocs_hw_rtn_e
4878 ocs_hw_io_abort(ocs_hw_t *hw, ocs_hw_io_t *io_to_abort, uint32_t send_abts, void *cb, void *arg)
4879 {
4880 	sli4_abort_type_e atype = SLI_ABORT_MAX;
4881 	uint32_t	id = 0, mask = 0;
4882 	ocs_hw_rtn_e	rc = OCS_HW_RTN_SUCCESS;
4883 	hw_wq_callback_t *wqcb;
4884 
4885 	if (!hw || !io_to_abort) {
4886 		ocs_log_err(hw ? hw->os : NULL,
4887 			    "bad parameter hw=%p io=%p\n",
4888 			    hw, io_to_abort);
4889 		return OCS_HW_RTN_ERROR;
4890 	}
4891 
4892 	if (hw->state != OCS_HW_STATE_ACTIVE) {
4893 		ocs_log_err(hw->os, "cannot send IO abort, HW state=%d\n",
4894 			    hw->state);
4895 		return OCS_HW_RTN_ERROR;
4896 	}
4897 
4898 	/* take a reference on IO being aborted */
4899 	if (ocs_ref_get_unless_zero(&io_to_abort->ref) == 0) {
4900 		/* command no longer active */
4901 		ocs_log_test(hw ? hw->os : NULL,
4902 				"io not active xri=0x%x tag=0x%x\n",
4903 				io_to_abort->indicator, io_to_abort->reqtag);
4904 		return OCS_HW_RTN_IO_NOT_ACTIVE;
4905 	}
4906 
4907 	/* non-port owned XRI checks */
4908 	/* Must have a valid WQ reference */
4909 	if (io_to_abort->wq == NULL) {
4910 		ocs_log_test(hw->os, "io_to_abort xri=0x%x not active on WQ\n",
4911 				io_to_abort->indicator);
4912 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4913 		return OCS_HW_RTN_IO_NOT_ACTIVE;
4914 	}
4915 
4916 	/* Validation checks complete; now check to see if already being aborted */
4917 	ocs_lock(&hw->io_abort_lock);
4918 		if (io_to_abort->abort_in_progress) {
4919 			ocs_unlock(&hw->io_abort_lock);
4920 			ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
4921 			ocs_log_debug(hw ? hw->os : NULL,
4922 				"io already being aborted xri=0x%x tag=0x%x\n",
4923 				io_to_abort->indicator, io_to_abort->reqtag);
4924 			return OCS_HW_RTN_IO_ABORT_IN_PROGRESS;
4925 		}
4926 
4927 		/*
4928 		 * This IO is not already being aborted. Set flag so we won't try to
4929 		 * abort it again. After all, we only have one abort_done callback.
4930 		 */
4931 		io_to_abort->abort_in_progress = 1;
4932 	ocs_unlock(&hw->io_abort_lock);
4933 
4934 	/*
4935 	 * If we got here, the possibilities are:
4936 	 * - host owned xri
4937 	 *	- io_to_abort->wq_index != UINT32_MAX
4938 	 *		- submit ABORT_WQE to same WQ
4939 	 * - port owned xri:
4940 	 *	- rxri: io_to_abort->wq_index == UINT32_MAX
4941 	 *		- submit ABORT_WQE to any WQ
4942 	 *	- non-rxri
4943 	 *		- io_to_abort->index != UINT32_MAX
4944 	 *			- submit ABORT_WQE to same WQ
4945 	 *		- io_to_abort->index == UINT32_MAX
4946 	 *			- submit ABORT_WQE to any WQ
4947 	 */
4948 	io_to_abort->abort_done = cb;
4949 	io_to_abort->abort_arg  = arg;
4950 
4951 	atype = SLI_ABORT_XRI;
4952 	id = io_to_abort->indicator;
4953 
4954 	/* Allocate a request tag for the abort portion of this IO */
4955 	wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_abort, io_to_abort);
4956 	if (wqcb == NULL) {
4957 		ocs_log_err(hw->os, "can't allocate request tag\n");
4958 		return OCS_HW_RTN_NO_RESOURCES;
4959 	}
4960 	io_to_abort->abort_reqtag = wqcb->instance_index;
4961 
4962 	/*
4963 	 * If the wqe is on the pending list, then set this wqe to be
4964 	 * aborted when the IO's wqe is removed from the list.
4965 	 */
4966 	if (io_to_abort->wq != NULL) {
4967 		sli_queue_lock(io_to_abort->wq->queue);
4968 			if (ocs_list_on_list(&io_to_abort->wqe.link)) {
4969 				io_to_abort->wqe.abort_wqe_submit_needed = 1;
4970 				io_to_abort->wqe.send_abts = send_abts;
4971 				io_to_abort->wqe.id = id;
4972 				io_to_abort->wqe.abort_reqtag = io_to_abort->abort_reqtag;
4973 				sli_queue_unlock(io_to_abort->wq->queue);
4974 				return 0;
4975 		}
4976 		sli_queue_unlock(io_to_abort->wq->queue);
4977 	}
4978 
4979 	if (sli_abort_wqe(&hw->sli, io_to_abort->wqe.wqebuf, hw->sli.config.wqe_size, atype, send_abts, id, mask,
4980 			  io_to_abort->abort_reqtag, SLI4_CQ_DEFAULT)) {
4981 		ocs_log_err(hw->os, "ABORT WQE error\n");
4982 		io_to_abort->abort_reqtag = UINT32_MAX;
4983 		ocs_hw_reqtag_free(hw, wqcb);
4984 		rc = OCS_HW_RTN_ERROR;
4985 	}
4986 
4987 	if (OCS_HW_RTN_SUCCESS == rc) {
4988 		if (io_to_abort->wq == NULL) {
4989 			io_to_abort->wq = ocs_hw_queue_next_wq(hw, io_to_abort);
4990 			ocs_hw_assert(io_to_abort->wq != NULL);
4991 		}
4992 		/* ABORT_WQE does not actually utilize an XRI on the Port,
4993 		 * therefore, keep xbusy as-is to track the exchange's state,
4994 		 * not the ABORT_WQE's state
4995 		 */
4996 		rc = hw_wq_write(io_to_abort->wq, &io_to_abort->wqe);
4997 		if (rc > 0) {
4998 			/* non-negative return is success */
4999 			rc = 0;
5000 			/* can't abort an abort so skip adding to timed wqe list */
5001 		}
5002 	}
5003 
5004 	if (OCS_HW_RTN_SUCCESS != rc) {
5005 		ocs_lock(&hw->io_abort_lock);
5006 			io_to_abort->abort_in_progress = 0;
5007 		ocs_unlock(&hw->io_abort_lock);
5008 		ocs_ref_put(&io_to_abort->ref); /* ocs_ref_get(): same function */
5009 	}
5010 	return rc;
5011 }
5012 
5013 /**
5014  * @ingroup io
5015  * @brief Return the OX_ID/RX_ID of the IO.
5016  *
5017  * @param hw Hardware context.
5018  * @param io HW IO object.
5019  *
5020  * @return Returns X_ID on success, or -1 on failure.
5021  */
5022 int32_t
5023 ocs_hw_io_get_xid(ocs_hw_t *hw, ocs_hw_io_t *io)
5024 {
5025 	if (!hw || !io) {
5026 		ocs_log_err(hw ? hw->os : NULL,
5027 			    "bad parameter hw=%p io=%p\n", hw, io);
5028 		return -1;
5029 	}
5030 
5031 	return io->indicator;
5032 }
5033 
5034 
5035 typedef struct ocs_hw_fw_write_cb_arg {
5036 	ocs_hw_fw_cb_t cb;
5037 	void *arg;
5038 } ocs_hw_fw_write_cb_arg_t;
5039 
5040 typedef struct ocs_hw_sfp_cb_arg {
5041 	ocs_hw_sfp_cb_t cb;
5042 	void *arg;
5043 	ocs_dma_t payload;
5044 } ocs_hw_sfp_cb_arg_t;
5045 
5046 typedef struct ocs_hw_temp_cb_arg {
5047 	ocs_hw_temp_cb_t cb;
5048 	void *arg;
5049 } ocs_hw_temp_cb_arg_t;
5050 
5051 typedef struct ocs_hw_link_stat_cb_arg {
5052 	ocs_hw_link_stat_cb_t cb;
5053 	void *arg;
5054 } ocs_hw_link_stat_cb_arg_t;
5055 
5056 typedef struct ocs_hw_host_stat_cb_arg {
5057 	ocs_hw_host_stat_cb_t cb;
5058 	void *arg;
5059 } ocs_hw_host_stat_cb_arg_t;
5060 
5061 typedef struct ocs_hw_dump_get_cb_arg {
5062 	ocs_hw_dump_get_cb_t cb;
5063 	void *arg;
5064 	void *mbox_cmd;
5065 } ocs_hw_dump_get_cb_arg_t;
5066 
5067 typedef struct ocs_hw_dump_clear_cb_arg {
5068 	ocs_hw_dump_clear_cb_t cb;
5069 	void *arg;
5070 	void *mbox_cmd;
5071 } ocs_hw_dump_clear_cb_arg_t;
5072 
5073 /**
5074  * @brief Write a portion of a firmware image to the device.
5075  *
5076  * @par Description
5077  * Calls the correct firmware write function based on the device type.
5078  *
5079  * @param hw Hardware context.
5080  * @param dma DMA structure containing the firmware image chunk.
5081  * @param size Size of the firmware image chunk.
5082  * @param offset Offset, in bytes, from the beginning of the firmware image.
5083  * @param last True if this is the last chunk of the image.
5084  * Causes the image to be committed to flash.
5085  * @param cb Pointer to a callback function that is called when the command completes.
5086  * The callback function prototype is
5087  * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5088  * @param arg Pointer to be passed to the callback function.
5089  *
5090  * @return Returns 0 on success, or a non-zero value on failure.
5091  */
5092 ocs_hw_rtn_e
5093 ocs_hw_firmware_write(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5094 {
5095 	if (hw->sli.if_type == SLI4_IF_TYPE_LANCER_FC_ETH) {
5096 		return ocs_hw_firmware_write_lancer(hw, dma, size, offset, last, cb, arg);
5097 	} else {
5098 		/* Write firmware_write for BE3/Skyhawk not supported */
5099 		return -1;
5100 	}
5101 }
5102 
5103 /**
5104  * @brief Write a portion of a firmware image to the Emulex XE201 ASIC (Lancer).
5105  *
5106  * @par Description
5107  * Creates a SLI_CONFIG mailbox command, fills it with the correct values to write a
5108  * firmware image chunk, and then sends the command with ocs_hw_command(). On completion,
5109  * the callback function ocs_hw_fw_write_cb() gets called to free the mailbox
5110  * and to signal the caller that the write has completed.
5111  *
5112  * @param hw Hardware context.
5113  * @param dma DMA structure containing the firmware image chunk.
5114  * @param size Size of the firmware image chunk.
5115  * @param offset Offset, in bytes, from the beginning of the firmware image.
5116  * @param last True if this is the last chunk of the image. Causes the image to be committed to flash.
5117  * @param cb Pointer to a callback function that is called when the command completes.
5118  * The callback function prototype is
5119  * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
5120  * @param arg Pointer to be passed to the callback function.
5121  *
5122  * @return Returns 0 on success, or a non-zero value on failure.
5123  */
5124 ocs_hw_rtn_e
5125 ocs_hw_firmware_write_lancer(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, int last, ocs_hw_fw_cb_t cb, void *arg)
5126 {
5127 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5128 	uint8_t *mbxdata;
5129 	ocs_hw_fw_write_cb_arg_t *cb_arg;
5130 	int noc=0;	/* No Commit bit - set to 1 for testing */
5131 
5132 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
5133 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
5134 		return OCS_HW_RTN_ERROR;
5135 	}
5136 
5137 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5138 	if (mbxdata == NULL) {
5139 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5140 		return OCS_HW_RTN_NO_MEMORY;
5141 	}
5142 
5143 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_fw_write_cb_arg_t), OCS_M_NOWAIT);
5144 	if (cb_arg == NULL) {
5145 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5146 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5147 		return OCS_HW_RTN_NO_MEMORY;
5148 	}
5149 
5150 	cb_arg->cb = cb;
5151 	cb_arg->arg = arg;
5152 
5153 	if (sli_cmd_common_write_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE, noc, last,
5154 			size, offset, "/prg/", dma)) {
5155 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_fw_write, cb_arg);
5156 	}
5157 
5158 	if (rc != OCS_HW_RTN_SUCCESS) {
5159 		ocs_log_test(hw->os, "COMMON_WRITE_OBJECT failed\n");
5160 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5161 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5162 	}
5163 
5164 	return rc;
5165 
5166 }
5167 
5168 /**
5169  * @brief Called when the WRITE OBJECT command completes.
5170  *
5171  * @par Description
5172  * Get the number of bytes actually written out of the response, free the mailbox
5173  * that was malloc'd by ocs_hw_firmware_write(),
5174  * then call the callback and pass the status and bytes written.
5175  *
5176  * @param hw Hardware context.
5177  * @param status Status field from the mbox completion.
5178  * @param mqe Mailbox response structure.
5179  * @param arg Pointer to a callback function that signals the caller that the command is done.
5180  * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_written)</tt>.
5181  *
5182  * @return Returns 0.
5183  */
5184 static int32_t
5185 ocs_hw_cb_fw_write(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5186 {
5187 
5188 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
5189 	sli4_res_common_write_object_t* wr_obj_rsp = (sli4_res_common_write_object_t*) &(mbox_rsp->payload.embed);
5190 	ocs_hw_fw_write_cb_arg_t *cb_arg = arg;
5191 	uint32_t bytes_written;
5192 	uint16_t mbox_status;
5193 	uint32_t change_status;
5194 
5195 	bytes_written = wr_obj_rsp->actual_write_length;
5196 	mbox_status = mbox_rsp->hdr.status;
5197 	change_status = wr_obj_rsp->change_status;
5198 
5199 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5200 
5201 	if (cb_arg) {
5202 		if (cb_arg->cb) {
5203 			if ((status == 0) && mbox_status) {
5204 				status = mbox_status;
5205 			}
5206 			cb_arg->cb(status, bytes_written, change_status, cb_arg->arg);
5207 		}
5208 
5209 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
5210 	}
5211 
5212 	return 0;
5213 
5214 }
5215 
5216 /**
5217  * @brief Called when the READ_TRANSCEIVER_DATA command completes.
5218  *
5219  * @par Description
5220  * Get the number of bytes read out of the response, free the mailbox that was malloc'd
5221  * by ocs_hw_get_sfp(), then call the callback and pass the status and bytes written.
5222  *
5223  * @param hw Hardware context.
5224  * @param status Status field from the mbox completion.
5225  * @param mqe Mailbox response structure.
5226  * @param arg Pointer to a callback function that signals the caller that the command is done.
5227  * The callback function prototype is
5228  * <tt>void cb(int32_t status, uint32_t bytes_written, uint32_t *data, void *arg)</tt>.
5229  *
5230  * @return Returns 0.
5231  */
5232 static int32_t
5233 ocs_hw_cb_sfp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5234 {
5235 
5236 	ocs_hw_sfp_cb_arg_t *cb_arg = arg;
5237 	ocs_dma_t *payload = NULL;
5238 	sli4_res_common_read_transceiver_data_t* mbox_rsp = NULL;
5239 	uint32_t bytes_written;
5240 
5241 	if (cb_arg) {
5242 		payload = &(cb_arg->payload);
5243 		if (cb_arg->cb) {
5244 			mbox_rsp = (sli4_res_common_read_transceiver_data_t*) payload->virt;
5245 			bytes_written = mbox_rsp->hdr.response_length;
5246 			if ((status == 0) && mbox_rsp->hdr.status) {
5247 				status = mbox_rsp->hdr.status;
5248 			}
5249 			cb_arg->cb(hw->os, status, bytes_written, mbox_rsp->page_data, cb_arg->arg);
5250 		}
5251 
5252 		ocs_dma_free(hw->os, &cb_arg->payload);
5253 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5254 	}
5255 
5256 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5257 	return 0;
5258 }
5259 
5260 /**
5261  * @ingroup io
5262  * @brief Function to retrieve the SFP information.
5263  *
5264  * @param hw Hardware context.
5265  * @param page The page of SFP data to retrieve (0xa0 or 0xa2).
5266  * @param cb Function call upon completion of sending the data (may be NULL).
5267  * @param arg Argument to pass to IO completion function.
5268  *
5269  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5270  */
5271 ocs_hw_rtn_e
5272 ocs_hw_get_sfp(ocs_hw_t *hw, uint16_t page, ocs_hw_sfp_cb_t cb, void *arg)
5273 {
5274 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5275 	ocs_hw_sfp_cb_arg_t *cb_arg;
5276 	uint8_t *mbxdata;
5277 
5278 	/* mbxdata holds the header of the command */
5279 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5280 	if (mbxdata == NULL) {
5281 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5282 		return OCS_HW_RTN_NO_MEMORY;
5283 	}
5284 
5285 	/* cb_arg holds the data that will be passed to the callback on completion */
5286 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_sfp_cb_arg_t), OCS_M_NOWAIT);
5287 	if (cb_arg == NULL) {
5288 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5289 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5290 		return OCS_HW_RTN_NO_MEMORY;
5291 	}
5292 
5293 	cb_arg->cb = cb;
5294 	cb_arg->arg = arg;
5295 
5296 	/* payload holds the non-embedded portion */
5297 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_read_transceiver_data_t),
5298 			  OCS_MIN_DMA_ALIGNMENT)) {
5299 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
5300 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5301 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5302 		return OCS_HW_RTN_NO_MEMORY;
5303 	}
5304 
5305 	/* Send the HW command */
5306 	if (sli_cmd_common_read_transceiver_data(&hw->sli, mbxdata, SLI4_BMBX_SIZE, page,
5307 	    &cb_arg->payload)) {
5308 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_sfp, cb_arg);
5309 	}
5310 
5311 	if (rc != OCS_HW_RTN_SUCCESS) {
5312 		ocs_log_test(hw->os, "READ_TRANSCEIVER_DATA failed with status %d\n",
5313 				rc);
5314 		ocs_dma_free(hw->os, &cb_arg->payload);
5315 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_sfp_cb_arg_t));
5316 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5317 	}
5318 
5319 	return rc;
5320 }
5321 
5322 /**
5323  * @brief Function to retrieve the temperature information.
5324  *
5325  * @param hw Hardware context.
5326  * @param cb Function call upon completion of sending the data (may be NULL).
5327  * @param arg Argument to pass to IO completion function.
5328  *
5329  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5330  */
5331 ocs_hw_rtn_e
5332 ocs_hw_get_temperature(ocs_hw_t *hw, ocs_hw_temp_cb_t cb, void *arg)
5333 {
5334 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5335 	ocs_hw_temp_cb_arg_t *cb_arg;
5336 	uint8_t *mbxdata;
5337 
5338 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5339 	if (mbxdata == NULL) {
5340 		ocs_log_err(hw->os, "failed to malloc mbox");
5341 		return OCS_HW_RTN_NO_MEMORY;
5342 	}
5343 
5344 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_temp_cb_arg_t), OCS_M_NOWAIT);
5345 	if (cb_arg == NULL) {
5346 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5347 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5348 		return OCS_HW_RTN_NO_MEMORY;
5349 	}
5350 
5351 	cb_arg->cb = cb;
5352 	cb_arg->arg = arg;
5353 
5354 	if (sli_cmd_dump_type4(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5355 				SLI4_WKI_TAG_SAT_TEM)) {
5356 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_temp, cb_arg);
5357 	}
5358 
5359 	if (rc != OCS_HW_RTN_SUCCESS) {
5360 		ocs_log_test(hw->os, "DUMP_TYPE4 failed\n");
5361 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5362 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5363 	}
5364 
5365 	return rc;
5366 }
5367 
5368 /**
5369  * @brief Called when the DUMP command completes.
5370  *
5371  * @par Description
5372  * Get the temperature data out of the response, free the mailbox that was malloc'd
5373  * by ocs_hw_get_temperature(), then call the callback and pass the status and data.
5374  *
5375  * @param hw Hardware context.
5376  * @param status Status field from the mbox completion.
5377  * @param mqe Mailbox response structure.
5378  * @param arg Pointer to a callback function that signals the caller that the command is done.
5379  * The callback function prototype is defined by ocs_hw_temp_cb_t.
5380  *
5381  * @return Returns 0.
5382  */
5383 static int32_t
5384 ocs_hw_cb_temp(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5385 {
5386 
5387 	sli4_cmd_dump4_t* mbox_rsp = (sli4_cmd_dump4_t*) mqe;
5388 	ocs_hw_temp_cb_arg_t *cb_arg = arg;
5389 	uint32_t curr_temp = mbox_rsp->resp_data[0]; /* word 5 */
5390 	uint32_t crit_temp_thrshld = mbox_rsp->resp_data[1]; /* word 6*/
5391 	uint32_t warn_temp_thrshld = mbox_rsp->resp_data[2]; /* word 7 */
5392 	uint32_t norm_temp_thrshld = mbox_rsp->resp_data[3]; /* word 8 */
5393 	uint32_t fan_off_thrshld = mbox_rsp->resp_data[4];   /* word 9 */
5394 	uint32_t fan_on_thrshld = mbox_rsp->resp_data[5];    /* word 10 */
5395 
5396 	if (cb_arg) {
5397 		if (cb_arg->cb) {
5398 			if ((status == 0) && mbox_rsp->hdr.status) {
5399 				status = mbox_rsp->hdr.status;
5400 			}
5401 			cb_arg->cb(status,
5402 				   curr_temp,
5403 				   crit_temp_thrshld,
5404 				   warn_temp_thrshld,
5405 				   norm_temp_thrshld,
5406 				   fan_off_thrshld,
5407 				   fan_on_thrshld,
5408 				   cb_arg->arg);
5409 		}
5410 
5411 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_temp_cb_arg_t));
5412 	}
5413 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5414 
5415 	return 0;
5416 }
5417 
5418 /**
5419  * @brief Function to retrieve the link statistics.
5420  *
5421  * @param hw Hardware context.
5422  * @param req_ext_counters If TRUE, then the extended counters will be requested.
5423  * @param clear_overflow_flags If TRUE, then overflow flags will be cleared.
5424  * @param clear_all_counters If TRUE, the counters will be cleared.
5425  * @param cb Function call upon completion of sending the data (may be NULL).
5426  * @param arg Argument to pass to IO completion function.
5427  *
5428  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5429  */
5430 ocs_hw_rtn_e
5431 ocs_hw_get_link_stats(ocs_hw_t *hw,
5432 			uint8_t req_ext_counters,
5433 			uint8_t clear_overflow_flags,
5434 			uint8_t clear_all_counters,
5435 			ocs_hw_link_stat_cb_t cb,
5436 			void *arg)
5437 {
5438 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5439 	ocs_hw_link_stat_cb_arg_t *cb_arg;
5440 	uint8_t *mbxdata;
5441 
5442 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5443 	if (mbxdata == NULL) {
5444 		ocs_log_err(hw->os, "failed to malloc mbox");
5445 		return OCS_HW_RTN_NO_MEMORY;
5446 	}
5447 
5448 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_link_stat_cb_arg_t), OCS_M_NOWAIT);
5449 	if (cb_arg == NULL) {
5450 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5451 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5452 		return OCS_HW_RTN_NO_MEMORY;
5453 	}
5454 
5455 	cb_arg->cb = cb;
5456 	cb_arg->arg = arg;
5457 
5458 	if (sli_cmd_read_link_stats(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
5459 				    req_ext_counters,
5460 				    clear_overflow_flags,
5461 				    clear_all_counters)) {
5462 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_link_stat, cb_arg);
5463 	}
5464 
5465 	if (rc != OCS_HW_RTN_SUCCESS) {
5466 		ocs_log_test(hw->os, "READ_LINK_STATS failed\n");
5467 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5468 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5469 	}
5470 
5471 	return rc;
5472 }
5473 
5474 /**
5475  * @brief Called when the READ_LINK_STAT command completes.
5476  *
5477  * @par Description
5478  * Get the counters out of the response, free the mailbox that was malloc'd
5479  * by ocs_hw_get_link_stats(), then call the callback and pass the status and data.
5480  *
5481  * @param hw Hardware context.
5482  * @param status Status field from the mbox completion.
5483  * @param mqe Mailbox response structure.
5484  * @param arg Pointer to a callback function that signals the caller that the command is done.
5485  * The callback function prototype is defined by ocs_hw_link_stat_cb_t.
5486  *
5487  * @return Returns 0.
5488  */
5489 static int32_t
5490 ocs_hw_cb_link_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5491 {
5492 
5493 	sli4_cmd_read_link_stats_t* mbox_rsp = (sli4_cmd_read_link_stats_t*) mqe;
5494 	ocs_hw_link_stat_cb_arg_t *cb_arg = arg;
5495 	ocs_hw_link_stat_counts_t counts[OCS_HW_LINK_STAT_MAX];
5496 	uint32_t num_counters = (mbox_rsp->gec ? 20 : 13);
5497 
5498 	ocs_memset(counts, 0, sizeof(ocs_hw_link_stat_counts_t) *
5499 		   OCS_HW_LINK_STAT_MAX);
5500 
5501 	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].overflow = mbox_rsp->w02of;
5502 	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].overflow = mbox_rsp->w03of;
5503 	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].overflow = mbox_rsp->w04of;
5504 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].overflow = mbox_rsp->w05of;
5505 	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].overflow = mbox_rsp->w06of;
5506 	counts[OCS_HW_LINK_STAT_CRC_COUNT].overflow = mbox_rsp->w07of;
5507 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].overflow = mbox_rsp->w08of;
5508 	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].overflow = mbox_rsp->w09of;
5509 	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].overflow = mbox_rsp->w10of;
5510 	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].overflow = mbox_rsp->w11of;
5511 	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].overflow = mbox_rsp->w12of;
5512 	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].overflow = mbox_rsp->w13of;
5513 	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].overflow = mbox_rsp->w14of;
5514 	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].overflow = mbox_rsp->w15of;
5515 	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].overflow = mbox_rsp->w16of;
5516 	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].overflow = mbox_rsp->w17of;
5517 	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].overflow = mbox_rsp->w18of;
5518 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].overflow = mbox_rsp->w19of;
5519 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].overflow = mbox_rsp->w20of;
5520 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].overflow = mbox_rsp->w21of;
5521 
5522 	counts[OCS_HW_LINK_STAT_LINK_FAILURE_COUNT].counter = mbox_rsp->link_failure_error_count;
5523 	counts[OCS_HW_LINK_STAT_LOSS_OF_SYNC_COUNT].counter = mbox_rsp->loss_of_sync_error_count;
5524 	counts[OCS_HW_LINK_STAT_LOSS_OF_SIGNAL_COUNT].counter = mbox_rsp->loss_of_signal_error_count;
5525 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_COUNT].counter = mbox_rsp->primitive_sequence_error_count;
5526 	counts[OCS_HW_LINK_STAT_INVALID_XMIT_WORD_COUNT].counter = mbox_rsp->invalid_transmission_word_error_count;
5527 	counts[OCS_HW_LINK_STAT_CRC_COUNT].counter = mbox_rsp->crc_error_count;
5528 	counts[OCS_HW_LINK_STAT_PRIMITIVE_SEQ_TIMEOUT_COUNT].counter = mbox_rsp->primitive_sequence_event_timeout_count;
5529 	counts[OCS_HW_LINK_STAT_ELASTIC_BUFFER_OVERRUN_COUNT].counter = mbox_rsp->elastic_buffer_overrun_error_count;
5530 	counts[OCS_HW_LINK_STAT_ARB_TIMEOUT_COUNT].counter = mbox_rsp->arbitration_fc_al_timout_count;
5531 	counts[OCS_HW_LINK_STAT_ADVERTISED_RCV_B2B_CREDIT].counter = mbox_rsp->advertised_receive_bufftor_to_buffer_credit;
5532 	counts[OCS_HW_LINK_STAT_CURR_RCV_B2B_CREDIT].counter = mbox_rsp->current_receive_buffer_to_buffer_credit;
5533 	counts[OCS_HW_LINK_STAT_ADVERTISED_XMIT_B2B_CREDIT].counter = mbox_rsp->advertised_transmit_buffer_to_buffer_credit;
5534 	counts[OCS_HW_LINK_STAT_CURR_XMIT_B2B_CREDIT].counter = mbox_rsp->current_transmit_buffer_to_buffer_credit;
5535 	counts[OCS_HW_LINK_STAT_RCV_EOFA_COUNT].counter = mbox_rsp->received_eofa_count;
5536 	counts[OCS_HW_LINK_STAT_RCV_EOFDTI_COUNT].counter = mbox_rsp->received_eofdti_count;
5537 	counts[OCS_HW_LINK_STAT_RCV_EOFNI_COUNT].counter = mbox_rsp->received_eofni_count;
5538 	counts[OCS_HW_LINK_STAT_RCV_SOFF_COUNT].counter = mbox_rsp->received_soff_count;
5539 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_AER_COUNT].counter = mbox_rsp->received_dropped_no_aer_count;
5540 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_RPI_COUNT].counter = mbox_rsp->received_dropped_no_available_rpi_resources_count;
5541 	counts[OCS_HW_LINK_STAT_RCV_DROPPED_NO_XRI_COUNT].counter = mbox_rsp->received_dropped_no_available_xri_resources_count;
5542 
5543 	if (cb_arg) {
5544 		if (cb_arg->cb) {
5545 			if ((status == 0) && mbox_rsp->hdr.status) {
5546 				status = mbox_rsp->hdr.status;
5547 			}
5548 			cb_arg->cb(status,
5549 				   num_counters,
5550 				   counts,
5551 				   cb_arg->arg);
5552 		}
5553 
5554 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_link_stat_cb_arg_t));
5555 	}
5556 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5557 
5558 	return 0;
5559 }
5560 
5561 /**
5562  * @brief Function to retrieve the link and host statistics.
5563  *
5564  * @param hw Hardware context.
5565  * @param cc clear counters, if TRUE all counters will be cleared.
5566  * @param cb Function call upon completion of receiving the data.
5567  * @param arg Argument to pass to pointer fc hosts statistics structure.
5568  *
5569  * @return Returns OCS_HW_RTN_SUCCESS, OCS_HW_RTN_ERROR, or OCS_HW_RTN_NO_MEMORY.
5570  */
5571 ocs_hw_rtn_e
5572 ocs_hw_get_host_stats(ocs_hw_t *hw, uint8_t cc, ocs_hw_host_stat_cb_t cb, void *arg)
5573 {
5574 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
5575 	ocs_hw_host_stat_cb_arg_t *cb_arg;
5576 	uint8_t *mbxdata;
5577 
5578 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO);
5579 	if (mbxdata == NULL) {
5580 		ocs_log_err(hw->os, "failed to malloc mbox");
5581 		return OCS_HW_RTN_NO_MEMORY;
5582 	}
5583 
5584 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_host_stat_cb_arg_t), 0);
5585 	if (cb_arg == NULL) {
5586 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5587 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5588 		return OCS_HW_RTN_NO_MEMORY;
5589 	 }
5590 
5591 	 cb_arg->cb = cb;
5592 	 cb_arg->arg = arg;
5593 
5594 	 /* Send the HW command to get the host stats */
5595 	if (sli_cmd_read_status(&hw->sli, mbxdata, SLI4_BMBX_SIZE, cc)) {
5596 		 rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_cb_host_stat, cb_arg);
5597 	}
5598 
5599 	if (rc != OCS_HW_RTN_SUCCESS) {
5600 		ocs_log_test(hw->os, "READ_HOST_STATS failed\n");
5601 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5602 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5603 	}
5604 
5605 	return rc;
5606 }
5607 
5608 
5609 /**
5610  * @brief Called when the READ_STATUS command completes.
5611  *
5612  * @par Description
5613  * Get the counters out of the response, free the mailbox that was malloc'd
5614  * by ocs_hw_get_host_stats(), then call the callback and pass
5615  * the status and data.
5616  *
5617  * @param hw Hardware context.
5618  * @param status Status field from the mbox completion.
5619  * @param mqe Mailbox response structure.
5620  * @param arg Pointer to a callback function that signals the caller that the command is done.
5621  * The callback function prototype is defined by
5622  * ocs_hw_host_stat_cb_t.
5623  *
5624  * @return Returns 0.
5625  */
5626 static int32_t
5627 ocs_hw_cb_host_stat(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5628 {
5629 
5630 	sli4_cmd_read_status_t* mbox_rsp = (sli4_cmd_read_status_t*) mqe;
5631 	ocs_hw_host_stat_cb_arg_t *cb_arg = arg;
5632 	ocs_hw_host_stat_counts_t counts[OCS_HW_HOST_STAT_MAX];
5633 	uint32_t num_counters = OCS_HW_HOST_STAT_MAX;
5634 
5635 	ocs_memset(counts, 0, sizeof(ocs_hw_host_stat_counts_t) *
5636 		   OCS_HW_HOST_STAT_MAX);
5637 
5638 	counts[OCS_HW_HOST_STAT_TX_KBYTE_COUNT].counter = mbox_rsp->transmit_kbyte_count;
5639 	counts[OCS_HW_HOST_STAT_RX_KBYTE_COUNT].counter = mbox_rsp->receive_kbyte_count;
5640 	counts[OCS_HW_HOST_STAT_TX_FRAME_COUNT].counter = mbox_rsp->transmit_frame_count;
5641 	counts[OCS_HW_HOST_STAT_RX_FRAME_COUNT].counter = mbox_rsp->receive_frame_count;
5642 	counts[OCS_HW_HOST_STAT_TX_SEQ_COUNT].counter = mbox_rsp->transmit_sequence_count;
5643 	counts[OCS_HW_HOST_STAT_RX_SEQ_COUNT].counter = mbox_rsp->receive_sequence_count;
5644 	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_ORIG].counter = mbox_rsp->total_exchanges_originator;
5645 	counts[OCS_HW_HOST_STAT_TOTAL_EXCH_RESP].counter = mbox_rsp->total_exchanges_responder;
5646 	counts[OCS_HW_HOSY_STAT_RX_P_BSY_COUNT].counter = mbox_rsp->receive_p_bsy_count;
5647 	counts[OCS_HW_HOST_STAT_RX_F_BSY_COUNT].counter = mbox_rsp->receive_f_bsy_count;
5648 	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_RQ_BUF_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_rq_buffer_count;
5649 	counts[OCS_HW_HOST_STAT_EMPTY_RQ_TIMEOUT_COUNT].counter = mbox_rsp->empty_rq_timeout_count;
5650 	counts[OCS_HW_HOST_STAT_DROP_FRM_DUE_TO_NO_XRI_COUNT].counter = mbox_rsp->dropped_frames_due_to_no_xri_count;
5651 	counts[OCS_HW_HOST_STAT_EMPTY_XRI_POOL_COUNT].counter = mbox_rsp->empty_xri_pool_count;
5652 
5653 
5654 	if (cb_arg) {
5655 		if (cb_arg->cb) {
5656 			if ((status == 0) && mbox_rsp->hdr.status) {
5657 				status = mbox_rsp->hdr.status;
5658 			}
5659 			cb_arg->cb(status,
5660 				   num_counters,
5661 				   counts,
5662 				   cb_arg->arg);
5663 		}
5664 
5665 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_host_stat_cb_arg_t));
5666 	}
5667 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
5668 
5669 	return 0;
5670 }
5671 
5672 /**
5673  * @brief HW link configuration enum to the CLP string value mapping.
5674  *
5675  * This structure provides a mapping from the ocs_hw_linkcfg_e
5676  * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5677  * control) to the CLP string that is used
5678  * in the DMTF_CLP_CMD mailbox command.
5679  */
5680 typedef struct ocs_hw_linkcfg_map_s {
5681 	ocs_hw_linkcfg_e linkcfg;
5682 	const char *clp_str;
5683 } ocs_hw_linkcfg_map_t;
5684 
5685 /**
5686  * @brief Mapping from the HW linkcfg enum to the CLP command value
5687  * string.
5688  */
5689 static ocs_hw_linkcfg_map_t linkcfg_map[] = {
5690 	{OCS_HW_LINKCFG_4X10G, "ELX_4x10G"},
5691 	{OCS_HW_LINKCFG_1X40G, "ELX_1x40G"},
5692 	{OCS_HW_LINKCFG_2X16G, "ELX_2x16G"},
5693 	{OCS_HW_LINKCFG_4X8G, "ELX_4x8G"},
5694 	{OCS_HW_LINKCFG_4X1G, "ELX_4x1G"},
5695 	{OCS_HW_LINKCFG_2X10G, "ELX_2x10G"},
5696 	{OCS_HW_LINKCFG_2X10G_2X8G, "ELX_2x10G_2x8G"}};
5697 
5698 /**
5699  * @brief HW link configuration enum to Skyhawk link config ID mapping.
5700  *
5701  * This structure provides a mapping from the ocs_hw_linkcfg_e
5702  * enum (enum exposed for the OCS_HW_PORT_SET_LINK_CONFIG port
5703  * control) to the link config ID numbers used by Skyhawk
5704  */
5705 typedef struct ocs_hw_skyhawk_linkcfg_map_s {
5706 	ocs_hw_linkcfg_e linkcfg;
5707 	uint32_t	config_id;
5708 } ocs_hw_skyhawk_linkcfg_map_t;
5709 
5710 /**
5711  * @brief Mapping from the HW linkcfg enum to the Skyhawk link config IDs
5712  */
5713 static ocs_hw_skyhawk_linkcfg_map_t skyhawk_linkcfg_map[] = {
5714 	{OCS_HW_LINKCFG_4X10G, 0x0a},
5715 	{OCS_HW_LINKCFG_1X40G, 0x09},
5716 };
5717 
5718 /**
5719  * @brief Helper function for getting the HW linkcfg enum from the CLP
5720  * string value
5721  *
5722  * @param clp_str CLP string value from OEMELX_LinkConfig.
5723  *
5724  * @return Returns the HW linkcfg enum corresponding to clp_str.
5725  */
5726 static ocs_hw_linkcfg_e
5727 ocs_hw_linkcfg_from_clp(const char *clp_str)
5728 {
5729 	uint32_t i;
5730 	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5731 		if (ocs_strncmp(linkcfg_map[i].clp_str, clp_str, ocs_strlen(clp_str)) == 0) {
5732 			return linkcfg_map[i].linkcfg;
5733 		}
5734 	}
5735 	return OCS_HW_LINKCFG_NA;
5736 }
5737 
5738 /**
5739  * @brief Helper function for getting the CLP string value from the HW
5740  * linkcfg enum.
5741  *
5742  * @param linkcfg HW linkcfg enum.
5743  *
5744  * @return Returns the OEMELX_LinkConfig CLP string value corresponding to
5745  * given linkcfg.
5746  */
5747 static const char *
5748 ocs_hw_clp_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5749 {
5750 	uint32_t i;
5751 	for (i = 0; i < ARRAY_SIZE(linkcfg_map); i++) {
5752 		if (linkcfg_map[i].linkcfg == linkcfg) {
5753 			return linkcfg_map[i].clp_str;
5754 		}
5755 	}
5756 	return NULL;
5757 }
5758 
5759 /**
5760  * @brief Helper function for getting a Skyhawk link config ID from the HW
5761  * linkcfg enum.
5762  *
5763  * @param linkcfg HW linkcfg enum.
5764  *
5765  * @return Returns the Skyhawk link config ID corresponding to
5766  * given linkcfg.
5767  */
5768 static uint32_t
5769 ocs_hw_config_id_from_linkcfg(ocs_hw_linkcfg_e linkcfg)
5770 {
5771 	uint32_t i;
5772 	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5773 		if (skyhawk_linkcfg_map[i].linkcfg == linkcfg) {
5774 			return skyhawk_linkcfg_map[i].config_id;
5775 		}
5776 	}
5777 	return 0;
5778 }
5779 
5780 /**
5781  * @brief Helper function for getting the HW linkcfg enum from a
5782  * Skyhawk config ID.
5783  *
5784  * @param config_id Skyhawk link config ID.
5785  *
5786  * @return Returns the HW linkcfg enum corresponding to config_id.
5787  */
5788 static ocs_hw_linkcfg_e
5789 ocs_hw_linkcfg_from_config_id(const uint32_t config_id)
5790 {
5791 	uint32_t i;
5792 	for (i = 0; i < ARRAY_SIZE(skyhawk_linkcfg_map); i++) {
5793 		if (skyhawk_linkcfg_map[i].config_id == config_id) {
5794 			return skyhawk_linkcfg_map[i].linkcfg;
5795 		}
5796 	}
5797 	return OCS_HW_LINKCFG_NA;
5798 }
5799 
5800 /**
5801  * @brief Link configuration callback argument.
5802  */
5803 typedef struct ocs_hw_linkcfg_cb_arg_s {
5804 	ocs_hw_port_control_cb_t cb;
5805 	void *arg;
5806 	uint32_t opts;
5807 	int32_t status;
5808 	ocs_dma_t dma_cmd;
5809 	ocs_dma_t dma_resp;
5810 	uint32_t result_len;
5811 } ocs_hw_linkcfg_cb_arg_t;
5812 
5813 /**
5814  * @brief Set link configuration.
5815  *
5816  * @param hw Hardware context.
5817  * @param value Link configuration enum to which the link configuration is
5818  * set.
5819  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5820  * @param cb Callback function to invoke following mbx command.
5821  * @param arg Callback argument.
5822  *
5823  * @return Returns OCS_HW_RTN_SUCCESS on success.
5824  */
5825 static ocs_hw_rtn_e
5826 ocs_hw_set_linkcfg(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5827 {
5828 	if (!sli_link_is_configurable(&hw->sli)) {
5829 		ocs_log_debug(hw->os, "Function not supported\n");
5830 		return OCS_HW_RTN_ERROR;
5831 	}
5832 
5833 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
5834 		return ocs_hw_set_linkcfg_lancer(hw, value, opts, cb, arg);
5835 	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
5836 		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
5837 		return ocs_hw_set_linkcfg_skyhawk(hw, value, opts, cb, arg);
5838 	} else {
5839 		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
5840 		return OCS_HW_RTN_ERROR;
5841 	}
5842 }
5843 
5844 /**
5845  * @brief Set link configuration for Lancer
5846  *
5847  * @param hw Hardware context.
5848  * @param value Link configuration enum to which the link configuration is
5849  * set.
5850  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5851  * @param cb Callback function to invoke following mbx command.
5852  * @param arg Callback argument.
5853  *
5854  * @return Returns OCS_HW_RTN_SUCCESS on success.
5855  */
5856 static ocs_hw_rtn_e
5857 ocs_hw_set_linkcfg_lancer(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5858 {
5859 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
5860 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
5861 	const char *value_str = NULL;
5862 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5863 
5864 	/* translate ocs_hw_linkcfg_e to CLP string */
5865 	value_str = ocs_hw_clp_from_linkcfg(value);
5866 
5867 	/* allocate memory for callback argument */
5868 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
5869 	if (cb_arg == NULL) {
5870 		ocs_log_err(hw->os, "failed to malloc cb_arg");
5871 		return OCS_HW_RTN_NO_MEMORY;
5872 	}
5873 
5874 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_LinkConfig=%s", value_str);
5875 	/* allocate DMA for command  */
5876 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
5877 		ocs_log_err(hw->os, "malloc failed\n");
5878 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5879 		return OCS_HW_RTN_NO_MEMORY;
5880 	}
5881 	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
5882 	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
5883 
5884 	/* allocate DMA for response */
5885 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
5886 		ocs_log_err(hw->os, "malloc failed\n");
5887 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5888 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5889 		return OCS_HW_RTN_NO_MEMORY;
5890 	}
5891 	cb_arg->cb = cb;
5892 	cb_arg->arg = arg;
5893 	cb_arg->opts = opts;
5894 
5895 	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
5896 					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
5897 
5898 	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
5899 		/* if failed, or polling, free memory here; if success and not
5900 		 * polling, will free in callback function
5901 		 */
5902 		if (rc) {
5903 			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
5904 					(char *)cb_arg->dma_cmd.virt);
5905 		}
5906 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
5907 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
5908 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5909 	}
5910 	return rc;
5911 }
5912 
5913 /**
5914  * @brief Callback for ocs_hw_set_linkcfg_skyhawk
5915  *
5916  * @param hw Hardware context.
5917  * @param status Status from the RECONFIG_GET_LINK_INFO command.
5918  * @param mqe Mailbox response structure.
5919  * @param arg Pointer to a callback argument.
5920  *
5921  * @return none
5922  */
5923 static void
5924 ocs_hw_set_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
5925 {
5926 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
5927 
5928 	if (status) {
5929 		ocs_log_test(hw->os, "SET_RECONFIG_LINK_ID failed, status=%d\n", status);
5930 	}
5931 
5932 	/* invoke callback */
5933 	if (cb_arg->cb) {
5934 		cb_arg->cb(status, 0, cb_arg->arg);
5935 	}
5936 
5937 	/* if polling, will free memory in calling function */
5938 	if (cb_arg->opts != OCS_CMD_POLL) {
5939 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
5940 	}
5941 }
5942 
5943 /**
5944  * @brief Set link configuration for a Skyhawk
5945  *
5946  * @param hw Hardware context.
5947  * @param value Link configuration enum to which the link configuration is
5948  * set.
5949  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
5950  * @param cb Callback function to invoke following mbx command.
5951  * @param arg Callback argument.
5952  *
5953  * @return Returns OCS_HW_RTN_SUCCESS on success.
5954  */
5955 static ocs_hw_rtn_e
5956 ocs_hw_set_linkcfg_skyhawk(ocs_hw_t *hw, ocs_hw_linkcfg_e value, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
5957 {
5958 	uint8_t *mbxdata;
5959 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
5960 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
5961 	uint32_t config_id;
5962 
5963 	config_id = ocs_hw_config_id_from_linkcfg(value);
5964 
5965 	if (config_id == 0) {
5966 		ocs_log_test(hw->os, "Link config %d not supported by Skyhawk\n", value);
5967 		return OCS_HW_RTN_ERROR;
5968 	}
5969 
5970 	/* mbxdata holds the header of the command */
5971 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
5972 	if (mbxdata == NULL) {
5973 		ocs_log_err(hw->os, "failed to malloc mbox\n");
5974 		return OCS_HW_RTN_NO_MEMORY;
5975 	}
5976 
5977 	/* cb_arg holds the data that will be passed to the callback on completion */
5978 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
5979 	if (cb_arg == NULL) {
5980 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
5981 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5982 		return OCS_HW_RTN_NO_MEMORY;
5983 	}
5984 
5985 	cb_arg->cb = cb;
5986 	cb_arg->arg = arg;
5987 
5988 	if (sli_cmd_common_set_reconfig_link_id(&hw->sli, mbxdata, SLI4_BMBX_SIZE, NULL, 0, config_id)) {
5989 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_set_active_link_config_cb, cb_arg);
5990 	}
5991 
5992 	if (rc != OCS_HW_RTN_SUCCESS) {
5993 		ocs_log_err(hw->os, "SET_RECONFIG_LINK_ID failed\n");
5994 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
5995 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
5996 	} else if (opts == OCS_CMD_POLL) {
5997 		/* if we're polling we have to call the callback here. */
5998 		ocs_hw_set_active_link_config_cb(hw, 0, mbxdata, cb_arg);
5999 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6000 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6001 	} else {
6002 		/* We weren't poling, so the callback got called */
6003 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6004 	}
6005 
6006 	return rc;
6007 }
6008 
6009 /**
6010  * @brief Get link configuration.
6011  *
6012  * @param hw Hardware context.
6013  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6014  * @param cb Callback function to invoke following mbx command.
6015  * @param arg Callback argument.
6016  *
6017  * @return Returns OCS_HW_RTN_SUCCESS on success.
6018  */
6019 static ocs_hw_rtn_e
6020 ocs_hw_get_linkcfg(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6021 {
6022 	if (!sli_link_is_configurable(&hw->sli)) {
6023 		ocs_log_debug(hw->os, "Function not supported\n");
6024 		return OCS_HW_RTN_ERROR;
6025 	}
6026 
6027 	if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
6028 		return ocs_hw_get_linkcfg_lancer(hw, opts, cb, arg);
6029 	} else if ((SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) ||
6030 		   (SLI4_IF_TYPE_BE3_SKH_VF == sli_get_if_type(&hw->sli))) {
6031 		return ocs_hw_get_linkcfg_skyhawk(hw, opts, cb, arg);
6032 	} else {
6033 		ocs_log_test(hw->os, "Function not supported for this IF_TYPE\n");
6034 		return OCS_HW_RTN_ERROR;
6035 	}
6036 }
6037 
6038 /**
6039  * @brief Get link configuration for a Lancer
6040  *
6041  * @param hw Hardware context.
6042  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6043  * @param cb Callback function to invoke following mbx command.
6044  * @param arg Callback argument.
6045  *
6046  * @return Returns OCS_HW_RTN_SUCCESS on success.
6047  */
6048 static ocs_hw_rtn_e
6049 ocs_hw_get_linkcfg_lancer(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6050 {
6051 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6052 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
6053 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6054 
6055 	/* allocate memory for callback argument */
6056 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6057 	if (cb_arg == NULL) {
6058 		ocs_log_err(hw->os, "failed to malloc cb_arg");
6059 		return OCS_HW_RTN_NO_MEMORY;
6060 	}
6061 
6062 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "show / OEMELX_LinkConfig");
6063 
6064 	/* allocate DMA for command  */
6065 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6066 		ocs_log_err(hw->os, "malloc failed\n");
6067 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6068 		return OCS_HW_RTN_NO_MEMORY;
6069 	}
6070 
6071 	/* copy CLP command to DMA command */
6072 	ocs_memset(cb_arg->dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6073 	ocs_memcpy(cb_arg->dma_cmd.virt, cmd, ocs_strlen(cmd));
6074 
6075 	/* allocate DMA for response */
6076 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6077 		ocs_log_err(hw->os, "malloc failed\n");
6078 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6079 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6080 		return OCS_HW_RTN_NO_MEMORY;
6081 	}
6082 	cb_arg->cb = cb;
6083 	cb_arg->arg = arg;
6084 	cb_arg->opts = opts;
6085 
6086 	rc = ocs_hw_exec_dmtf_clp_cmd(hw, &cb_arg->dma_cmd, &cb_arg->dma_resp,
6087 					opts, ocs_hw_linkcfg_dmtf_clp_cb, cb_arg);
6088 
6089 	if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6090 		/* if failed or polling, free memory here; if not polling and success,
6091 		 * will free in callback function
6092 		 */
6093 		if (rc) {
6094 			ocs_log_test(hw->os, "CLP cmd=\"%s\" failed\n",
6095 					(char *)cb_arg->dma_cmd.virt);
6096 		}
6097 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6098 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
6099 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6100 	}
6101 	return rc;
6102 }
6103 
6104 
6105 /**
6106  * @brief Get the link configuration callback.
6107  *
6108  * @param hw Hardware context.
6109  * @param status Status from the RECONFIG_GET_LINK_INFO command.
6110  * @param mqe Mailbox response structure.
6111  * @param arg Pointer to a callback argument.
6112  *
6113  * @return none
6114  */
6115 static void
6116 ocs_hw_get_active_link_config_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6117 {
6118 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6119 	sli4_res_common_get_reconfig_link_info_t *rsp = cb_arg->dma_cmd.virt;
6120 	ocs_hw_linkcfg_e value = OCS_HW_LINKCFG_NA;
6121 
6122 	if (status) {
6123 		ocs_log_test(hw->os, "GET_RECONFIG_LINK_INFO failed, status=%d\n", status);
6124 	} else {
6125 		/* Call was successful */
6126 		value = ocs_hw_linkcfg_from_config_id(rsp->active_link_config_id);
6127 	}
6128 
6129 	/* invoke callback */
6130 	if (cb_arg->cb) {
6131 		cb_arg->cb(status, value, cb_arg->arg);
6132 	}
6133 
6134 	/* if polling, will free memory in calling function */
6135 	if (cb_arg->opts != OCS_CMD_POLL) {
6136 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6137 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6138 	}
6139 }
6140 
6141 /**
6142  * @brief Get link configuration for a Skyhawk.
6143  *
6144  * @param hw Hardware context.
6145  * @param opts Mailbox command options (OCS_CMD_NOWAIT/POLL).
6146  * @param cb Callback function to invoke following mbx command.
6147  * @param arg Callback argument.
6148  *
6149  * @return Returns OCS_HW_RTN_SUCCESS on success.
6150  */
6151 static ocs_hw_rtn_e
6152 ocs_hw_get_linkcfg_skyhawk(ocs_hw_t *hw, uint32_t opts, ocs_hw_port_control_cb_t cb, void *arg)
6153 {
6154 	uint8_t *mbxdata;
6155 	ocs_hw_linkcfg_cb_arg_t *cb_arg;
6156 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6157 
6158 	/* mbxdata holds the header of the command */
6159 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6160 	if (mbxdata == NULL) {
6161 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6162 		return OCS_HW_RTN_NO_MEMORY;
6163 	}
6164 
6165 	/* cb_arg holds the data that will be passed to the callback on completion */
6166 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_linkcfg_cb_arg_t), OCS_M_NOWAIT);
6167 	if (cb_arg == NULL) {
6168 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
6169 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6170 		return OCS_HW_RTN_NO_MEMORY;
6171 	}
6172 
6173 	cb_arg->cb = cb;
6174 	cb_arg->arg = arg;
6175 	cb_arg->opts = opts;
6176 
6177 	/* dma_mem holds the non-embedded portion */
6178 	if (ocs_dma_alloc(hw->os, &cb_arg->dma_cmd, sizeof(sli4_res_common_get_reconfig_link_info_t), 4)) {
6179 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
6180 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6181 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6182 		return OCS_HW_RTN_NO_MEMORY;
6183 	}
6184 
6185 	if (sli_cmd_common_get_reconfig_link_info(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->dma_cmd)) {
6186 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_get_active_link_config_cb, cb_arg);
6187 	}
6188 
6189 	if (rc != OCS_HW_RTN_SUCCESS) {
6190 		ocs_log_err(hw->os, "GET_RECONFIG_LINK_INFO failed\n");
6191 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6192 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6193 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6194 	} else if (opts == OCS_CMD_POLL) {
6195 		/* if we're polling we have to call the callback here. */
6196 		ocs_hw_get_active_link_config_cb(hw, 0, mbxdata, cb_arg);
6197 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6198 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6199 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_linkcfg_cb_arg_t));
6200 	} else {
6201 		/* We weren't poling, so the callback got called */
6202 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6203 	}
6204 
6205 	return rc;
6206 }
6207 
6208 /**
6209  * @brief Sets the DIF seed value.
6210  *
6211  * @param hw Hardware context.
6212  *
6213  * @return Returns OCS_HW_RTN_SUCCESS on success.
6214  */
6215 static ocs_hw_rtn_e
6216 ocs_hw_set_dif_seed(ocs_hw_t *hw)
6217 {
6218 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6219 	uint8_t buf[SLI4_BMBX_SIZE];
6220 	sli4_req_common_set_features_dif_seed_t seed_param;
6221 
6222 	ocs_memset(&seed_param, 0, sizeof(seed_param));
6223 	seed_param.seed = hw->config.dif_seed;
6224 
6225 	/* send set_features command */
6226 	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6227 					SLI4_SET_FEATURES_DIF_SEED,
6228 					4,
6229 					(uint32_t*)&seed_param)) {
6230 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6231 		if (rc) {
6232 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6233 		} else {
6234 			ocs_log_debug(hw->os, "DIF seed set to 0x%x\n",
6235 					hw->config.dif_seed);
6236 		}
6237 	} else {
6238 		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6239 		rc = OCS_HW_RTN_ERROR;
6240 	}
6241 	return rc;
6242 }
6243 
6244 
6245 /**
6246  * @brief Sets the DIF mode value.
6247  *
6248  * @param hw Hardware context.
6249  *
6250  * @return Returns OCS_HW_RTN_SUCCESS on success.
6251  */
6252 static ocs_hw_rtn_e
6253 ocs_hw_set_dif_mode(ocs_hw_t *hw)
6254 {
6255 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6256 	uint8_t buf[SLI4_BMBX_SIZE];
6257 	sli4_req_common_set_features_t10_pi_mem_model_t mode_param;
6258 
6259 	ocs_memset(&mode_param, 0, sizeof(mode_param));
6260 	mode_param.tmm = (hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? 0 : 1);
6261 
6262 	/* send set_features command */
6263 	if (sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6264 					SLI4_SET_FEATURES_DIF_MEMORY_MODE,
6265 					sizeof(mode_param),
6266 					(uint32_t*)&mode_param)) {
6267 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6268 		if (rc) {
6269 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6270 		} else {
6271 			ocs_log_test(hw->os, "DIF mode set to %s\n",
6272 				(hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE ? "inline" : "separate"));
6273 		}
6274 	} else {
6275 		ocs_log_err(hw->os, "sli_cmd_common_set_features failed\n");
6276 		rc = OCS_HW_RTN_ERROR;
6277 	}
6278 	return rc;
6279 }
6280 
6281 static void
6282 ocs_hw_watchdog_timer_cb(void *arg)
6283 {
6284 	ocs_hw_t *hw = (ocs_hw_t *)arg;
6285 
6286 	ocs_hw_config_watchdog_timer(hw);
6287 	return;
6288 }
6289 
6290 static void
6291 ocs_hw_cb_cfg_watchdog(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6292 {
6293 	uint16_t timeout = hw->watchdog_timeout;
6294 
6295 	if (status != 0) {
6296 		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", status);
6297 	} else {
6298 		if(timeout != 0) {
6299 			/* keeping callback 500ms before timeout to keep heartbeat alive */
6300 			ocs_setup_timer(hw->os, &hw->watchdog_timer, ocs_hw_watchdog_timer_cb, hw, (timeout*1000 - 500) );
6301 		}else {
6302 			ocs_del_timer(&hw->watchdog_timer);
6303 		}
6304 	}
6305 
6306 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6307 	return;
6308 }
6309 
6310 /**
6311  * @brief Set configuration parameters for watchdog timer feature.
6312  *
6313  * @param hw Hardware context.
6314  * @param timeout Timeout for watchdog timer in seconds
6315  *
6316  * @return Returns OCS_HW_RTN_SUCCESS on success.
6317  */
6318 static ocs_hw_rtn_e
6319 ocs_hw_config_watchdog_timer(ocs_hw_t *hw)
6320 {
6321 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6322 	uint8_t *buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
6323 
6324 	sli4_cmd_lowlevel_set_watchdog(&hw->sli, buf, SLI4_BMBX_SIZE, hw->watchdog_timeout);
6325 	rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_cfg_watchdog, NULL);
6326 	if (rc) {
6327 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
6328 		ocs_log_err(hw->os, "config watchdog timer failed, rc = %d\n", rc);
6329 	}
6330 	return rc;
6331 }
6332 
6333 /**
6334  * @brief Set configuration parameters for auto-generate xfer_rdy T10 PI feature.
6335  *
6336  * @param hw Hardware context.
6337  * @param buf Pointer to a mailbox buffer area.
6338  *
6339  * @return Returns OCS_HW_RTN_SUCCESS on success.
6340  */
6341 static ocs_hw_rtn_e
6342 ocs_hw_config_auto_xfer_rdy_t10pi(ocs_hw_t *hw, uint8_t *buf)
6343 {
6344 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6345 	sli4_req_common_set_features_xfer_rdy_t10pi_t param;
6346 
6347 	ocs_memset(&param, 0, sizeof(param));
6348 	param.rtc = (hw->config.auto_xfer_rdy_ref_tag_is_lba ? 0 : 1);
6349 	param.atv = (hw->config.auto_xfer_rdy_app_tag_valid ? 1 : 0);
6350 	param.tmm = ((hw->config.dif_mode == OCS_HW_DIF_MODE_INLINE) ? 0 : 1);
6351 	param.app_tag = hw->config.auto_xfer_rdy_app_tag_value;
6352 	param.blk_size = hw->config.auto_xfer_rdy_blk_size_chip;
6353 
6354 	switch (hw->config.auto_xfer_rdy_p_type) {
6355 	case 1:
6356 		param.p_type = 0;
6357 		break;
6358 	case 3:
6359 		param.p_type = 2;
6360 		break;
6361 	default:
6362 		ocs_log_err(hw->os, "unsupported p_type %d\n",
6363 			hw->config.auto_xfer_rdy_p_type);
6364 		return OCS_HW_RTN_ERROR;
6365 	}
6366 
6367 	/* build the set_features command */
6368 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6369 				    SLI4_SET_FEATURES_SET_CONFIG_AUTO_XFER_RDY_T10PI,
6370 				    sizeof(param),
6371 				    &param);
6372 
6373 
6374 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6375 	if (rc) {
6376 		ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6377 	} else {
6378 		ocs_log_test(hw->os, "Auto XFER RDY T10 PI configured rtc:%d atv:%d p_type:%d app_tag:%x blk_size:%d\n",
6379 				param.rtc, param.atv, param.p_type,
6380 				param.app_tag, param.blk_size);
6381 	}
6382 
6383 	return rc;
6384 }
6385 
6386 
6387 /**
6388  * @brief enable sli port health check
6389  *
6390  * @param hw Hardware context.
6391  * @param buf Pointer to a mailbox buffer area.
6392  * @param query current status of the health check feature enabled/disabled
6393  * @param enable if 1: enable 0: disable
6394  * @param buf Pointer to a mailbox buffer area.
6395  *
6396  * @return Returns OCS_HW_RTN_SUCCESS on success.
6397  */
6398 static ocs_hw_rtn_e
6399 ocs_hw_config_sli_port_health_check(ocs_hw_t *hw, uint8_t query, uint8_t enable)
6400 {
6401 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6402 	uint8_t buf[SLI4_BMBX_SIZE];
6403 	sli4_req_common_set_features_health_check_t param;
6404 
6405 	ocs_memset(&param, 0, sizeof(param));
6406 	param.hck = enable;
6407 	param.qry = query;
6408 
6409 	/* build the set_features command */
6410 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6411 				    SLI4_SET_FEATURES_SLI_PORT_HEALTH_CHECK,
6412 				    sizeof(param),
6413 				    &param);
6414 
6415 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6416 	if (rc) {
6417 		ocs_log_err(hw->os, "ocs_hw_command returns %d\n", rc);
6418 	} else {
6419 		ocs_log_test(hw->os, "SLI Port Health Check is enabled \n");
6420 	}
6421 
6422 	return rc;
6423 }
6424 
6425 /**
6426  * @brief Set FTD transfer hint feature
6427  *
6428  * @param hw Hardware context.
6429  * @param fdt_xfer_hint size in bytes where read requests are segmented.
6430  *
6431  * @return Returns OCS_HW_RTN_SUCCESS on success.
6432  */
6433 static ocs_hw_rtn_e
6434 ocs_hw_config_set_fdt_xfer_hint(ocs_hw_t *hw, uint32_t fdt_xfer_hint)
6435 {
6436 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6437 	uint8_t buf[SLI4_BMBX_SIZE];
6438 	sli4_req_common_set_features_set_fdt_xfer_hint_t param;
6439 
6440 	ocs_memset(&param, 0, sizeof(param));
6441 	param.fdt_xfer_hint = fdt_xfer_hint;
6442 	/* build the set_features command */
6443 	sli_cmd_common_set_features(&hw->sli, buf, SLI4_BMBX_SIZE,
6444 				    SLI4_SET_FEATURES_SET_FTD_XFER_HINT,
6445 				    sizeof(param),
6446 				    &param);
6447 
6448 
6449 	rc = ocs_hw_command(hw, buf, OCS_CMD_POLL, NULL, NULL);
6450 	if (rc) {
6451 		ocs_log_warn(hw->os, "set FDT hint %d failed: %d\n", fdt_xfer_hint, rc);
6452 	} else {
6453 		ocs_log_debug(hw->os, "Set FTD transfer hint to %d\n", param.fdt_xfer_hint);
6454 	}
6455 
6456 	return rc;
6457 }
6458 
6459 /**
6460  * @brief Get the link configuration callback.
6461  *
6462  * @param hw Hardware context.
6463  * @param status Status from the DMTF CLP command.
6464  * @param result_len Length, in bytes, of the DMTF CLP result.
6465  * @param arg Pointer to a callback argument.
6466  *
6467  * @return Returns OCS_HW_RTN_SUCCESS on success.
6468  */
6469 static void
6470 ocs_hw_linkcfg_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint32_t result_len, void *arg)
6471 {
6472 	int32_t rval;
6473 	char retdata_str[64];
6474 	ocs_hw_linkcfg_cb_arg_t *cb_arg = (ocs_hw_linkcfg_cb_arg_t *)arg;
6475 	ocs_hw_linkcfg_e linkcfg = OCS_HW_LINKCFG_NA;
6476 
6477 	if (status) {
6478 		ocs_log_test(hw->os, "CLP cmd failed, status=%d\n", status);
6479 	} else {
6480 		/* parse CLP response to get return data */
6481 		rval = ocs_hw_clp_resp_get_value(hw, "retdata", retdata_str,
6482 						  sizeof(retdata_str),
6483 						  cb_arg->dma_resp.virt,
6484 						  result_len);
6485 
6486 		if (rval <= 0) {
6487 			ocs_log_err(hw->os, "failed to get retdata %d\n", result_len);
6488 		} else {
6489 			/* translate string into hw enum */
6490 			linkcfg = ocs_hw_linkcfg_from_clp(retdata_str);
6491 		}
6492 	}
6493 
6494 	/* invoke callback */
6495 	if (cb_arg->cb) {
6496 		cb_arg->cb(status, linkcfg, cb_arg->arg);
6497 	}
6498 
6499 	/* if polling, will free memory in calling function */
6500 	if (cb_arg->opts != OCS_CMD_POLL) {
6501 		ocs_dma_free(hw->os, &cb_arg->dma_cmd);
6502 		ocs_dma_free(hw->os, &cb_arg->dma_resp);
6503 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6504 	}
6505 }
6506 
6507 /**
6508  * @brief Set the Lancer dump location
6509  * @par Description
6510  * This function tells a Lancer chip to use a specific DMA
6511  * buffer as a dump location rather than the internal flash.
6512  *
6513  * @param hw Hardware context.
6514  * @param num_buffers The number of DMA buffers to hold the dump (1..n).
6515  * @param dump_buffers DMA buffers to hold the dump.
6516  *
6517  * @return Returns OCS_HW_RTN_SUCCESS on success.
6518  */
6519 ocs_hw_rtn_e
6520 ocs_hw_set_dump_location(ocs_hw_t *hw, uint32_t num_buffers, ocs_dma_t *dump_buffers, uint8_t fdb)
6521 {
6522 	uint8_t bus, dev, func;
6523 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6524 	uint8_t	buf[SLI4_BMBX_SIZE];
6525 
6526 	/*
6527 	 * Make sure the FW is new enough to support this command. If the FW
6528 	 * is too old, the FW will UE.
6529 	 */
6530 	if (hw->workaround.disable_dump_loc) {
6531 		ocs_log_test(hw->os, "FW version is too old for this feature\n");
6532 		return OCS_HW_RTN_ERROR;
6533 	}
6534 
6535 	/* This command is only valid for physical port 0 */
6536 	ocs_get_bus_dev_func(hw->os, &bus, &dev, &func);
6537 	if (fdb == 0 && func != 0) {
6538 		ocs_log_test(hw->os, "function only valid for pci function 0, %d passed\n",
6539 			     func);
6540 		return OCS_HW_RTN_ERROR;
6541 	}
6542 
6543 	/*
6544 	 * If a single buffer is used, then it may be passed as is to the chip. For multiple buffers,
6545 	 * We must allocate a SGL list and then pass the address of the list to the chip.
6546 	 */
6547 	if (num_buffers > 1) {
6548 		uint32_t sge_size = num_buffers * sizeof(sli4_sge_t);
6549 		sli4_sge_t *sge;
6550 		uint32_t i;
6551 
6552 		if (hw->dump_sges.size < sge_size) {
6553 			ocs_dma_free(hw->os, &hw->dump_sges);
6554 			if (ocs_dma_alloc(hw->os, &hw->dump_sges, sge_size, OCS_MIN_DMA_ALIGNMENT)) {
6555 				ocs_log_err(hw->os, "SGE DMA allocation failed\n");
6556 				return OCS_HW_RTN_NO_MEMORY;
6557 			}
6558 		}
6559 		/* build the SGE list */
6560 		ocs_memset(hw->dump_sges.virt, 0, hw->dump_sges.size);
6561 		hw->dump_sges.len = sge_size;
6562 		sge = hw->dump_sges.virt;
6563 		for (i = 0; i < num_buffers; i++) {
6564 			sge[i].buffer_address_high = ocs_addr32_hi(dump_buffers[i].phys);
6565 			sge[i].buffer_address_low = ocs_addr32_lo(dump_buffers[i].phys);
6566 			sge[i].last = (i == num_buffers - 1 ? 1 : 0);
6567 			sge[i].buffer_length = dump_buffers[i].size;
6568 		}
6569 		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6570 						      SLI4_BMBX_SIZE, FALSE, TRUE,
6571 						      &hw->dump_sges, fdb);
6572 	} else {
6573 		dump_buffers->len = dump_buffers->size;
6574 		rc = sli_cmd_common_set_dump_location(&hw->sli, (void *)buf,
6575 						      SLI4_BMBX_SIZE, FALSE, FALSE,
6576 						      dump_buffers, fdb);
6577 	}
6578 
6579 	if (rc) {
6580 		rc = ocs_hw_command(hw, buf, OCS_CMD_POLL,
6581 				     NULL, NULL);
6582 		if (rc) {
6583 			ocs_log_err(hw->os, "ocs_hw_command returns %d\n",
6584 				rc);
6585 		}
6586 	} else {
6587 		ocs_log_err(hw->os,
6588 			"sli_cmd_common_set_dump_location failed\n");
6589 		rc = OCS_HW_RTN_ERROR;
6590 	}
6591 
6592 	return rc;
6593 }
6594 
6595 
6596 /**
6597  * @brief Set the Ethernet license.
6598  *
6599  * @par Description
6600  * This function sends the appropriate mailbox command (DMTF
6601  * CLP) to set the Ethernet license to the given license value.
6602  * Since it is used during the time of ocs_hw_init(), the mailbox
6603  * command is sent via polling (the BMBX route).
6604  *
6605  * @param hw Hardware context.
6606  * @param license 32-bit license value.
6607  *
6608  * @return Returns OCS_HW_RTN_SUCCESS on success.
6609  */
6610 static ocs_hw_rtn_e
6611 ocs_hw_set_eth_license(ocs_hw_t *hw, uint32_t license)
6612 {
6613 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6614 	char cmd[OCS_HW_DMTF_CLP_CMD_MAX];
6615 	ocs_dma_t dma_cmd;
6616 	ocs_dma_t dma_resp;
6617 
6618 	/* only for lancer right now */
6619 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6620 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6621 		return OCS_HW_RTN_ERROR;
6622 	}
6623 
6624 	ocs_snprintf(cmd, OCS_HW_DMTF_CLP_CMD_MAX, "set / OEMELX_Ethernet_License=%X", license);
6625 	/* allocate DMA for command  */
6626 	if (ocs_dma_alloc(hw->os, &dma_cmd, ocs_strlen(cmd)+1, 4096)) {
6627 		ocs_log_err(hw->os, "malloc failed\n");
6628 		return OCS_HW_RTN_NO_MEMORY;
6629 	}
6630 	ocs_memset(dma_cmd.virt, 0, ocs_strlen(cmd)+1);
6631 	ocs_memcpy(dma_cmd.virt, cmd, ocs_strlen(cmd));
6632 
6633 	/* allocate DMA for response */
6634 	if (ocs_dma_alloc(hw->os, &dma_resp, OCS_HW_DMTF_CLP_RSP_MAX, 4096)) {
6635 		ocs_log_err(hw->os, "malloc failed\n");
6636 		ocs_dma_free(hw->os, &dma_cmd);
6637 		return OCS_HW_RTN_NO_MEMORY;
6638 	}
6639 
6640 	/* send DMTF CLP command mbx and poll */
6641 	if (ocs_hw_exec_dmtf_clp_cmd(hw, &dma_cmd, &dma_resp, OCS_CMD_POLL, NULL, NULL)) {
6642 		ocs_log_err(hw->os, "CLP cmd=\"%s\" failed\n", (char *)dma_cmd.virt);
6643 		rc = OCS_HW_RTN_ERROR;
6644 	}
6645 
6646 	ocs_dma_free(hw->os, &dma_cmd);
6647 	ocs_dma_free(hw->os, &dma_resp);
6648 	return rc;
6649 }
6650 
6651 /**
6652  * @brief Callback argument structure for the DMTF CLP commands.
6653  */
6654 typedef struct ocs_hw_clp_cb_arg_s {
6655 	ocs_hw_dmtf_clp_cb_t cb;
6656 	ocs_dma_t *dma_resp;
6657 	int32_t status;
6658 	uint32_t opts;
6659 	void *arg;
6660 } ocs_hw_clp_cb_arg_t;
6661 
6662 /**
6663  * @brief Execute the DMTF CLP command.
6664  *
6665  * @param hw Hardware context.
6666  * @param dma_cmd DMA buffer containing the CLP command.
6667  * @param dma_resp DMA buffer that will contain the response (if successful).
6668  * @param opts Mailbox command options (such as OCS_CMD_NOWAIT and POLL).
6669  * @param cb Callback function.
6670  * @param arg Callback argument.
6671  *
6672  * @return Returns the number of bytes written to the response
6673  * buffer on success, or a negative value if failed.
6674  */
6675 static ocs_hw_rtn_e
6676 ocs_hw_exec_dmtf_clp_cmd(ocs_hw_t *hw, ocs_dma_t *dma_cmd, ocs_dma_t *dma_resp, uint32_t opts, ocs_hw_dmtf_clp_cb_t cb, void *arg)
6677 {
6678 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6679 	ocs_hw_clp_cb_arg_t *cb_arg;
6680 	uint8_t *mbxdata;
6681 
6682 	/* allocate DMA for mailbox */
6683 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6684 	if (mbxdata == NULL) {
6685 		ocs_log_err(hw->os, "failed to malloc mbox\n");
6686 		return OCS_HW_RTN_NO_MEMORY;
6687 	}
6688 
6689 	/* allocate memory for callback argument */
6690 	cb_arg = ocs_malloc(hw->os, sizeof(*cb_arg), OCS_M_NOWAIT);
6691 	if (cb_arg == NULL) {
6692 		ocs_log_err(hw->os, "failed to malloc cb_arg");
6693 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6694 		return OCS_HW_RTN_NO_MEMORY;
6695 	}
6696 
6697 	cb_arg->cb = cb;
6698 	cb_arg->arg = arg;
6699 	cb_arg->dma_resp = dma_resp;
6700 	cb_arg->opts = opts;
6701 
6702 	/* Send the HW command */
6703 	if (sli_cmd_dmtf_exec_clp_cmd(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
6704 				      dma_cmd, dma_resp)) {
6705 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_dmtf_clp_cb, cb_arg);
6706 
6707 		if (opts == OCS_CMD_POLL && rc == OCS_HW_RTN_SUCCESS) {
6708 			/* if we're polling, copy response and invoke callback to
6709 			 * parse result */
6710 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
6711 			ocs_hw_dmtf_clp_cb(hw, 0, mbxdata, cb_arg);
6712 
6713 			/* set rc to resulting or "parsed" status */
6714 			rc = cb_arg->status;
6715 		}
6716 
6717 		/* if failed, or polling, free memory here */
6718 		if (opts == OCS_CMD_POLL || rc != OCS_HW_RTN_SUCCESS) {
6719 			if (rc != OCS_HW_RTN_SUCCESS) {
6720 				ocs_log_test(hw->os, "ocs_hw_command failed\n");
6721 			}
6722 			ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6723 			ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6724 		}
6725 	} else {
6726 		ocs_log_test(hw->os, "sli_cmd_dmtf_exec_clp_cmd failed\n");
6727 		rc = OCS_HW_RTN_ERROR;
6728 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
6729 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6730 	}
6731 
6732 	return rc;
6733 }
6734 
6735 
6736 /**
6737  * @brief Called when the DMTF CLP command completes.
6738  *
6739  * @param hw Hardware context.
6740  * @param status Status field from the mbox completion.
6741  * @param mqe Mailbox response structure.
6742  * @param arg Pointer to a callback argument.
6743  *
6744  * @return None.
6745  *
6746  */
6747 static void
6748 ocs_hw_dmtf_clp_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6749 {
6750 	int32_t cb_status = 0;
6751 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6752 	sli4_res_dmtf_exec_clp_cmd_t *clp_rsp = (sli4_res_dmtf_exec_clp_cmd_t *) mbox_rsp->payload.embed;
6753 	ocs_hw_clp_cb_arg_t *cb_arg = arg;
6754 	uint32_t result_len = 0;
6755 	int32_t stat_len;
6756 	char stat_str[8];
6757 
6758 	/* there are several status codes here, check them all and condense
6759 	 * into a single callback status
6760 	 */
6761 	if (status || mbox_rsp->hdr.status || clp_rsp->clp_status) {
6762 		ocs_log_debug(hw->os, "status=x%x/x%x/x%x  addl=x%x clp=x%x detail=x%x\n",
6763 			status,
6764 			mbox_rsp->hdr.status,
6765 			clp_rsp->hdr.status,
6766 			clp_rsp->hdr.additional_status,
6767 			clp_rsp->clp_status,
6768 			clp_rsp->clp_detailed_status);
6769 		if (status) {
6770 			cb_status = status;
6771 		} else if (mbox_rsp->hdr.status) {
6772 			cb_status = mbox_rsp->hdr.status;
6773 		} else {
6774 			cb_status = clp_rsp->clp_status;
6775 		}
6776 	} else {
6777 		result_len = clp_rsp->resp_length;
6778 	}
6779 
6780 	if (cb_status) {
6781 		goto ocs_hw_cb_dmtf_clp_done;
6782 	}
6783 
6784 	if ((result_len == 0) || (cb_arg->dma_resp->size < result_len)) {
6785 		ocs_log_test(hw->os, "Invalid response length: resp_len=%zu result len=%d\n",
6786 			     cb_arg->dma_resp->size, result_len);
6787 		cb_status = -1;
6788 		goto ocs_hw_cb_dmtf_clp_done;
6789 	}
6790 
6791 	/* parse CLP response to get status */
6792 	stat_len = ocs_hw_clp_resp_get_value(hw, "status", stat_str,
6793 					      sizeof(stat_str),
6794 					      cb_arg->dma_resp->virt,
6795 					      result_len);
6796 
6797 	if (stat_len <= 0) {
6798 		ocs_log_test(hw->os, "failed to get status %d\n", stat_len);
6799 		cb_status = -1;
6800 		goto ocs_hw_cb_dmtf_clp_done;
6801 	}
6802 
6803 	if (ocs_strcmp(stat_str, "0") != 0) {
6804 		ocs_log_test(hw->os, "CLP status indicates failure=%s\n", stat_str);
6805 		cb_status = -1;
6806 		goto ocs_hw_cb_dmtf_clp_done;
6807 	}
6808 
6809 ocs_hw_cb_dmtf_clp_done:
6810 
6811 	/* save status in cb_arg for callers with NULL cb's + polling */
6812 	cb_arg->status = cb_status;
6813 	if (cb_arg->cb) {
6814 		cb_arg->cb(hw, cb_status, result_len, cb_arg->arg);
6815 	}
6816 	/* if polling, caller will free memory */
6817 	if (cb_arg->opts != OCS_CMD_POLL) {
6818 		ocs_free(hw->os, cb_arg, sizeof(*cb_arg));
6819 		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
6820 	}
6821 }
6822 
6823 /**
6824  * @brief Parse the CLP result and get the value corresponding to the given
6825  * keyword.
6826  *
6827  * @param hw Hardware context.
6828  * @param keyword CLP keyword for which the value is returned.
6829  * @param value Location to which the resulting value is copied.
6830  * @param value_len Length of the value parameter.
6831  * @param resp Pointer to the response buffer that is searched
6832  * for the keyword and value.
6833  * @param resp_len Length of response buffer passed in.
6834  *
6835  * @return Returns the number of bytes written to the value
6836  * buffer on success, or a negative vaue on failure.
6837  */
6838 static int32_t
6839 ocs_hw_clp_resp_get_value(ocs_hw_t *hw, const char *keyword, char *value, uint32_t value_len, const char *resp, uint32_t resp_len)
6840 {
6841 	char *start = NULL;
6842 	char *end = NULL;
6843 
6844 	/* look for specified keyword in string */
6845 	start = ocs_strstr(resp, keyword);
6846 	if (start == NULL) {
6847 		ocs_log_test(hw->os, "could not find keyword=%s in CLP response\n",
6848 			     keyword);
6849 		return -1;
6850 	}
6851 
6852 	/* now look for '=' and go one past */
6853 	start = ocs_strchr(start, '=');
6854 	if (start == NULL) {
6855 		ocs_log_test(hw->os, "could not find \'=\' in CLP response for keyword=%s\n",
6856 			     keyword);
6857 		return -1;
6858 	}
6859 	start++;
6860 
6861 	/* \r\n terminates value */
6862 	end = ocs_strstr(start, "\r\n");
6863 	if (end == NULL) {
6864 		ocs_log_test(hw->os, "could not find \\r\\n for keyword=%s in CLP response\n",
6865 			     keyword);
6866 		return -1;
6867 	}
6868 
6869 	/* make sure given result array is big enough */
6870 	if ((end - start + 1) > value_len) {
6871 		ocs_log_test(hw->os, "value len=%d not large enough for actual=%ld\n",
6872 			     value_len, (end-start));
6873 		return -1;
6874 	}
6875 
6876 	ocs_strncpy(value, start, (end - start));
6877 	value[end-start] = '\0';
6878 	return (end-start+1);
6879 }
6880 
6881 /**
6882  * @brief Cause chip to enter an unrecoverable error state.
6883  *
6884  * @par Description
6885  * Cause chip to enter an unrecoverable error state. This is
6886  * used when detecting unexpected FW behavior so that the FW can be
6887  * hwted from the driver as soon as the error is detected.
6888  *
6889  * @param hw Hardware context.
6890  * @param dump Generate dump as part of reset.
6891  *
6892  * @return Returns 0 on success, or a non-zero value on failure.
6893  *
6894  */
6895 ocs_hw_rtn_e
6896 ocs_hw_raise_ue(ocs_hw_t *hw, uint8_t dump)
6897 {
6898 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
6899 
6900 	if (sli_raise_ue(&hw->sli, dump) != 0) {
6901 		rc = OCS_HW_RTN_ERROR;
6902 	} else {
6903 		if (hw->state != OCS_HW_STATE_UNINITIALIZED) {
6904 			hw->state = OCS_HW_STATE_QUEUES_ALLOCATED;
6905 		}
6906 	}
6907 
6908 	return rc;
6909 }
6910 
6911 /**
6912  * @brief Called when the OBJECT_GET command completes.
6913  *
6914  * @par Description
6915  * Get the number of bytes actually written out of the response, free the mailbox
6916  * that was malloc'd by ocs_hw_dump_get(), then call the callback
6917  * and pass the status and bytes read.
6918  *
6919  * @param hw Hardware context.
6920  * @param status Status field from the mbox completion.
6921  * @param mqe Mailbox response structure.
6922  * @param arg Pointer to a callback function that signals the caller that the command is done.
6923  * The callback function prototype is <tt>void cb(int32_t status, uint32_t bytes_read)</tt>.
6924  *
6925  * @return Returns 0.
6926  */
6927 static int32_t
6928 ocs_hw_cb_dump_get(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
6929 {
6930 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
6931 	sli4_res_common_read_object_t* rd_obj_rsp = (sli4_res_common_read_object_t*) mbox_rsp->payload.embed;
6932 	ocs_hw_dump_get_cb_arg_t *cb_arg = arg;
6933 	uint32_t bytes_read;
6934 	uint8_t eof;
6935 
6936 	bytes_read = rd_obj_rsp->actual_read_length;
6937 	eof = rd_obj_rsp->eof;
6938 
6939 	if (cb_arg) {
6940 		if (cb_arg->cb) {
6941 			if ((status == 0) && mbox_rsp->hdr.status) {
6942 				status = mbox_rsp->hdr.status;
6943 			}
6944 			cb_arg->cb(status, bytes_read, eof, cb_arg->arg);
6945 		}
6946 
6947 		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
6948 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
6949 	}
6950 
6951 	return 0;
6952 }
6953 
6954 
6955 /**
6956  * @brief Read a dump image to the host.
6957  *
6958  * @par Description
6959  * Creates a SLI_CONFIG mailbox command, fills in the correct values to read a
6960  * dump image chunk, then sends the command with the ocs_hw_command(). On completion,
6961  * the callback function ocs_hw_cb_dump_get() gets called to free the mailbox
6962  * and signal the caller that the read has completed.
6963  *
6964  * @param hw Hardware context.
6965  * @param dma DMA structure to transfer the dump chunk into.
6966  * @param size Size of the dump chunk.
6967  * @param offset Offset, in bytes, from the beginning of the dump.
6968  * @param cb Pointer to a callback function that is called when the command completes.
6969  * The callback function prototype is
6970  * <tt>void cb(int32_t status, uint32_t bytes_read, uint8_t eof, void *arg)</tt>.
6971  * @param arg Pointer to be passed to the callback function.
6972  *
6973  * @return Returns 0 on success, or a non-zero value on failure.
6974  */
6975 ocs_hw_rtn_e
6976 ocs_hw_dump_get(ocs_hw_t *hw, ocs_dma_t *dma, uint32_t size, uint32_t offset, ocs_hw_dump_get_cb_t cb, void *arg)
6977 {
6978 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
6979 	uint8_t *mbxdata;
6980 	ocs_hw_dump_get_cb_arg_t *cb_arg;
6981 	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
6982 
6983 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
6984 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
6985 		return OCS_HW_RTN_ERROR;
6986 	}
6987 
6988 	if (1 != sli_dump_is_present(&hw->sli)) {
6989 		ocs_log_test(hw->os, "No dump is present\n");
6990 		return OCS_HW_RTN_ERROR;
6991 	}
6992 
6993 	if (1 == sli_reset_required(&hw->sli)) {
6994 		ocs_log_test(hw->os, "device reset required\n");
6995 		return OCS_HW_RTN_ERROR;
6996 	}
6997 
6998 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
6999 	if (mbxdata == NULL) {
7000 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7001 		return OCS_HW_RTN_NO_MEMORY;
7002 	}
7003 
7004 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_get_cb_arg_t), OCS_M_NOWAIT);
7005 	if (cb_arg == NULL) {
7006 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7007 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7008 		return OCS_HW_RTN_NO_MEMORY;
7009 	}
7010 
7011 	cb_arg->cb = cb;
7012 	cb_arg->arg = arg;
7013 	cb_arg->mbox_cmd = mbxdata;
7014 
7015 	if (sli_cmd_common_read_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7016 			size, offset, "/dbg/dump.bin", dma)) {
7017 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_get, cb_arg);
7018 		if (rc == 0 && opts == OCS_CMD_POLL) {
7019 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7020 			rc = ocs_hw_cb_dump_get(hw, 0, mbxdata, cb_arg);
7021 		}
7022 	}
7023 
7024 	if (rc != OCS_HW_RTN_SUCCESS) {
7025 		ocs_log_test(hw->os, "COMMON_READ_OBJECT failed\n");
7026 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7027 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_get_cb_arg_t));
7028 	}
7029 
7030 	return rc;
7031 }
7032 
7033 /**
7034  * @brief Called when the OBJECT_DELETE command completes.
7035  *
7036  * @par Description
7037  * Free the mailbox that was malloc'd
7038  * by ocs_hw_dump_clear(), then call the callback and pass the status.
7039  *
7040  * @param hw Hardware context.
7041  * @param status Status field from the mbox completion.
7042  * @param mqe Mailbox response structure.
7043  * @param arg Pointer to a callback function that signals the caller that the command is done.
7044  * The callback function prototype is <tt>void cb(int32_t status, void *arg)</tt>.
7045  *
7046  * @return Returns 0.
7047  */
7048 static int32_t
7049 ocs_hw_cb_dump_clear(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
7050 {
7051 	ocs_hw_dump_clear_cb_arg_t *cb_arg = arg;
7052 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7053 
7054 	if (cb_arg) {
7055 		if (cb_arg->cb) {
7056 			if ((status == 0) && mbox_rsp->hdr.status) {
7057 				status = mbox_rsp->hdr.status;
7058 			}
7059 			cb_arg->cb(status, cb_arg->arg);
7060 		}
7061 
7062 		ocs_free(hw->os, cb_arg->mbox_cmd, SLI4_BMBX_SIZE);
7063 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7064 	}
7065 
7066 	return 0;
7067 }
7068 
7069 /**
7070  * @brief Clear a dump image from the device.
7071  *
7072  * @par Description
7073  * Creates a SLI_CONFIG mailbox command, fills it with the correct values to clear
7074  * the dump, then sends the command with ocs_hw_command(). On completion,
7075  * the callback function ocs_hw_cb_dump_clear() gets called to free the mailbox
7076  * and to signal the caller that the write has completed.
7077  *
7078  * @param hw Hardware context.
7079  * @param cb Pointer to a callback function that is called when the command completes.
7080  * The callback function prototype is
7081  * <tt>void cb(int32_t status, uint32_t bytes_written, void *arg)</tt>.
7082  * @param arg Pointer to be passed to the callback function.
7083  *
7084  * @return Returns 0 on success, or a non-zero value on failure.
7085  */
7086 ocs_hw_rtn_e
7087 ocs_hw_dump_clear(ocs_hw_t *hw, ocs_hw_dump_clear_cb_t cb, void *arg)
7088 {
7089 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7090 	uint8_t *mbxdata;
7091 	ocs_hw_dump_clear_cb_arg_t *cb_arg;
7092 	uint32_t opts = (hw->state == OCS_HW_STATE_ACTIVE ? OCS_CMD_NOWAIT : OCS_CMD_POLL);
7093 
7094 	if (SLI4_IF_TYPE_LANCER_FC_ETH != sli_get_if_type(&hw->sli)) {
7095 		ocs_log_test(hw->os, "Function only supported for I/F type 2\n");
7096 		return OCS_HW_RTN_ERROR;
7097 	}
7098 
7099 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7100 	if (mbxdata == NULL) {
7101 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7102 		return OCS_HW_RTN_NO_MEMORY;
7103 	}
7104 
7105 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_dump_clear_cb_arg_t), OCS_M_NOWAIT);
7106 	if (cb_arg == NULL) {
7107 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7108 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7109 		return OCS_HW_RTN_NO_MEMORY;
7110 	}
7111 
7112 	cb_arg->cb = cb;
7113 	cb_arg->arg = arg;
7114 	cb_arg->mbox_cmd = mbxdata;
7115 
7116 	if (sli_cmd_common_delete_object(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7117 			"/dbg/dump.bin")) {
7118 		rc = ocs_hw_command(hw, mbxdata, opts, ocs_hw_cb_dump_clear, cb_arg);
7119 		if (rc == 0 && opts == OCS_CMD_POLL) {
7120 			ocs_memcpy(mbxdata, hw->sli.bmbx.virt, SLI4_BMBX_SIZE);
7121 			rc = ocs_hw_cb_dump_clear(hw, 0, mbxdata, cb_arg);
7122 		}
7123 	}
7124 
7125 	if (rc != OCS_HW_RTN_SUCCESS) {
7126 		ocs_log_test(hw->os, "COMMON_DELETE_OBJECT failed\n");
7127 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7128 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_dump_clear_cb_arg_t));
7129 	}
7130 
7131 	return rc;
7132 }
7133 
7134 typedef struct ocs_hw_get_port_protocol_cb_arg_s {
7135 	ocs_get_port_protocol_cb_t cb;
7136 	void *arg;
7137 	uint32_t pci_func;
7138 	ocs_dma_t payload;
7139 } ocs_hw_get_port_protocol_cb_arg_t;
7140 
7141 /**
7142  * @brief Called for the completion of get_port_profile for a
7143  *        user request.
7144  *
7145  * @param hw Hardware context.
7146  * @param status The status from the MQE.
7147  * @param mqe Pointer to mailbox command buffer.
7148  * @param arg Pointer to a callback argument.
7149  *
7150  * @return Returns 0 on success, or a non-zero value on failure.
7151  */
7152 static int32_t
7153 ocs_hw_get_port_protocol_cb(ocs_hw_t *hw, int32_t status,
7154 			    uint8_t *mqe, void *arg)
7155 {
7156 	ocs_hw_get_port_protocol_cb_arg_t *cb_arg = arg;
7157 	ocs_dma_t *payload = &(cb_arg->payload);
7158 	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7159 	ocs_hw_port_protocol_e port_protocol;
7160 	int num_descriptors;
7161 	sli4_resource_descriptor_v1_t *desc_p;
7162 	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7163 	int i;
7164 
7165 	port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7166 
7167 	num_descriptors = response->desc_count;
7168 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7169 	for (i=0; i<num_descriptors; i++) {
7170 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7171 			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7172 			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7173 				switch(pcie_desc_p->pf_type) {
7174 				case 0x02:
7175 					port_protocol = OCS_HW_PORT_PROTOCOL_ISCSI;
7176 					break;
7177 				case 0x04:
7178 					port_protocol = OCS_HW_PORT_PROTOCOL_FCOE;
7179 					break;
7180 				case 0x10:
7181 					port_protocol = OCS_HW_PORT_PROTOCOL_FC;
7182 					break;
7183 				default:
7184 					port_protocol = OCS_HW_PORT_PROTOCOL_OTHER;
7185 					break;
7186 				}
7187 			}
7188 		}
7189 
7190 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7191 	}
7192 
7193 	if (cb_arg->cb) {
7194 		cb_arg->cb(status, port_protocol, cb_arg->arg);
7195 
7196 	}
7197 
7198 	ocs_dma_free(hw->os, &cb_arg->payload);
7199 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7200 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7201 
7202 	return 0;
7203 }
7204 
7205 /**
7206  * @ingroup io
7207  * @brief  Get the current port protocol.
7208  * @par Description
7209  * Issues a SLI4 COMMON_GET_PROFILE_CONFIG mailbox.  When the
7210  * command completes the provided mgmt callback function is
7211  * called.
7212  *
7213  * @param hw Hardware context.
7214  * @param pci_func PCI function to query for current protocol.
7215  * @param cb Callback function to be called when the command completes.
7216  * @param ul_arg An argument that is passed to the callback function.
7217  *
7218  * @return
7219  * - OCS_HW_RTN_SUCCESS on success.
7220  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7221  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7222  *   context.
7223  * - OCS_HW_RTN_ERROR on any other error.
7224  */
7225 ocs_hw_rtn_e
7226 ocs_hw_get_port_protocol(ocs_hw_t *hw, uint32_t pci_func,
7227 	ocs_get_port_protocol_cb_t cb, void* ul_arg)
7228 {
7229 	uint8_t *mbxdata;
7230 	ocs_hw_get_port_protocol_cb_arg_t *cb_arg;
7231 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7232 
7233 	/* Only supported on Skyhawk */
7234 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7235 		return OCS_HW_RTN_ERROR;
7236 	}
7237 
7238 	/* mbxdata holds the header of the command */
7239 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7240 	if (mbxdata == NULL) {
7241 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7242 		return OCS_HW_RTN_NO_MEMORY;
7243 	}
7244 
7245 
7246 	/* cb_arg holds the data that will be passed to the callback on completion */
7247 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7248 	if (cb_arg == NULL) {
7249 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7250 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7251 		return OCS_HW_RTN_NO_MEMORY;
7252 	}
7253 
7254 	cb_arg->cb = cb;
7255 	cb_arg->arg = ul_arg;
7256 	cb_arg->pci_func = pci_func;
7257 
7258 	/* dma_mem holds the non-embedded portion */
7259 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7260 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7261 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7262 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7263 		return OCS_HW_RTN_NO_MEMORY;
7264 	}
7265 
7266 	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7267 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_port_protocol_cb, cb_arg);
7268 	}
7269 
7270 	if (rc != OCS_HW_RTN_SUCCESS) {
7271 		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7272 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7273 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
7274 		ocs_dma_free(hw->os, &cb_arg->payload);
7275 	}
7276 
7277 	return rc;
7278 
7279 }
7280 
7281 typedef struct ocs_hw_set_port_protocol_cb_arg_s {
7282 	ocs_set_port_protocol_cb_t cb;
7283 	void *arg;
7284 	ocs_dma_t payload;
7285 	uint32_t new_protocol;
7286 	uint32_t pci_func;
7287 } ocs_hw_set_port_protocol_cb_arg_t;
7288 
7289 /**
7290  * @brief Called for the completion of set_port_profile for a
7291  *        user request.
7292  *
7293  * @par Description
7294  * This is the second of two callbacks for the set_port_protocol
7295  * function. The set operation is a read-modify-write. This
7296  * callback is called when the write (SET_PROFILE_CONFIG)
7297  * completes.
7298  *
7299  * @param hw Hardware context.
7300  * @param status The status from the MQE.
7301  * @param mqe Pointer to mailbox command buffer.
7302  * @param arg Pointer to a callback argument.
7303  *
7304  * @return 0 on success, non-zero otherwise
7305  */
7306 static int32_t
7307 ocs_hw_set_port_protocol_cb2(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7308 {
7309 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7310 
7311 	if (cb_arg->cb) {
7312 		cb_arg->cb( status, cb_arg->arg);
7313 	}
7314 
7315 	ocs_dma_free(hw->os, &(cb_arg->payload));
7316 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7317 	ocs_free(hw->os, arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7318 
7319 	return 0;
7320 }
7321 
7322 /**
7323  * @brief Called for the completion of set_port_profile for a
7324  *        user request.
7325  *
7326  * @par Description
7327  * This is the first of two callbacks for the set_port_protocol
7328  * function.  The set operation is a read-modify-write.  This
7329  * callback is called when the read completes
7330  * (GET_PROFILE_CONFG).  It will updated the resource
7331  * descriptors, then queue the write (SET_PROFILE_CONFIG).
7332  *
7333  * On entry there are three memory areas that were allocated by
7334  * ocs_hw_set_port_protocol.  If a failure is detected in this
7335  * function those need to be freed.  If this function succeeds
7336  * it allocates three more areas.
7337  *
7338  * @param hw Hardware context.
7339  * @param status The status from the MQE
7340  * @param mqe Pointer to mailbox command buffer.
7341  * @param arg Pointer to a callback argument.
7342  *
7343  * @return Returns 0 on success, or a non-zero value otherwise.
7344  */
7345 static int32_t
7346 ocs_hw_set_port_protocol_cb1(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7347 {
7348 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg = arg;
7349 	ocs_dma_t *payload = &(cb_arg->payload);
7350 	sli4_res_common_get_profile_config_t* response = (sli4_res_common_get_profile_config_t*) payload->virt;
7351 	int num_descriptors;
7352 	sli4_resource_descriptor_v1_t *desc_p;
7353 	sli4_pcie_resource_descriptor_v1_t *pcie_desc_p;
7354 	int i;
7355 	ocs_hw_set_port_protocol_cb_arg_t *new_cb_arg;
7356 	ocs_hw_port_protocol_e new_protocol;
7357 	uint8_t *dst;
7358 	sli4_isap_resouce_descriptor_v1_t *isap_desc_p;
7359 	uint8_t *mbxdata;
7360 	int pci_descriptor_count;
7361 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7362 	int num_fcoe_ports = 0;
7363 	int num_iscsi_ports = 0;
7364 
7365 	new_protocol = (ocs_hw_port_protocol_e)cb_arg->new_protocol;
7366 
7367 	num_descriptors = response->desc_count;
7368 
7369 	/* Count PCI descriptors */
7370 	pci_descriptor_count = 0;
7371 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7372 	for (i=0; i<num_descriptors; i++) {
7373 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7374 			++pci_descriptor_count;
7375 		}
7376 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7377 	}
7378 
7379 	/* mbxdata holds the header of the command */
7380 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7381 	if (mbxdata == NULL) {
7382 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7383 		return OCS_HW_RTN_NO_MEMORY;
7384 	}
7385 
7386 
7387 	/* cb_arg holds the data that will be passed to the callback on completion */
7388 	new_cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7389 	if (new_cb_arg == NULL) {
7390 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7391 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7392 		return OCS_HW_RTN_NO_MEMORY;
7393 	}
7394 
7395 	new_cb_arg->cb = cb_arg->cb;
7396 	new_cb_arg->arg = cb_arg->arg;
7397 
7398 	/* Allocate memory for the descriptors we're going to send.  This is
7399 	 * one for each PCI descriptor plus one ISAP descriptor. */
7400 	if (ocs_dma_alloc(hw->os, &new_cb_arg->payload, sizeof(sli4_req_common_set_profile_config_t) +
7401 			  (pci_descriptor_count * sizeof(sli4_pcie_resource_descriptor_v1_t)) +
7402 			  sizeof(sli4_isap_resouce_descriptor_v1_t), 4)) {
7403 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7404 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7405 		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7406 		return OCS_HW_RTN_NO_MEMORY;
7407 	}
7408 
7409 	sli_cmd_common_set_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE,
7410 						   &new_cb_arg->payload,
7411 						   0, pci_descriptor_count+1, 1);
7412 
7413 	/* Point dst to the first descriptor entry in the SET_PROFILE_CONFIG command */
7414 	dst = (uint8_t *)&(((sli4_req_common_set_profile_config_t *) new_cb_arg->payload.virt)->desc);
7415 
7416 	/* Loop over all descriptors.  If the descriptor is a PCIe descriptor, copy it
7417 	 * to the SET_PROFILE_CONFIG command to be written back.  If it's the descriptor
7418 	 * that we're trying to change also set its pf_type.
7419 	 */
7420 	desc_p = (sli4_resource_descriptor_v1_t *)response->desc;
7421 	for (i=0; i<num_descriptors; i++) {
7422 		if (desc_p->descriptor_type == SLI4_RESOURCE_DESCRIPTOR_TYPE_PCIE) {
7423 			pcie_desc_p = (sli4_pcie_resource_descriptor_v1_t*) desc_p;
7424 			if (pcie_desc_p->pf_number == cb_arg->pci_func) {
7425 				/* This is the PCIe descriptor for this OCS instance.
7426 				 * Update it with the new pf_type */
7427 				switch(new_protocol) {
7428 				case OCS_HW_PORT_PROTOCOL_FC:
7429 					pcie_desc_p->pf_type = SLI4_PROTOCOL_FC;
7430 					break;
7431 				case OCS_HW_PORT_PROTOCOL_FCOE:
7432 					pcie_desc_p->pf_type = SLI4_PROTOCOL_FCOE;
7433 					break;
7434 				case OCS_HW_PORT_PROTOCOL_ISCSI:
7435 					pcie_desc_p->pf_type = SLI4_PROTOCOL_ISCSI;
7436 					break;
7437 				default:
7438 					pcie_desc_p->pf_type = SLI4_PROTOCOL_DEFAULT;
7439 					break;
7440 				}
7441 
7442 			}
7443 
7444 			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_FCOE) {
7445 				++num_fcoe_ports;
7446 			}
7447 			if (pcie_desc_p->pf_type == SLI4_PROTOCOL_ISCSI) {
7448 				++num_iscsi_ports;
7449 			}
7450 			ocs_memcpy(dst, pcie_desc_p, sizeof(sli4_pcie_resource_descriptor_v1_t));
7451 			dst += sizeof(sli4_pcie_resource_descriptor_v1_t);
7452 		}
7453 
7454 		desc_p = (sli4_resource_descriptor_v1_t *) ((uint8_t *)desc_p + desc_p->descriptor_length);
7455 	}
7456 
7457 	/* Create an ISAP resource descriptor */
7458 	isap_desc_p = (sli4_isap_resouce_descriptor_v1_t*)dst;
7459 	isap_desc_p->descriptor_type = SLI4_RESOURCE_DESCRIPTOR_TYPE_ISAP;
7460 	isap_desc_p->descriptor_length = sizeof(sli4_isap_resouce_descriptor_v1_t);
7461 	if (num_iscsi_ports > 0) {
7462 		isap_desc_p->iscsi_tgt = 1;
7463 		isap_desc_p->iscsi_ini = 1;
7464 		isap_desc_p->iscsi_dif = 1;
7465 	}
7466 	if (num_fcoe_ports > 0) {
7467 		isap_desc_p->fcoe_tgt = 1;
7468 		isap_desc_p->fcoe_ini = 1;
7469 		isap_desc_p->fcoe_dif = 1;
7470 	}
7471 
7472 	/* At this point we're done with the memory allocated by ocs_port_set_protocol */
7473 	ocs_dma_free(hw->os, &cb_arg->payload);
7474 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7475 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7476 
7477 
7478 	/* Send a SET_PROFILE_CONFIG mailbox command with the new descriptors */
7479 	rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb2, new_cb_arg);
7480 	if (rc) {
7481 		ocs_log_err(hw->os, "Error posting COMMON_SET_PROFILE_CONFIG\n");
7482 		/* Call the upper level callback to report a failure */
7483 		if (new_cb_arg->cb) {
7484 			new_cb_arg->cb( rc, new_cb_arg->arg);
7485 		}
7486 
7487 		/* Free the memory allocated by this function */
7488 		ocs_dma_free(hw->os, &new_cb_arg->payload);
7489 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7490 		ocs_free(hw->os, new_cb_arg, sizeof(ocs_hw_set_port_protocol_cb_arg_t));
7491 	}
7492 
7493 
7494 	return rc;
7495 }
7496 
7497 /**
7498  * @ingroup io
7499  * @brief  Set the port protocol.
7500  * @par Description
7501  * Setting the port protocol is a read-modify-write operation.
7502  * This function submits a GET_PROFILE_CONFIG command to read
7503  * the current settings.  The callback function will modify the
7504  * settings and issue the write.
7505  *
7506  * On successful completion this function will have allocated
7507  * two regular memory areas and one dma area which will need to
7508  * get freed later in the callbacks.
7509  *
7510  * @param hw Hardware context.
7511  * @param new_protocol New protocol to use.
7512  * @param pci_func PCI function to configure.
7513  * @param cb Callback function to be called when the command completes.
7514  * @param ul_arg An argument that is passed to the callback function.
7515  *
7516  * @return
7517  * - OCS_HW_RTN_SUCCESS on success.
7518  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7519  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7520  *   context.
7521  * - OCS_HW_RTN_ERROR on any other error.
7522  */
7523 ocs_hw_rtn_e
7524 ocs_hw_set_port_protocol(ocs_hw_t *hw, ocs_hw_port_protocol_e new_protocol,
7525 		uint32_t pci_func, ocs_set_port_protocol_cb_t cb, void *ul_arg)
7526 {
7527 	uint8_t *mbxdata;
7528 	ocs_hw_set_port_protocol_cb_arg_t *cb_arg;
7529 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
7530 
7531 	/* Only supported on Skyhawk */
7532 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7533 		return OCS_HW_RTN_ERROR;
7534 	}
7535 
7536 	/* mbxdata holds the header of the command */
7537 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7538 	if (mbxdata == NULL) {
7539 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7540 		return OCS_HW_RTN_NO_MEMORY;
7541 	}
7542 
7543 
7544 	/* cb_arg holds the data that will be passed to the callback on completion */
7545 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_port_protocol_cb_arg_t), OCS_M_NOWAIT);
7546 	if (cb_arg == NULL) {
7547 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7548 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7549 		return OCS_HW_RTN_NO_MEMORY;
7550 	}
7551 
7552 	cb_arg->cb = cb;
7553 	cb_arg->arg = ul_arg;
7554 	cb_arg->new_protocol = new_protocol;
7555 	cb_arg->pci_func = pci_func;
7556 
7557 	/* dma_mem holds the non-embedded portion */
7558 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, 4096, 4)) {
7559 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7560 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7561 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_port_protocol_cb_arg_t));
7562 		return OCS_HW_RTN_NO_MEMORY;
7563 	}
7564 
7565 	if (sli_cmd_common_get_profile_config(&hw->sli, mbxdata, SLI4_BMBX_SIZE, &cb_arg->payload)) {
7566 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_port_protocol_cb1, cb_arg);
7567 	}
7568 
7569 	if (rc != OCS_HW_RTN_SUCCESS) {
7570 		ocs_log_test(hw->os, "GET_PROFILE_CONFIG failed\n");
7571 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7572 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_fw_write_cb_arg_t));
7573 		ocs_dma_free(hw->os, &cb_arg->payload);
7574 	}
7575 
7576 	return rc;
7577 }
7578 
7579 typedef struct ocs_hw_get_profile_list_cb_arg_s {
7580 	ocs_get_profile_list_cb_t cb;
7581 	void *arg;
7582 	ocs_dma_t payload;
7583 } ocs_hw_get_profile_list_cb_arg_t;
7584 
7585 /**
7586  * @brief Called for the completion of get_profile_list for a
7587  *        user request.
7588  * @par Description
7589  * This function is called when the COMMMON_GET_PROFILE_LIST
7590  * mailbox completes.  The response will be in
7591  * ctx->non_embedded_mem.virt.  This function parses the
7592  * response and creates a ocs_hw_profile_list, then calls the
7593  * mgmt_cb callback function and passes that list to it.
7594  *
7595  * @param hw Hardware context.
7596  * @param status The status from the MQE
7597  * @param mqe Pointer to mailbox command buffer.
7598  * @param arg Pointer to a callback argument.
7599  *
7600  * @return Returns 0 on success, or a non-zero value on failure.
7601  */
7602 static int32_t
7603 ocs_hw_get_profile_list_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7604 {
7605 	ocs_hw_profile_list_t *list;
7606 	ocs_hw_get_profile_list_cb_arg_t *cb_arg = arg;
7607 	ocs_dma_t *payload = &(cb_arg->payload);
7608 	sli4_res_common_get_profile_list_t *response = (sli4_res_common_get_profile_list_t *)payload->virt;
7609 	int i;
7610 	int num_descriptors;
7611 
7612 	list = ocs_malloc(hw->os, sizeof(ocs_hw_profile_list_t), OCS_M_ZERO);
7613 	list->num_descriptors = response->profile_descriptor_count;
7614 
7615 	num_descriptors = list->num_descriptors;
7616 	if (num_descriptors > OCS_HW_MAX_PROFILES) {
7617 		num_descriptors = OCS_HW_MAX_PROFILES;
7618 	}
7619 
7620 	for (i=0; i<num_descriptors; i++) {
7621 		list->descriptors[i].profile_id = response->profile_descriptor[i].profile_id;
7622 		list->descriptors[i].profile_index = response->profile_descriptor[i].profile_index;
7623 		ocs_strcpy(list->descriptors[i].profile_description, (char *)response->profile_descriptor[i].profile_description);
7624 	}
7625 
7626 	if (cb_arg->cb) {
7627 		cb_arg->cb(status, list, cb_arg->arg);
7628 	} else {
7629 		ocs_free(hw->os, list, sizeof(*list));
7630 	}
7631 
7632 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7633 	ocs_dma_free(hw->os, &cb_arg->payload);
7634 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7635 
7636 	return 0;
7637 }
7638 
7639 /**
7640  * @ingroup io
7641  * @brief  Get a list of available profiles.
7642  * @par Description
7643  * Issues a SLI-4 COMMON_GET_PROFILE_LIST mailbox.  When the
7644  * command completes the provided mgmt callback function is
7645  * called.
7646  *
7647  * @param hw Hardware context.
7648  * @param cb Callback function to be called when the
7649  *      	  command completes.
7650  * @param ul_arg An argument that is passed to the callback
7651  *      	 function.
7652  *
7653  * @return
7654  * - OCS_HW_RTN_SUCCESS on success.
7655  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7656  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7657  *   context.
7658  * - OCS_HW_RTN_ERROR on any other error.
7659  */
7660 ocs_hw_rtn_e
7661 ocs_hw_get_profile_list(ocs_hw_t *hw, ocs_get_profile_list_cb_t cb, void* ul_arg)
7662 {
7663 	uint8_t *mbxdata;
7664 	ocs_hw_get_profile_list_cb_arg_t *cb_arg;
7665 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7666 
7667 	/* Only supported on Skyhawk */
7668 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7669 		return OCS_HW_RTN_ERROR;
7670 	}
7671 
7672 	/* mbxdata holds the header of the command */
7673 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7674 	if (mbxdata == NULL) {
7675 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7676 		return OCS_HW_RTN_NO_MEMORY;
7677 	}
7678 
7679 
7680 	/* cb_arg holds the data that will be passed to the callback on completion */
7681 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_profile_list_cb_arg_t), OCS_M_NOWAIT);
7682 	if (cb_arg == NULL) {
7683 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7684 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7685 		return OCS_HW_RTN_NO_MEMORY;
7686 	}
7687 
7688 	cb_arg->cb = cb;
7689 	cb_arg->arg = ul_arg;
7690 
7691 	/* dma_mem holds the non-embedded portion */
7692 	if (ocs_dma_alloc(hw->os, &cb_arg->payload, sizeof(sli4_res_common_get_profile_list_t), 4)) {
7693 		ocs_log_err(hw->os, "Failed to allocate DMA buffer\n");
7694 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7695 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7696 		return OCS_HW_RTN_NO_MEMORY;
7697 	}
7698 
7699 	if (sli_cmd_common_get_profile_list(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, &cb_arg->payload)) {
7700 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_profile_list_cb, cb_arg);
7701 	}
7702 
7703 	if (rc != OCS_HW_RTN_SUCCESS) {
7704 		ocs_log_test(hw->os, "GET_PROFILE_LIST failed\n");
7705 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7706 		ocs_dma_free(hw->os, &cb_arg->payload);
7707 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_profile_list_cb_arg_t));
7708 	}
7709 
7710 	return rc;
7711 }
7712 
7713 typedef struct ocs_hw_get_active_profile_cb_arg_s {
7714 	ocs_get_active_profile_cb_t cb;
7715 	void *arg;
7716 } ocs_hw_get_active_profile_cb_arg_t;
7717 
7718 /**
7719  * @brief Called for the completion of get_active_profile for a
7720  *        user request.
7721  *
7722  * @param hw Hardware context.
7723  * @param status The status from the MQE
7724  * @param mqe Pointer to mailbox command buffer.
7725  * @param arg Pointer to a callback argument.
7726  *
7727  * @return Returns 0 on success, or a non-zero value on failure.
7728  */
7729 static int32_t
7730 ocs_hw_get_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7731 {
7732 	ocs_hw_get_active_profile_cb_arg_t *cb_arg = arg;
7733 	sli4_cmd_sli_config_t* mbox_rsp = (sli4_cmd_sli_config_t*) mqe;
7734 	sli4_res_common_get_active_profile_t* response = (sli4_res_common_get_active_profile_t*) mbox_rsp->payload.embed;
7735 	uint32_t active_profile;
7736 
7737 	active_profile = response->active_profile_id;
7738 
7739 	if (cb_arg->cb) {
7740 		cb_arg->cb(status, active_profile, cb_arg->arg);
7741 	}
7742 
7743 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7744 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7745 
7746 	return 0;
7747 }
7748 
7749 /**
7750  * @ingroup io
7751  * @brief  Get the currently active profile.
7752  * @par Description
7753  * Issues a SLI-4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
7754  * command completes the provided mgmt callback function is
7755  * called.
7756  *
7757  * @param hw Hardware context.
7758  * @param cb Callback function to be called when the
7759  *	     command completes.
7760  * @param ul_arg An argument that is passed to the callback
7761  *      	 function.
7762  *
7763  * @return
7764  * - OCS_HW_RTN_SUCCESS on success.
7765  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7766  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7767  *   context.
7768  * - OCS_HW_RTN_ERROR on any other error.
7769  */
7770 int32_t
7771 ocs_hw_get_active_profile(ocs_hw_t *hw, ocs_get_active_profile_cb_t cb, void* ul_arg)
7772 {
7773 	uint8_t *mbxdata;
7774 	ocs_hw_get_active_profile_cb_arg_t *cb_arg;
7775 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7776 
7777 	/* Only supported on Skyhawk */
7778 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
7779 		return OCS_HW_RTN_ERROR;
7780 	}
7781 
7782 	/* mbxdata holds the header of the command */
7783 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7784 	if (mbxdata == NULL) {
7785 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7786 		return OCS_HW_RTN_NO_MEMORY;
7787 	}
7788 
7789 	/* cb_arg holds the data that will be passed to the callback on completion */
7790 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_active_profile_cb_arg_t), OCS_M_NOWAIT);
7791 	if (cb_arg == NULL) {
7792 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7793 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7794 		return OCS_HW_RTN_NO_MEMORY;
7795 	}
7796 
7797 	cb_arg->cb = cb;
7798 	cb_arg->arg = ul_arg;
7799 
7800 	if (sli_cmd_common_get_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7801 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_active_profile_cb, cb_arg);
7802 	}
7803 
7804 	if (rc != OCS_HW_RTN_SUCCESS) {
7805 		ocs_log_test(hw->os, "GET_ACTIVE_PROFILE failed\n");
7806 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7807 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
7808 	}
7809 
7810 	return rc;
7811 }
7812 
7813 typedef struct ocs_hw_get_nvparms_cb_arg_s {
7814 	ocs_get_nvparms_cb_t cb;
7815 	void *arg;
7816 } ocs_hw_get_nvparms_cb_arg_t;
7817 
7818 /**
7819  * @brief Called for the completion of get_nvparms for a
7820  *        user request.
7821  *
7822  * @param hw Hardware context.
7823  * @param status The status from the MQE.
7824  * @param mqe Pointer to mailbox command buffer.
7825  * @param arg Pointer to a callback argument.
7826  *
7827  * @return 0 on success, non-zero otherwise
7828  */
7829 static int32_t
7830 ocs_hw_get_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7831 {
7832 	ocs_hw_get_nvparms_cb_arg_t *cb_arg = arg;
7833 	sli4_cmd_read_nvparms_t* mbox_rsp = (sli4_cmd_read_nvparms_t*) mqe;
7834 
7835 	if (cb_arg->cb) {
7836 		cb_arg->cb(status, mbox_rsp->wwpn, mbox_rsp->wwnn, mbox_rsp->hard_alpa,
7837 				mbox_rsp->preferred_d_id, cb_arg->arg);
7838 	}
7839 
7840 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7841 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7842 
7843 	return 0;
7844 }
7845 
7846 /**
7847  * @ingroup io
7848  * @brief  Read non-volatile parms.
7849  * @par Description
7850  * Issues a SLI-4 READ_NVPARMS mailbox. When the
7851  * command completes the provided mgmt callback function is
7852  * called.
7853  *
7854  * @param hw Hardware context.
7855  * @param cb Callback function to be called when the
7856  *	  command completes.
7857  * @param ul_arg An argument that is passed to the callback
7858  *	  function.
7859  *
7860  * @return
7861  * - OCS_HW_RTN_SUCCESS on success.
7862  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7863  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7864  *   context.
7865  * - OCS_HW_RTN_ERROR on any other error.
7866  */
7867 int32_t
7868 ocs_hw_get_nvparms(ocs_hw_t *hw, ocs_get_nvparms_cb_t cb, void* ul_arg)
7869 {
7870 	uint8_t *mbxdata;
7871 	ocs_hw_get_nvparms_cb_arg_t *cb_arg;
7872 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7873 
7874 	/* mbxdata holds the header of the command */
7875 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7876 	if (mbxdata == NULL) {
7877 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7878 		return OCS_HW_RTN_NO_MEMORY;
7879 	}
7880 
7881 	/* cb_arg holds the data that will be passed to the callback on completion */
7882 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_get_nvparms_cb_arg_t), OCS_M_NOWAIT);
7883 	if (cb_arg == NULL) {
7884 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7885 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7886 		return OCS_HW_RTN_NO_MEMORY;
7887 	}
7888 
7889 	cb_arg->cb = cb;
7890 	cb_arg->arg = ul_arg;
7891 
7892 	if (sli_cmd_read_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE)) {
7893 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_get_nvparms_cb, cb_arg);
7894 	}
7895 
7896 	if (rc != OCS_HW_RTN_SUCCESS) {
7897 		ocs_log_test(hw->os, "READ_NVPARMS failed\n");
7898 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7899 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_nvparms_cb_arg_t));
7900 	}
7901 
7902 	return rc;
7903 }
7904 
7905 typedef struct ocs_hw_set_nvparms_cb_arg_s {
7906 	ocs_set_nvparms_cb_t cb;
7907 	void *arg;
7908 } ocs_hw_set_nvparms_cb_arg_t;
7909 
7910 /**
7911  * @brief Called for the completion of set_nvparms for a
7912  *        user request.
7913  *
7914  * @param hw Hardware context.
7915  * @param status The status from the MQE.
7916  * @param mqe Pointer to mailbox command buffer.
7917  * @param arg Pointer to a callback argument.
7918  *
7919  * @return Returns 0 on success, or a non-zero value on failure.
7920  */
7921 static int32_t
7922 ocs_hw_set_nvparms_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
7923 {
7924 	ocs_hw_set_nvparms_cb_arg_t *cb_arg = arg;
7925 
7926 	if (cb_arg->cb) {
7927 		cb_arg->cb(status, cb_arg->arg);
7928 	}
7929 
7930 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
7931 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7932 
7933 	return 0;
7934 }
7935 
7936 /**
7937  * @ingroup io
7938  * @brief  Write non-volatile parms.
7939  * @par Description
7940  * Issues a SLI-4 WRITE_NVPARMS mailbox. When the
7941  * command completes the provided mgmt callback function is
7942  * called.
7943  *
7944  * @param hw Hardware context.
7945  * @param cb Callback function to be called when the
7946  *	  command completes.
7947  * @param wwpn Port's WWPN in big-endian order, or NULL to use default.
7948  * @param wwnn Port's WWNN in big-endian order, or NULL to use default.
7949  * @param hard_alpa A hard AL_PA address setting used during loop
7950  * initialization. If no hard AL_PA is required, set to 0.
7951  * @param preferred_d_id A preferred D_ID address setting
7952  * that may be overridden with the CONFIG_LINK mailbox command.
7953  * If there is no preference, set to 0.
7954  * @param ul_arg An argument that is passed to the callback
7955  *	  function.
7956  *
7957  * @return
7958  * - OCS_HW_RTN_SUCCESS on success.
7959  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
7960  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
7961  *   context.
7962  * - OCS_HW_RTN_ERROR on any other error.
7963  */
7964 int32_t
7965 ocs_hw_set_nvparms(ocs_hw_t *hw, ocs_set_nvparms_cb_t cb, uint8_t *wwpn,
7966 		uint8_t *wwnn, uint8_t hard_alpa, uint32_t preferred_d_id, void* ul_arg)
7967 {
7968 	uint8_t *mbxdata;
7969 	ocs_hw_set_nvparms_cb_arg_t *cb_arg;
7970 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
7971 
7972 	/* mbxdata holds the header of the command */
7973 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
7974 	if (mbxdata == NULL) {
7975 		ocs_log_err(hw->os, "failed to malloc mbox\n");
7976 		return OCS_HW_RTN_NO_MEMORY;
7977 	}
7978 
7979 	/* cb_arg holds the data that will be passed to the callback on completion */
7980 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_nvparms_cb_arg_t), OCS_M_NOWAIT);
7981 	if (cb_arg == NULL) {
7982 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
7983 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7984 		return OCS_HW_RTN_NO_MEMORY;
7985 	}
7986 
7987 	cb_arg->cb = cb;
7988 	cb_arg->arg = ul_arg;
7989 
7990 	if (sli_cmd_write_nvparms(&hw->sli, mbxdata, SLI4_BMBX_SIZE, wwpn, wwnn, hard_alpa, preferred_d_id)) {
7991 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_nvparms_cb, cb_arg);
7992 	}
7993 
7994 	if (rc != OCS_HW_RTN_SUCCESS) {
7995 		ocs_log_test(hw->os, "SET_NVPARMS failed\n");
7996 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
7997 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_nvparms_cb_arg_t));
7998 	}
7999 
8000 	return rc;
8001 }
8002 
8003 
8004 
8005 /**
8006  * @brief Called to obtain the count for the specified type.
8007  *
8008  * @param hw Hardware context.
8009  * @param io_count_type IO count type (inuse, free, wait_free).
8010  *
8011  * @return Returns the number of IOs on the specified list type.
8012  */
8013 uint32_t
8014 ocs_hw_io_get_count(ocs_hw_t *hw, ocs_hw_io_count_type_e io_count_type)
8015 {
8016 	ocs_hw_io_t *io = NULL;
8017 	uint32_t count = 0;
8018 
8019 	ocs_lock(&hw->io_lock);
8020 
8021 	switch (io_count_type) {
8022 	case OCS_HW_IO_INUSE_COUNT :
8023 		ocs_list_foreach(&hw->io_inuse, io) {
8024 			count++;
8025 		}
8026 		break;
8027 	case OCS_HW_IO_FREE_COUNT :
8028 		 ocs_list_foreach(&hw->io_free, io) {
8029 			 count++;
8030 		 }
8031 		 break;
8032 	case OCS_HW_IO_WAIT_FREE_COUNT :
8033 		 ocs_list_foreach(&hw->io_wait_free, io) {
8034 			 count++;
8035 		 }
8036 		 break;
8037 	case OCS_HW_IO_PORT_OWNED_COUNT:
8038 		 ocs_list_foreach(&hw->io_port_owned, io) {
8039 			 count++;
8040 		 }
8041 		 break;
8042 	case OCS_HW_IO_N_TOTAL_IO_COUNT :
8043 		count = hw->config.n_io;
8044 		break;
8045 	}
8046 
8047 	ocs_unlock(&hw->io_lock);
8048 
8049 	return count;
8050 }
8051 
8052 /**
8053  * @brief Called to obtain the count of produced RQs.
8054  *
8055  * @param hw Hardware context.
8056  *
8057  * @return Returns the number of RQs produced.
8058  */
8059 uint32_t
8060 ocs_hw_get_rqes_produced_count(ocs_hw_t *hw)
8061 {
8062 	uint32_t count = 0;
8063 	uint32_t i;
8064 	uint32_t j;
8065 
8066 	for (i = 0; i < hw->hw_rq_count; i++) {
8067 		hw_rq_t *rq = hw->hw_rq[i];
8068 		if (rq->rq_tracker != NULL) {
8069 			for (j = 0; j < rq->entry_count; j++) {
8070 				if (rq->rq_tracker[j] != NULL) {
8071 					count++;
8072 				}
8073 			}
8074 		}
8075 	}
8076 
8077 	return count;
8078 }
8079 
8080 typedef struct ocs_hw_set_active_profile_cb_arg_s {
8081 	ocs_set_active_profile_cb_t cb;
8082 	void *arg;
8083 } ocs_hw_set_active_profile_cb_arg_t;
8084 
8085 /**
8086  * @brief Called for the completion of set_active_profile for a
8087  *        user request.
8088  *
8089  * @param hw Hardware context.
8090  * @param status The status from the MQE
8091  * @param mqe Pointer to mailbox command buffer.
8092  * @param arg Pointer to a callback argument.
8093  *
8094  * @return Returns 0 on success, or a non-zero value on failure.
8095  */
8096 static int32_t
8097 ocs_hw_set_active_profile_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
8098 {
8099 	ocs_hw_set_active_profile_cb_arg_t *cb_arg = arg;
8100 
8101 	if (cb_arg->cb) {
8102 		cb_arg->cb(status, cb_arg->arg);
8103 	}
8104 
8105 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
8106 	ocs_free(hw->os, cb_arg, sizeof(ocs_hw_get_active_profile_cb_arg_t));
8107 
8108 	return 0;
8109 }
8110 
8111 /**
8112  * @ingroup io
8113  * @brief  Set the currently active profile.
8114  * @par Description
8115  * Issues a SLI4 COMMON_GET_ACTIVE_PROFILE mailbox. When the
8116  * command completes the provided mgmt callback function is
8117  * called.
8118  *
8119  * @param hw Hardware context.
8120  * @param profile_id Profile ID to activate.
8121  * @param cb Callback function to be called when the command completes.
8122  * @param ul_arg An argument that is passed to the callback function.
8123  *
8124  * @return
8125  * - OCS_HW_RTN_SUCCESS on success.
8126  * - OCS_HW_RTN_NO_MEMORY if a malloc fails.
8127  * - OCS_HW_RTN_NO_RESOURCES if unable to get a command
8128  *   context.
8129  * - OCS_HW_RTN_ERROR on any other error.
8130  */
8131 int32_t
8132 ocs_hw_set_active_profile(ocs_hw_t *hw, ocs_set_active_profile_cb_t cb, uint32_t profile_id, void* ul_arg)
8133 {
8134 	uint8_t *mbxdata;
8135 	ocs_hw_set_active_profile_cb_arg_t *cb_arg;
8136 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
8137 
8138 	/* Only supported on Skyhawk */
8139 	if (sli_get_if_type(&hw->sli) != SLI4_IF_TYPE_BE3_SKH_PF) {
8140 		return OCS_HW_RTN_ERROR;
8141 	}
8142 
8143 	/* mbxdata holds the header of the command */
8144 	mbxdata = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
8145 	if (mbxdata == NULL) {
8146 		ocs_log_err(hw->os, "failed to malloc mbox\n");
8147 		return OCS_HW_RTN_NO_MEMORY;
8148 	}
8149 
8150 
8151 	/* cb_arg holds the data that will be passed to the callback on completion */
8152 	cb_arg = ocs_malloc(hw->os, sizeof(ocs_hw_set_active_profile_cb_arg_t), OCS_M_NOWAIT);
8153 	if (cb_arg == NULL) {
8154 		ocs_log_err(hw->os, "failed to malloc cb_arg\n");
8155 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8156 		return OCS_HW_RTN_NO_MEMORY;
8157 	}
8158 
8159 	cb_arg->cb = cb;
8160 	cb_arg->arg = ul_arg;
8161 
8162 	if (sli_cmd_common_set_active_profile(&hw->sli, mbxdata, SLI4_BMBX_SIZE, 0, profile_id)) {
8163 		rc = ocs_hw_command(hw, mbxdata, OCS_CMD_NOWAIT, ocs_hw_set_active_profile_cb, cb_arg);
8164 	}
8165 
8166 	if (rc != OCS_HW_RTN_SUCCESS) {
8167 		ocs_log_test(hw->os, "SET_ACTIVE_PROFILE failed\n");
8168 		ocs_free(hw->os, mbxdata, SLI4_BMBX_SIZE);
8169 		ocs_free(hw->os, cb_arg, sizeof(ocs_hw_set_active_profile_cb_arg_t));
8170 	}
8171 
8172 	return rc;
8173 }
8174 
8175 
8176 
8177 /*
8178  * Private functions
8179  */
8180 
8181 /**
8182  * @brief Update the queue hash with the ID and index.
8183  *
8184  * @param hash Pointer to hash table.
8185  * @param id ID that was created.
8186  * @param index The index into the hash object.
8187  */
8188 static void
8189 ocs_hw_queue_hash_add(ocs_queue_hash_t *hash, uint16_t id, uint16_t index)
8190 {
8191 	uint32_t	hash_index = id & (OCS_HW_Q_HASH_SIZE - 1);
8192 
8193 	/*
8194 	 * Since the hash is always bigger than the number of queues, then we
8195 	 * never have to worry about an infinite loop.
8196 	 */
8197 	while(hash[hash_index].in_use) {
8198 		hash_index = (hash_index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8199 	}
8200 
8201 	/* not used, claim the entry */
8202 	hash[hash_index].id = id;
8203 	hash[hash_index].in_use = 1;
8204 	hash[hash_index].index = index;
8205 }
8206 
8207 /**
8208  * @brief Find index given queue ID.
8209  *
8210  * @param hash Pointer to hash table.
8211  * @param id ID to find.
8212  *
8213  * @return Returns the index into the HW cq array or -1 if not found.
8214  */
8215 int32_t
8216 ocs_hw_queue_hash_find(ocs_queue_hash_t *hash, uint16_t id)
8217 {
8218 	int32_t	rc = -1;
8219 	int32_t	index = id & (OCS_HW_Q_HASH_SIZE - 1);
8220 
8221 	/*
8222 	 * Since the hash is always bigger than the maximum number of Qs, then we
8223 	 * never have to worry about an infinite loop. We will always find an
8224 	 * unused entry.
8225 	 */
8226 	do {
8227 		if (hash[index].in_use &&
8228 		    hash[index].id == id) {
8229 			rc = hash[index].index;
8230 		} else {
8231 			index = (index + 1) & (OCS_HW_Q_HASH_SIZE - 1);
8232 		}
8233 	} while(rc == -1 && hash[index].in_use);
8234 
8235 	return rc;
8236 }
8237 
8238 static int32_t
8239 ocs_hw_domain_add(ocs_hw_t *hw, ocs_domain_t *domain)
8240 {
8241 	int32_t		rc = OCS_HW_RTN_ERROR;
8242 	uint16_t	fcfi = UINT16_MAX;
8243 
8244 	if ((hw == NULL) || (domain == NULL)) {
8245 		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8246 				hw, domain);
8247 		return OCS_HW_RTN_ERROR;
8248 	}
8249 
8250 	fcfi = domain->fcf_indicator;
8251 
8252 	if (fcfi < SLI4_MAX_FCFI) {
8253 		uint16_t	fcf_index = UINT16_MAX;
8254 
8255 		ocs_log_debug(hw->os, "adding domain %p @ %#x\n",
8256 				domain, fcfi);
8257 		hw->domains[fcfi] = domain;
8258 
8259 		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8260 		if (hw->workaround.override_fcfi) {
8261 			if (hw->first_domain_idx < 0) {
8262 				hw->first_domain_idx = fcfi;
8263 			}
8264 		}
8265 
8266 		fcf_index = domain->fcf;
8267 
8268 		if (fcf_index < SLI4_MAX_FCF_INDEX) {
8269 			ocs_log_debug(hw->os, "adding map of FCF index %d to FCFI %d\n",
8270 				      fcf_index, fcfi);
8271 			hw->fcf_index_fcfi[fcf_index] = fcfi;
8272 			rc = OCS_HW_RTN_SUCCESS;
8273 		} else {
8274 			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8275 				     fcf_index, SLI4_MAX_FCF_INDEX);
8276 			hw->domains[fcfi] = NULL;
8277 		}
8278 	} else {
8279 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8280 				fcfi, SLI4_MAX_FCFI);
8281 	}
8282 
8283 	return rc;
8284 }
8285 
8286 static int32_t
8287 ocs_hw_domain_del(ocs_hw_t *hw, ocs_domain_t *domain)
8288 {
8289 	int32_t		rc = OCS_HW_RTN_ERROR;
8290 	uint16_t	fcfi = UINT16_MAX;
8291 
8292 	if ((hw == NULL) || (domain == NULL)) {
8293 		ocs_log_err(NULL, "bad parameter hw=%p domain=%p\n",
8294 				hw, domain);
8295 		return OCS_HW_RTN_ERROR;
8296 	}
8297 
8298 	fcfi = domain->fcf_indicator;
8299 
8300 	if (fcfi < SLI4_MAX_FCFI) {
8301 		uint16_t	fcf_index = UINT16_MAX;
8302 
8303 		ocs_log_debug(hw->os, "deleting domain %p @ %#x\n",
8304 				domain, fcfi);
8305 
8306 		if (domain != hw->domains[fcfi]) {
8307 			ocs_log_test(hw->os, "provided domain %p does not match stored domain %p\n",
8308 				     domain, hw->domains[fcfi]);
8309 			return OCS_HW_RTN_ERROR;
8310 		}
8311 
8312 		hw->domains[fcfi] = NULL;
8313 
8314 		/* HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB */
8315 		if (hw->workaround.override_fcfi) {
8316 			if (hw->first_domain_idx == fcfi) {
8317 				hw->first_domain_idx = -1;
8318 			}
8319 		}
8320 
8321 		fcf_index = domain->fcf;
8322 
8323 		if (fcf_index < SLI4_MAX_FCF_INDEX) {
8324 			if (hw->fcf_index_fcfi[fcf_index] == fcfi) {
8325 				hw->fcf_index_fcfi[fcf_index] = 0;
8326 				rc = OCS_HW_RTN_SUCCESS;
8327 			} else {
8328 				ocs_log_test(hw->os, "indexed FCFI %#x doesn't match provided %#x @ %d\n",
8329 					     hw->fcf_index_fcfi[fcf_index], fcfi, fcf_index);
8330 			}
8331 		} else {
8332 			ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8333 				     fcf_index, SLI4_MAX_FCF_INDEX);
8334 		}
8335 	} else {
8336 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8337 				fcfi, SLI4_MAX_FCFI);
8338 	}
8339 
8340 	return rc;
8341 }
8342 
8343 ocs_domain_t *
8344 ocs_hw_domain_get(ocs_hw_t *hw, uint16_t fcfi)
8345 {
8346 
8347 	if (hw == NULL) {
8348 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8349 		return NULL;
8350 	}
8351 
8352 	if (fcfi < SLI4_MAX_FCFI) {
8353 		return hw->domains[fcfi];
8354 	} else {
8355 		ocs_log_test(hw->os, "FCFI %#x out of range (max %#x)\n",
8356 				fcfi, SLI4_MAX_FCFI);
8357 		return NULL;
8358 	}
8359 }
8360 
8361 static ocs_domain_t *
8362 ocs_hw_domain_get_indexed(ocs_hw_t *hw, uint16_t fcf_index)
8363 {
8364 
8365 	if (hw == NULL) {
8366 		ocs_log_err(NULL, "bad parameter hw=%p\n", hw);
8367 		return NULL;
8368 	}
8369 
8370 	if (fcf_index < SLI4_MAX_FCF_INDEX) {
8371 		return ocs_hw_domain_get(hw, hw->fcf_index_fcfi[fcf_index]);
8372 	} else {
8373 		ocs_log_test(hw->os, "FCF index %d out of range (max %d)\n",
8374 			     fcf_index, SLI4_MAX_FCF_INDEX);
8375 		return NULL;
8376 	}
8377 }
8378 
8379 /**
8380  * @brief Quaratine an IO by taking a reference count and adding it to the
8381  *        quarantine list. When the IO is popped from the list then the
8382  *        count is released and the IO MAY be freed depending on whether
8383  *        it is still referenced by the IO.
8384  *
8385  *        @n @b Note: BZ 160124 - If this is a target write or an initiator read using
8386  *        DIF, then we must add the XRI to a quarantine list until we receive
8387  *        4 more completions of this same type.
8388  *
8389  * @param hw Hardware context.
8390  * @param wq Pointer to the WQ associated with the IO object to quarantine.
8391  * @param io Pointer to the io object to quarantine.
8392  */
8393 static void
8394 ocs_hw_io_quarantine(ocs_hw_t *hw, hw_wq_t *wq, ocs_hw_io_t *io)
8395 {
8396 	ocs_quarantine_info_t *q_info = &wq->quarantine_info;
8397 	uint32_t	index;
8398 	ocs_hw_io_t	*free_io = NULL;
8399 
8400 	/* return if the QX bit was clear */
8401 	if (!io->quarantine) {
8402 		return;
8403 	}
8404 
8405 	/* increment the IO refcount to prevent it from being freed before the quarantine is over */
8406 	if (ocs_ref_get_unless_zero(&io->ref) == 0) {
8407 		/* command no longer active */
8408 		ocs_log_debug(hw ? hw->os : NULL,
8409 			      "io not active xri=0x%x tag=0x%x\n",
8410 			      io->indicator, io->reqtag);
8411 		return;
8412 	}
8413 
8414 	sli_queue_lock(wq->queue);
8415 		index = q_info->quarantine_index;
8416 		free_io = q_info->quarantine_ios[index];
8417 		q_info->quarantine_ios[index] = io;
8418 		q_info->quarantine_index = (index + 1) % OCS_HW_QUARANTINE_QUEUE_DEPTH;
8419 	sli_queue_unlock(wq->queue);
8420 
8421 	if (free_io != NULL) {
8422 		ocs_ref_put(&free_io->ref); /* ocs_ref_get(): same function */
8423 	}
8424 }
8425 
8426 /**
8427  * @brief Process entries on the given completion queue.
8428  *
8429  * @param hw Hardware context.
8430  * @param cq Pointer to the HW completion queue object.
8431  *
8432  * @return None.
8433  */
8434 void
8435 ocs_hw_cq_process(ocs_hw_t *hw, hw_cq_t *cq)
8436 {
8437 	uint8_t		cqe[sizeof(sli4_mcqe_t)];
8438 	uint16_t	rid = UINT16_MAX;
8439 	sli4_qentry_e	ctype;		/* completion type */
8440 	int32_t		status;
8441 	uint32_t	n_processed = 0;
8442 	time_t		tstart;
8443 	time_t		telapsed;
8444 
8445 	tstart = ocs_msectime();
8446 
8447 	while (!sli_queue_read(&hw->sli, cq->queue, cqe)) {
8448 		status = sli_cq_parse(&hw->sli, cq->queue, cqe, &ctype, &rid);
8449 		/*
8450 		 * The sign of status is significant. If status is:
8451 		 * == 0 : call completed correctly and the CQE indicated success
8452 		 *  > 0 : call completed correctly and the CQE indicated an error
8453 		 *  < 0 : call failed and no information is available about the CQE
8454 		 */
8455 		if (status < 0) {
8456 			if (status == -2) {
8457 				/* Notification that an entry was consumed, but not completed */
8458 				continue;
8459 			}
8460 
8461 			break;
8462 		}
8463 
8464 		switch (ctype) {
8465 		case SLI_QENTRY_ASYNC:
8466 			CPUTRACE("async");
8467 			sli_cqe_async(&hw->sli, cqe);
8468 			break;
8469 		case SLI_QENTRY_MQ:
8470 			/*
8471 			 * Process MQ entry. Note there is no way to determine
8472 			 * the MQ_ID from the completion entry.
8473 			 */
8474 			CPUTRACE("mq");
8475 			ocs_hw_mq_process(hw, status, hw->mq);
8476 			break;
8477 		case SLI_QENTRY_OPT_WRITE_CMD:
8478 			ocs_hw_rqpair_process_auto_xfr_rdy_cmd(hw, cq, cqe);
8479 			break;
8480 		case SLI_QENTRY_OPT_WRITE_DATA:
8481 			ocs_hw_rqpair_process_auto_xfr_rdy_data(hw, cq, cqe);
8482 			break;
8483 		case SLI_QENTRY_WQ:
8484 			CPUTRACE("wq");
8485 			ocs_hw_wq_process(hw, cq, cqe, status, rid);
8486 			break;
8487 		case SLI_QENTRY_WQ_RELEASE: {
8488 			uint32_t wq_id = rid;
8489 			uint32_t index = ocs_hw_queue_hash_find(hw->wq_hash, wq_id);
8490 			hw_wq_t *wq = hw->hw_wq[index];
8491 
8492 			/* Submit any HW IOs that are on the WQ pending list */
8493 			hw_wq_submit_pending(wq, wq->wqec_set_count);
8494 
8495 			break;
8496 		}
8497 
8498 		case SLI_QENTRY_RQ:
8499 			CPUTRACE("rq");
8500 			ocs_hw_rqpair_process_rq(hw, cq, cqe);
8501 			break;
8502 		case SLI_QENTRY_XABT: {
8503 			CPUTRACE("xabt");
8504 			ocs_hw_xabt_process(hw, cq, cqe, rid);
8505 			break;
8506 
8507 		}
8508 		default:
8509 			ocs_log_test(hw->os, "unhandled ctype=%#x rid=%#x\n", ctype, rid);
8510 			break;
8511 		}
8512 
8513 		n_processed++;
8514 		if (n_processed == cq->queue->proc_limit) {
8515 			break;
8516 		}
8517 
8518 		if (cq->queue->n_posted >= (cq->queue->posted_limit)) {
8519 			sli_queue_arm(&hw->sli, cq->queue, FALSE);
8520 		}
8521 	}
8522 
8523 	sli_queue_arm(&hw->sli, cq->queue, TRUE);
8524 
8525 	if (n_processed > cq->queue->max_num_processed) {
8526 		cq->queue->max_num_processed = n_processed;
8527 	}
8528 	telapsed = ocs_msectime() - tstart;
8529 	if (telapsed > cq->queue->max_process_time) {
8530 		cq->queue->max_process_time = telapsed;
8531 	}
8532 }
8533 
8534 /**
8535  * @brief Process WQ completion queue entries.
8536  *
8537  * @param hw Hardware context.
8538  * @param cq Pointer to the HW completion queue object.
8539  * @param cqe Pointer to WQ completion queue.
8540  * @param status Completion status.
8541  * @param rid Resource ID (IO tag).
8542  *
8543  * @return none
8544  */
8545 void
8546 ocs_hw_wq_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, int32_t status, uint16_t rid)
8547 {
8548 	hw_wq_callback_t *wqcb;
8549 
8550 	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_WQ, (void *)cqe, ((sli4_fc_wcqe_t *)cqe)->status, cq->queue->id,
8551 			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8552 
8553 	if(rid == OCS_HW_REQUE_XRI_REGTAG) {
8554 		if(status) {
8555 			ocs_log_err(hw->os, "reque xri failed, status = %d \n", status);
8556 		}
8557 		return;
8558 	}
8559 
8560 	wqcb = ocs_hw_reqtag_get_instance(hw, rid);
8561 	if (wqcb == NULL) {
8562 		ocs_log_err(hw->os, "invalid request tag: x%x\n", rid);
8563 		return;
8564 	}
8565 
8566 	if (wqcb->callback == NULL) {
8567 		ocs_log_err(hw->os, "wqcb callback is NULL\n");
8568 		return;
8569 	}
8570 
8571 	(*wqcb->callback)(wqcb->arg, cqe, status);
8572 }
8573 
8574 /**
8575  * @brief Process WQ completions for IO requests
8576  *
8577  * @param arg Generic callback argument
8578  * @param cqe Pointer to completion queue entry
8579  * @param status Completion status
8580  *
8581  * @par Description
8582  * @n @b Note:  Regarding io->reqtag, the reqtag is assigned once when HW IOs are initialized
8583  * in ocs_hw_setup_io(), and don't need to be returned to the hw->wq_reqtag_pool.
8584  *
8585  * @return None.
8586  */
8587 static void
8588 ocs_hw_wq_process_io(void *arg, uint8_t *cqe, int32_t status)
8589 {
8590 	ocs_hw_io_t *io = arg;
8591 	ocs_hw_t *hw = io->hw;
8592 	sli4_fc_wcqe_t *wcqe = (void *)cqe;
8593 	uint32_t	len = 0;
8594 	uint32_t ext = 0;
8595 	uint8_t out_of_order_axr_cmd = 0;
8596 	uint8_t out_of_order_axr_data = 0;
8597 	uint8_t lock_taken = 0;
8598 #if defined(OCS_DISC_SPIN_DELAY)
8599 	uint32_t delay = 0;
8600 	char prop_buf[32];
8601 #endif
8602 
8603 	/*
8604 	 * For the primary IO, this will also be used for the
8605 	 * response. So it is important to only set/clear this
8606 	 * flag on the first data phase of the IO because
8607 	 * subsequent phases will be done on the secondary XRI.
8608 	 */
8609 	if (io->quarantine && io->quarantine_first_phase) {
8610 		io->quarantine = (wcqe->qx == 1);
8611 		ocs_hw_io_quarantine(hw, io->wq, io);
8612 	}
8613 	io->quarantine_first_phase = FALSE;
8614 
8615 	/* BZ 161832 - free secondary HW IO */
8616 	if (io->sec_hio != NULL &&
8617 	    io->sec_hio->quarantine) {
8618 		/*
8619 		 * If the quarantine flag is set on the
8620 		 * IO, then set it on the secondary IO
8621 		 * based on the quarantine XRI (QX) bit
8622 		 * sent by the FW.
8623 		 */
8624 		io->sec_hio->quarantine = (wcqe->qx == 1);
8625 		/* use the primary io->wq because it is not set on the secondary IO. */
8626 		ocs_hw_io_quarantine(hw, io->wq, io->sec_hio);
8627 	}
8628 
8629 	ocs_hw_remove_io_timed_wqe(hw, io);
8630 
8631 	/* clear xbusy flag if WCQE[XB] is clear */
8632 	if (io->xbusy && wcqe->xb == 0) {
8633 		io->xbusy = FALSE;
8634 	}
8635 
8636 	/* get extended CQE status */
8637 	switch (io->type) {
8638 	case OCS_HW_BLS_ACC:
8639 	case OCS_HW_BLS_ACC_SID:
8640 		break;
8641 	case OCS_HW_ELS_REQ:
8642 		sli_fc_els_did(&hw->sli, cqe, &ext);
8643 		len = sli_fc_response_length(&hw->sli, cqe);
8644 		break;
8645 	case OCS_HW_ELS_RSP:
8646 	case OCS_HW_ELS_RSP_SID:
8647 	case OCS_HW_FC_CT_RSP:
8648 		break;
8649 	case OCS_HW_FC_CT:
8650 		len = sli_fc_response_length(&hw->sli, cqe);
8651 		break;
8652 	case OCS_HW_IO_TARGET_WRITE:
8653 		len = sli_fc_io_length(&hw->sli, cqe);
8654 #if defined(OCS_DISC_SPIN_DELAY)
8655 		if (ocs_get_property("disk_spin_delay", prop_buf, sizeof(prop_buf)) == 0) {
8656 			delay = ocs_strtoul(prop_buf, 0, 0);
8657 			ocs_udelay(delay);
8658 		}
8659 #endif
8660 		break;
8661 	case OCS_HW_IO_TARGET_READ:
8662 		len = sli_fc_io_length(&hw->sli, cqe);
8663 		/*
8664 		 * if_type == 2 seems to return 0 "total length placed" on
8665 		 * FCP_TSEND64_WQE completions. If this appears to happen,
8666 		 * use the CTIO data transfer length instead.
8667 		 */
8668 		if (hw->workaround.retain_tsend_io_length && !len && !status) {
8669 			len = io->length;
8670 		}
8671 
8672 		break;
8673 	case OCS_HW_IO_TARGET_RSP:
8674 		if(io->is_port_owned) {
8675 			ocs_lock(&io->axr_lock);
8676 			lock_taken = 1;
8677 			if(io->axr_buf->call_axr_cmd) {
8678 				out_of_order_axr_cmd = 1;
8679 			}
8680 			if(io->axr_buf->call_axr_data) {
8681 				out_of_order_axr_data = 1;
8682 			}
8683 		}
8684 		break;
8685 	case OCS_HW_IO_INITIATOR_READ:
8686 		len = sli_fc_io_length(&hw->sli, cqe);
8687 		break;
8688 	case OCS_HW_IO_INITIATOR_WRITE:
8689 		len = sli_fc_io_length(&hw->sli, cqe);
8690 		break;
8691 	case OCS_HW_IO_INITIATOR_NODATA:
8692 		break;
8693 	case OCS_HW_IO_DNRX_REQUEUE:
8694 		/* release the count for re-posting the buffer */
8695 		//ocs_hw_io_free(hw, io);
8696 		break;
8697 	default:
8698 		ocs_log_test(hw->os, "XXX unhandled io type %#x for XRI 0x%x\n",
8699 			     io->type, io->indicator);
8700 		break;
8701 	}
8702 	if (status) {
8703 		ext = sli_fc_ext_status(&hw->sli, cqe);
8704 		/* Emulate IAAB=0 for initiator WQEs only; i.e. automatically
8705 		 * abort exchange if an error occurred and exchange is still busy.
8706 		 */
8707 		if (hw->config.i_only_aab &&
8708 		    (ocs_hw_iotype_is_originator(io->type)) &&
8709 		    (ocs_hw_wcqe_abort_needed(status, ext, wcqe->xb))) {
8710 			ocs_hw_rtn_e rc;
8711 
8712 			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n",
8713 				      io->indicator, io->reqtag);
8714 			/*
8715 			 * Because the initiator will not issue another IO phase, then it is OK to to issue the
8716 			 * callback on the abort completion, but for consistency with the target, wait for the
8717 			 * XRI_ABORTED CQE to issue the IO callback.
8718 			 */
8719 			rc = ocs_hw_io_abort(hw, io, TRUE, NULL, NULL);
8720 
8721 			if (rc == OCS_HW_RTN_SUCCESS) {
8722 				/* latch status to return after abort is complete */
8723 				io->status_saved = 1;
8724 				io->saved_status = status;
8725 				io->saved_ext = ext;
8726 				io->saved_len = len;
8727 				goto exit_ocs_hw_wq_process_io;
8728 			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8729 				/*
8730 				 * Already being aborted by someone else (ABTS
8731 				 * perhaps). Just fall through and return original
8732 				 * error.
8733 				 */
8734 				ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8735 					      io->indicator, io->reqtag);
8736 
8737 			} else {
8738 				/* Failed to abort for some other reason, log error */
8739 				ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8740 					     io->indicator, io->reqtag, rc);
8741 			}
8742 		}
8743 
8744 		/*
8745 		 * If we're not an originator IO, and XB is set, then issue abort for the IO from within the HW
8746 		 */
8747 		if ( (! ocs_hw_iotype_is_originator(io->type)) && wcqe->xb) {
8748 			ocs_hw_rtn_e rc;
8749 
8750 			ocs_log_debug(hw->os, "aborting xri=%#x tag=%#x\n", io->indicator, io->reqtag);
8751 
8752 			/*
8753 			 * Because targets may send a response when the IO completes using the same XRI, we must
8754 			 * wait for the XRI_ABORTED CQE to issue the IO callback
8755 			 */
8756 			rc = ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
8757 			if (rc == OCS_HW_RTN_SUCCESS) {
8758 				/* latch status to return after abort is complete */
8759 				io->status_saved = 1;
8760 				io->saved_status = status;
8761 				io->saved_ext = ext;
8762 				io->saved_len = len;
8763 				goto exit_ocs_hw_wq_process_io;
8764 			} else if (rc == OCS_HW_RTN_IO_ABORT_IN_PROGRESS) {
8765 				/*
8766 				 * Already being aborted by someone else (ABTS
8767 				 * perhaps). Just fall through and return original
8768 				 * error.
8769 				 */
8770 				ocs_log_debug(hw->os, "abort in progress xri=%#x tag=%#x\n",
8771 					      io->indicator, io->reqtag);
8772 
8773 			} else {
8774 				/* Failed to abort for some other reason, log error */
8775 				ocs_log_test(hw->os, "Failed to abort xri=%#x tag=%#x rc=%d\n",
8776 					     io->indicator, io->reqtag, rc);
8777 			}
8778 		}
8779 	}
8780 	/* BZ 161832 - free secondary HW IO */
8781 	if (io->sec_hio != NULL) {
8782 		ocs_hw_io_free(hw, io->sec_hio);
8783 		io->sec_hio = NULL;
8784 	}
8785 
8786 	if (io->done != NULL) {
8787 		ocs_hw_done_t  done = io->done;
8788 		void		*arg = io->arg;
8789 
8790 		io->done = NULL;
8791 
8792 		if (io->status_saved) {
8793 			/* use latched status if exists */
8794 			status = io->saved_status;
8795 			len = io->saved_len;
8796 			ext = io->saved_ext;
8797 			io->status_saved = 0;
8798 		}
8799 
8800 		/* Restore default SGL */
8801 		ocs_hw_io_restore_sgl(hw, io);
8802 		done(io, io->rnode, len, status, ext, arg);
8803 	}
8804 
8805 	if(out_of_order_axr_cmd) {
8806 		/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8807 		if (hw->config.bounce) {
8808 			fc_header_t *hdr = io->axr_buf->cmd_seq->header->dma.virt;
8809 			uint32_t s_id = fc_be24toh(hdr->s_id);
8810 			uint32_t d_id = fc_be24toh(hdr->d_id);
8811 			uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
8812 			if (hw->callback.bounce != NULL) {
8813 				(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, io->axr_buf->cmd_seq, s_id, d_id, ox_id);
8814 			}
8815 		}else {
8816 			hw->callback.unsolicited(hw->args.unsolicited, io->axr_buf->cmd_seq);
8817 		}
8818 
8819 		if(out_of_order_axr_data) {
8820 			/* bounce enabled, single RQ, we snoop the ox_id to choose the cpuidx */
8821 			if (hw->config.bounce) {
8822 				fc_header_t *hdr = io->axr_buf->seq.header->dma.virt;
8823 				uint32_t s_id = fc_be24toh(hdr->s_id);
8824 				uint32_t d_id = fc_be24toh(hdr->d_id);
8825 				uint32_t ox_id =  ocs_be16toh(hdr->ox_id);
8826 				if (hw->callback.bounce != NULL) {
8827 					(*hw->callback.bounce)(ocs_hw_unsol_process_bounce, &io->axr_buf->seq, s_id, d_id, ox_id);
8828 				}
8829 			}else {
8830 				hw->callback.unsolicited(hw->args.unsolicited, &io->axr_buf->seq);
8831 			}
8832 		}
8833 	}
8834 
8835 exit_ocs_hw_wq_process_io:
8836 	if(lock_taken) {
8837 		ocs_unlock(&io->axr_lock);
8838 	}
8839 }
8840 
8841 /**
8842  * @brief Process WQ completions for abort requests.
8843  *
8844  * @param arg Generic callback argument.
8845  * @param cqe Pointer to completion queue entry.
8846  * @param status Completion status.
8847  *
8848  * @return None.
8849  */
8850 static void
8851 ocs_hw_wq_process_abort(void *arg, uint8_t *cqe, int32_t status)
8852 {
8853 	ocs_hw_io_t *io = arg;
8854 	ocs_hw_t *hw = io->hw;
8855 	uint32_t ext = 0;
8856 	uint32_t len = 0;
8857 	hw_wq_callback_t *wqcb;
8858 
8859 	/*
8860 	 * For IOs that were aborted internally, we may need to issue the callback here depending
8861 	 * on whether a XRI_ABORTED CQE is expected ot not. If the status is Local Reject/No XRI, then
8862 	 * issue the callback now.
8863 	*/
8864 	ext = sli_fc_ext_status(&hw->sli, cqe);
8865 	if (status == SLI4_FC_WCQE_STATUS_LOCAL_REJECT &&
8866 	    ext == SLI4_FC_LOCAL_REJECT_NO_XRI &&
8867 		io->done != NULL) {
8868 		ocs_hw_done_t  done = io->done;
8869 		void		*arg = io->arg;
8870 
8871 		io->done = NULL;
8872 
8873 		/*
8874 		 * Use latched status as this is always saved for an internal abort
8875 		 *
8876 		 * Note: We wont have both a done and abort_done function, so don't worry about
8877 		 *       clobbering the len, status and ext fields.
8878 		 */
8879 		status = io->saved_status;
8880 		len = io->saved_len;
8881 		ext = io->saved_ext;
8882 		io->status_saved = 0;
8883 		done(io, io->rnode, len, status, ext, arg);
8884 	}
8885 
8886 	if (io->abort_done != NULL) {
8887 		ocs_hw_done_t  done = io->abort_done;
8888 		void		*arg = io->abort_arg;
8889 
8890 		io->abort_done = NULL;
8891 
8892 		done(io, io->rnode, len, status, ext, arg);
8893 	}
8894 	ocs_lock(&hw->io_abort_lock);
8895 		/* clear abort bit to indicate abort is complete */
8896 		io->abort_in_progress = 0;
8897 	ocs_unlock(&hw->io_abort_lock);
8898 
8899 	/* Free the WQ callback */
8900 	ocs_hw_assert(io->abort_reqtag != UINT32_MAX);
8901 	wqcb = ocs_hw_reqtag_get_instance(hw, io->abort_reqtag);
8902 	ocs_hw_reqtag_free(hw, wqcb);
8903 
8904 	/*
8905 	 * Call ocs_hw_io_free() because this releases the WQ reservation as
8906 	 * well as doing the refcount put. Don't duplicate the code here.
8907 	 */
8908 	(void)ocs_hw_io_free(hw, io);
8909 }
8910 
8911 /**
8912  * @brief Process XABT completions
8913  *
8914  * @param hw Hardware context.
8915  * @param cq Pointer to the HW completion queue object.
8916  * @param cqe Pointer to WQ completion queue.
8917  * @param rid Resource ID (IO tag).
8918  *
8919  *
8920  * @return None.
8921  */
8922 void
8923 ocs_hw_xabt_process(ocs_hw_t *hw, hw_cq_t *cq, uint8_t *cqe, uint16_t rid)
8924 {
8925 	/* search IOs wait free list */
8926 	ocs_hw_io_t *io = NULL;
8927 
8928 	io = ocs_hw_io_lookup(hw, rid);
8929 
8930 	ocs_queue_history_cqe(&hw->q_hist, SLI_QENTRY_XABT, (void *)cqe, 0, cq->queue->id,
8931 			      ((cq->queue->index - 1) & (cq->queue->length - 1)));
8932 	if (io == NULL) {
8933 		/* IO lookup failure should never happen */
8934 		ocs_log_err(hw->os, "Error: xabt io lookup failed rid=%#x\n", rid);
8935 		return;
8936 	}
8937 
8938 	if (!io->xbusy) {
8939 		ocs_log_debug(hw->os, "xabt io not busy rid=%#x\n", rid);
8940 	} else {
8941 		/* mark IO as no longer busy */
8942 		io->xbusy = FALSE;
8943 	}
8944 
8945        if (io->is_port_owned) {
8946                ocs_lock(&hw->io_lock);
8947                /* Take reference so that below callback will not free io before reque */
8948                ocs_ref_get(&io->ref);
8949                ocs_unlock(&hw->io_lock);
8950        }
8951 
8952 
8953 
8954 	/* For IOs that were aborted internally, we need to issue any pending callback here. */
8955 	if (io->done != NULL) {
8956 		ocs_hw_done_t  done = io->done;
8957 		void		*arg = io->arg;
8958 
8959 		/* Use latched status as this is always saved for an internal abort */
8960 		int32_t status = io->saved_status;
8961 		uint32_t len = io->saved_len;
8962 		uint32_t ext = io->saved_ext;
8963 
8964 		io->done = NULL;
8965 		io->status_saved = 0;
8966 
8967 		done(io, io->rnode, len, status, ext, arg);
8968 	}
8969 
8970 	/* Check to see if this is a port owned XRI */
8971 	if (io->is_port_owned) {
8972 		ocs_lock(&hw->io_lock);
8973 		ocs_hw_reque_xri(hw, io);
8974 		ocs_unlock(&hw->io_lock);
8975 		/* Not hanlding reque xri completion, free io */
8976 		ocs_hw_io_free(hw, io);
8977 		return;
8978 	}
8979 
8980 	ocs_lock(&hw->io_lock);
8981 		if ((io->state == OCS_HW_IO_STATE_INUSE) || (io->state == OCS_HW_IO_STATE_WAIT_FREE)) {
8982 			/* if on wait_free list, caller has already freed IO;
8983 			 * remove from wait_free list and add to free list.
8984 			 * if on in-use list, already marked as no longer busy;
8985 			 * just leave there and wait for caller to free.
8986 			 */
8987 			if (io->state == OCS_HW_IO_STATE_WAIT_FREE) {
8988 				io->state = OCS_HW_IO_STATE_FREE;
8989 				ocs_list_remove(&hw->io_wait_free, io);
8990 				ocs_hw_io_free_move_correct_list(hw, io);
8991 			}
8992 		}
8993 	ocs_unlock(&hw->io_lock);
8994 }
8995 
8996 /**
8997  * @brief Adjust the number of WQs and CQs within the HW.
8998  *
8999  * @par Description
9000  * Calculates the number of WQs and associated CQs needed in the HW based on
9001  * the number of IOs. Calculates the starting CQ index for each WQ, RQ and
9002  * MQ.
9003  *
9004  * @param hw Hardware context allocated by the caller.
9005  */
9006 static void
9007 ocs_hw_adjust_wqs(ocs_hw_t *hw)
9008 {
9009 	uint32_t max_wq_num = sli_get_max_queue(&hw->sli, SLI_QTYPE_WQ);
9010 	uint32_t max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ];
9011 	uint32_t max_cq_entries = hw->num_qentries[SLI_QTYPE_CQ];
9012 
9013 	/*
9014 	 * possibly adjust the the size of the WQs so that the CQ is twice as
9015 	 * big as the WQ to allow for 2 completions per IO. This allows us to
9016 	 * handle multi-phase as well as aborts.
9017 	 */
9018 	if (max_cq_entries < max_wq_entries * 2) {
9019 		max_wq_entries = hw->num_qentries[SLI_QTYPE_WQ] = max_cq_entries / 2;
9020 	}
9021 
9022 	/*
9023 	 * Calculate the number of WQs to use base on the number of IOs.
9024 	 *
9025 	 * Note: We need to reserve room for aborts which must be sent down
9026 	 *       the same WQ as the IO. So we allocate enough WQ space to
9027 	 *       handle 2 times the number of IOs. Half of the space will be
9028 	 *       used for normal IOs and the other hwf is reserved for aborts.
9029 	 */
9030 	hw->config.n_wq = ((hw->config.n_io * 2) + (max_wq_entries - 1)) / max_wq_entries;
9031 
9032 	/*
9033 	 * For performance reasons, it is best to use use a minimum of 4 WQs
9034 	 * for BE3 and Skyhawk.
9035 	 */
9036 	if (hw->config.n_wq < 4 &&
9037 	    SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
9038 		hw->config.n_wq = 4;
9039 	}
9040 
9041 	/*
9042 	 * For dual-chute support, we need to have at least one WQ per chute.
9043 	 */
9044 	if (hw->config.n_wq < 2 &&
9045 	    ocs_hw_get_num_chutes(hw) > 1) {
9046 		hw->config.n_wq = 2;
9047 	}
9048 
9049 	/* make sure we haven't exceeded the max supported in the HW */
9050 	if (hw->config.n_wq > OCS_HW_MAX_NUM_WQ) {
9051 		hw->config.n_wq = OCS_HW_MAX_NUM_WQ;
9052 	}
9053 
9054 	/* make sure we haven't exceeded the chip maximum */
9055 	if (hw->config.n_wq > max_wq_num) {
9056 		hw->config.n_wq = max_wq_num;
9057 	}
9058 
9059 	/*
9060 	 * Using Queue Topology string, we divide by number of chutes
9061 	 */
9062 	hw->config.n_wq /= ocs_hw_get_num_chutes(hw);
9063 }
9064 
9065 static int32_t
9066 ocs_hw_command_process(ocs_hw_t *hw, int32_t status, uint8_t *mqe, size_t size)
9067 {
9068 	ocs_command_ctx_t *ctx = NULL;
9069 
9070 	ocs_lock(&hw->cmd_lock);
9071 		if (NULL == (ctx = ocs_list_remove_head(&hw->cmd_head))) {
9072 			ocs_log_err(hw->os, "XXX no command context?!?\n");
9073 			ocs_unlock(&hw->cmd_lock);
9074 			return -1;
9075 		}
9076 
9077 		hw->cmd_head_count--;
9078 
9079 		/* Post any pending requests */
9080 		ocs_hw_cmd_submit_pending(hw);
9081 
9082 	ocs_unlock(&hw->cmd_lock);
9083 
9084 	if (ctx->cb) {
9085 		if (ctx->buf) {
9086 			ocs_memcpy(ctx->buf, mqe, size);
9087 		}
9088 		ctx->cb(hw, status, ctx->buf, ctx->arg);
9089 	}
9090 
9091 	ocs_memset(ctx, 0, sizeof(ocs_command_ctx_t));
9092 	ocs_free(hw->os, ctx, sizeof(ocs_command_ctx_t));
9093 
9094 	return 0;
9095 }
9096 
9097 
9098 
9099 
9100 /**
9101  * @brief Process entries on the given mailbox queue.
9102  *
9103  * @param hw Hardware context.
9104  * @param status CQE status.
9105  * @param mq Pointer to the mailbox queue object.
9106  *
9107  * @return Returns 0 on success, or a non-zero value on failure.
9108  */
9109 static int32_t
9110 ocs_hw_mq_process(ocs_hw_t *hw, int32_t status, sli4_queue_t *mq)
9111 {
9112 	uint8_t		mqe[SLI4_BMBX_SIZE];
9113 
9114 	if (!sli_queue_read(&hw->sli, mq, mqe)) {
9115 		ocs_hw_command_process(hw, status, mqe, mq->size);
9116 	}
9117 
9118 	return 0;
9119 }
9120 
9121 /**
9122  * @brief Read a FCF table entry.
9123  *
9124  * @param hw Hardware context.
9125  * @param index Table index to read. Use SLI4_FCOE_FCF_TABLE_FIRST for the first
9126  * read and the next_index field from the FCOE_READ_FCF_TABLE command
9127  * for subsequent reads.
9128  *
9129  * @return Returns 0 on success, or a non-zero value on failure.
9130  */
9131 static ocs_hw_rtn_e
9132 ocs_hw_read_fcf(ocs_hw_t *hw, uint32_t index)
9133 {
9134 	uint8_t		*buf = NULL;
9135 	int32_t		rc = OCS_HW_RTN_ERROR;
9136 
9137 	buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9138 	if (!buf) {
9139 		ocs_log_err(hw->os, "no buffer for command\n");
9140 		return OCS_HW_RTN_NO_MEMORY;
9141 	}
9142 
9143 	if (sli_cmd_fcoe_read_fcf_table(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->fcf_dmem,
9144 			index)) {
9145 		rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, ocs_hw_cb_read_fcf, &hw->fcf_dmem);
9146 	}
9147 
9148 	if (rc != OCS_HW_RTN_SUCCESS) {
9149 		ocs_log_test(hw->os, "FCOE_READ_FCF_TABLE failed\n");
9150 		ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9151 	}
9152 
9153 	return rc;
9154 }
9155 
9156 /**
9157  * @brief Callback function for the FCOE_READ_FCF_TABLE command.
9158  *
9159  * @par Description
9160  * Note that the caller has allocated:
9161  *  - DMA memory to hold the table contents
9162  *  - DMA memory structure
9163  *  - Command/results buffer
9164  *  .
9165  * Each of these must be freed here.
9166  *
9167  * @param hw Hardware context.
9168  * @param status Hardware status.
9169  * @param mqe Pointer to the mailbox command/results buffer.
9170  * @param arg Pointer to the DMA memory structure.
9171  *
9172  * @return Returns 0 on success, or a non-zero value on failure.
9173  */
9174 static int32_t
9175 ocs_hw_cb_read_fcf(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9176 {
9177 	ocs_dma_t	*dma = arg;
9178 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9179 
9180 	if (status || hdr->status) {
9181 		ocs_log_test(hw->os, "bad status cqe=%#x mqe=%#x\n",
9182 				status, hdr->status);
9183 	} else if (dma->virt) {
9184 		sli4_res_fcoe_read_fcf_table_t *read_fcf = dma->virt;
9185 
9186 		/* if FC or FCOE and FCF entry valid, process it */
9187 		if (read_fcf->fcf_entry.fc ||
9188 				(read_fcf->fcf_entry.val && !read_fcf->fcf_entry.sol)) {
9189 			if (hw->callback.domain != NULL) {
9190 				ocs_domain_record_t drec = {0};
9191 
9192 				if (read_fcf->fcf_entry.fc) {
9193 					/*
9194 					 * This is a pseudo FCF entry. Create a domain
9195 					 * record based on the read topology information
9196 					 */
9197 					drec.speed = hw->link.speed;
9198 					drec.fc_id = hw->link.fc_id;
9199 					drec.is_fc = TRUE;
9200 					if (SLI_LINK_TOPO_LOOP == hw->link.topology) {
9201 						drec.is_loop = TRUE;
9202 						ocs_memcpy(drec.map.loop, hw->link.loop_map,
9203 							   sizeof(drec.map.loop));
9204 					} else if (SLI_LINK_TOPO_NPORT == hw->link.topology) {
9205 						drec.is_nport = TRUE;
9206 					}
9207 				} else {
9208 					drec.index = read_fcf->fcf_entry.fcf_index;
9209 					drec.priority = read_fcf->fcf_entry.fip_priority;
9210 
9211 					/* copy address, wwn and vlan_bitmap */
9212 					ocs_memcpy(drec.address, read_fcf->fcf_entry.fcf_mac_address,
9213 						   sizeof(drec.address));
9214 					ocs_memcpy(drec.wwn, read_fcf->fcf_entry.fabric_name_id,
9215 						   sizeof(drec.wwn));
9216 					ocs_memcpy(drec.map.vlan, read_fcf->fcf_entry.vlan_bitmap,
9217 						   sizeof(drec.map.vlan));
9218 
9219 					drec.is_ethernet = TRUE;
9220 					drec.is_nport = TRUE;
9221 				}
9222 
9223 				hw->callback.domain(hw->args.domain,
9224 						OCS_HW_DOMAIN_FOUND,
9225 						&drec);
9226 			}
9227 		} else {
9228 			/* if FCOE and FCF is not valid, ignore it */
9229 			ocs_log_test(hw->os, "ignore invalid FCF entry\n");
9230 		}
9231 
9232 		if (SLI4_FCOE_FCF_TABLE_LAST != read_fcf->next_index) {
9233 			ocs_hw_read_fcf(hw, read_fcf->next_index);
9234 		}
9235 	}
9236 
9237 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9238 	//ocs_dma_free(hw->os, dma);
9239 	//ocs_free(hw->os, dma, sizeof(ocs_dma_t));
9240 
9241 	return 0;
9242 }
9243 
9244 /**
9245  * @brief Callback function for the SLI link events.
9246  *
9247  * @par Description
9248  * This function allocates memory which must be freed in its callback.
9249  *
9250  * @param ctx Hardware context pointer (that is, ocs_hw_t *).
9251  * @param e Event structure pointer (that is, sli4_link_event_t *).
9252  *
9253  * @return Returns 0 on success, or a non-zero value on failure.
9254  */
9255 static int32_t
9256 ocs_hw_cb_link(void *ctx, void *e)
9257 {
9258 	ocs_hw_t	*hw = ctx;
9259 	sli4_link_event_t *event = e;
9260 	ocs_domain_t	*d = NULL;
9261 	uint32_t	i = 0;
9262 	int32_t		rc = OCS_HW_RTN_ERROR;
9263 	ocs_t 		*ocs = hw->os;
9264 
9265 	ocs_hw_link_event_init(hw);
9266 
9267 	switch (event->status) {
9268 	case SLI_LINK_STATUS_UP:
9269 
9270 		hw->link = *event;
9271 
9272 		if (SLI_LINK_TOPO_NPORT == event->topology) {
9273 			device_printf(ocs->dev, "Link Up, NPORT, speed is %d\n", event->speed);
9274 			ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9275 		} else if (SLI_LINK_TOPO_LOOP == event->topology) {
9276 			uint8_t	*buf = NULL;
9277 			device_printf(ocs->dev, "Link Up, LOOP, speed is %d\n", event->speed);
9278 
9279 			buf = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
9280 			if (!buf) {
9281 				ocs_log_err(hw->os, "no buffer for command\n");
9282 				break;
9283 			}
9284 
9285 			if (sli_cmd_read_topology(&hw->sli, buf, SLI4_BMBX_SIZE, &hw->loop_map)) {
9286 				rc = ocs_hw_command(hw, buf, OCS_CMD_NOWAIT, __ocs_read_topology_cb, NULL);
9287 			}
9288 
9289 			if (rc != OCS_HW_RTN_SUCCESS) {
9290 				ocs_log_test(hw->os, "READ_TOPOLOGY failed\n");
9291 				ocs_free(hw->os, buf, SLI4_BMBX_SIZE);
9292 			}
9293 		} else {
9294 			device_printf(ocs->dev, "Link Up, unsupported topology (%#x), speed is %d\n",
9295 					event->topology, event->speed);
9296 		}
9297 		break;
9298 	case SLI_LINK_STATUS_DOWN:
9299 		device_printf(ocs->dev, "Link Down\n");
9300 
9301 		hw->link.status = event->status;
9302 
9303 		for (i = 0; d = hw->domains[i], i < SLI4_MAX_FCFI; i++) {
9304 			if (d != NULL &&
9305 			    hw->callback.domain != NULL) {
9306 				hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, d);
9307 			}
9308 		}
9309 		break;
9310 	default:
9311 		ocs_log_test(hw->os, "unhandled link status %#x\n", event->status);
9312 		break;
9313 	}
9314 
9315 	return 0;
9316 }
9317 
9318 static int32_t
9319 ocs_hw_cb_fip(void *ctx, void *e)
9320 {
9321 	ocs_hw_t	*hw = ctx;
9322 	ocs_domain_t	*domain = NULL;
9323 	sli4_fip_event_t *event = e;
9324 
9325 	/* Find the associated domain object */
9326 	if (event->type == SLI4_FCOE_FIP_FCF_CLEAR_VLINK) {
9327 		ocs_domain_t *d = NULL;
9328 		uint32_t	i = 0;
9329 
9330 		/* Clear VLINK is different from the other FIP events as it passes back
9331 		 * a VPI instead of a FCF index. Check all attached SLI ports for a
9332 		 * matching VPI */
9333 		for (i = 0; d = hw->domains[i], i < SLI4_MAX_FCFI; i++) {
9334 			if (d != NULL) {
9335 				ocs_sport_t	*sport = NULL;
9336 
9337 				ocs_list_foreach(&d->sport_list, sport) {
9338 					if (sport->indicator == event->index) {
9339 						domain = d;
9340 						break;
9341 					}
9342 				}
9343 
9344 				if (domain != NULL) {
9345 					break;
9346 				}
9347 			}
9348 		}
9349 	} else {
9350 		domain = ocs_hw_domain_get_indexed(hw, event->index);
9351 	}
9352 
9353 	switch (event->type) {
9354 	case SLI4_FCOE_FIP_FCF_DISCOVERED:
9355 		ocs_hw_read_fcf(hw, event->index);
9356 		break;
9357 	case SLI4_FCOE_FIP_FCF_DEAD:
9358 		if (domain != NULL &&
9359 		    hw->callback.domain != NULL) {
9360 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9361 		}
9362 		break;
9363 	case SLI4_FCOE_FIP_FCF_CLEAR_VLINK:
9364 		if (domain != NULL &&
9365 		    hw->callback.domain != NULL) {
9366 			/*
9367 			 * We will want to issue rediscover FCF when this domain is free'd  in order
9368 			 * to invalidate the FCF table
9369 			 */
9370 			domain->req_rediscover_fcf = TRUE;
9371 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9372 		}
9373 		break;
9374 	case SLI4_FCOE_FIP_FCF_MODIFIED:
9375 		if (domain != NULL &&
9376 		    hw->callback.domain != NULL) {
9377 			hw->callback.domain(hw->args.domain, OCS_HW_DOMAIN_LOST, domain);
9378 		}
9379 
9380 		ocs_hw_read_fcf(hw, event->index);
9381 		break;
9382 	default:
9383 		ocs_log_test(hw->os, "unsupported event %#x\n", event->type);
9384 	}
9385 
9386 	return 0;
9387 }
9388 
9389 static int32_t
9390 ocs_hw_cb_node_attach(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9391 {
9392 	ocs_remote_node_t *rnode = arg;
9393 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9394 	ocs_hw_remote_node_event_e	evt = 0;
9395 
9396 	if (status || hdr->status) {
9397 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9398 				hdr->status);
9399 		ocs_atomic_sub_return(&hw->rpi_ref[rnode->index].rpi_count, 1);
9400 		rnode->attached = FALSE;
9401 		ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9402 		evt = OCS_HW_NODE_ATTACH_FAIL;
9403 	} else {
9404 		rnode->attached = TRUE;
9405 		ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 1);
9406 		evt = OCS_HW_NODE_ATTACH_OK;
9407 	}
9408 
9409 	if (hw->callback.rnode != NULL) {
9410 		hw->callback.rnode(hw->args.rnode, evt, rnode);
9411 	}
9412 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9413 
9414 	return 0;
9415 }
9416 
9417 static int32_t
9418 ocs_hw_cb_node_free(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9419 {
9420 	ocs_remote_node_t *rnode = arg;
9421 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9422 	ocs_hw_remote_node_event_e	evt = OCS_HW_NODE_FREE_FAIL;
9423 	int32_t		rc = 0;
9424 
9425 	if (status || hdr->status) {
9426 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9427 				hdr->status);
9428 
9429 		/*
9430 		 * In certain cases, a non-zero MQE status is OK (all must be true):
9431 		 *   - node is attached
9432 		 *   - if High Login Mode is enabled, node is part of a node group
9433 		 *   - status is 0x1400
9434 		 */
9435 		if (!rnode->attached || ((sli_get_hlm(&hw->sli) == TRUE) && !rnode->node_group) ||
9436 				(hdr->status != SLI4_MBOX_STATUS_RPI_NOT_REG)) {
9437 			rc = -1;
9438 		}
9439 	}
9440 
9441 	if (rc == 0) {
9442 		rnode->node_group = FALSE;
9443 		rnode->attached = FALSE;
9444 
9445 		if (ocs_atomic_read(&hw->rpi_ref[rnode->index].rpi_count) == 0) {
9446 			ocs_atomic_set(&hw->rpi_ref[rnode->index].rpi_attached, 0);
9447 		}
9448 
9449 		evt = OCS_HW_NODE_FREE_OK;
9450 	}
9451 
9452 	if (hw->callback.rnode != NULL) {
9453 		hw->callback.rnode(hw->args.rnode, evt, rnode);
9454 	}
9455 
9456 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9457 
9458 	return rc;
9459 }
9460 
9461 static int32_t
9462 ocs_hw_cb_node_free_all(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9463 {
9464 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
9465 	ocs_hw_remote_node_event_e	evt = OCS_HW_NODE_FREE_FAIL;
9466 	int32_t		rc = 0;
9467 	uint32_t	i;
9468 
9469 	if (status || hdr->status) {
9470 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n", status,
9471 				hdr->status);
9472 	} else {
9473 		evt = OCS_HW_NODE_FREE_ALL_OK;
9474 	}
9475 
9476 	if (evt == OCS_HW_NODE_FREE_ALL_OK) {
9477 		for (i = 0; i < sli_get_max_rsrc(&hw->sli, SLI_RSRC_FCOE_RPI); i++) {
9478 			ocs_atomic_set(&hw->rpi_ref[i].rpi_count, 0);
9479 		}
9480 
9481 		if (sli_resource_reset(&hw->sli, SLI_RSRC_FCOE_RPI)) {
9482 			ocs_log_test(hw->os, "FCOE_RPI free all failure\n");
9483 			rc = -1;
9484 		}
9485 	}
9486 
9487 	if (hw->callback.rnode != NULL) {
9488 		hw->callback.rnode(hw->args.rnode, evt, NULL);
9489 	}
9490 
9491 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9492 
9493 	return rc;
9494 }
9495 
9496 /**
9497  * @brief Initialize the pool of HW IO objects.
9498  *
9499  * @param hw Hardware context.
9500  *
9501  * @return Returns 0 on success, or a non-zero value on failure.
9502  */
9503 static ocs_hw_rtn_e
9504 ocs_hw_setup_io(ocs_hw_t *hw)
9505 {
9506 	uint32_t	i = 0;
9507 	ocs_hw_io_t	*io = NULL;
9508 	uintptr_t	xfer_virt = 0;
9509 	uintptr_t	xfer_phys = 0;
9510 	uint32_t	index;
9511 	uint8_t		new_alloc = TRUE;
9512 
9513 	if (NULL == hw->io) {
9514 		hw->io = ocs_malloc(hw->os, hw->config.n_io * sizeof(ocs_hw_io_t *), OCS_M_ZERO | OCS_M_NOWAIT);
9515 
9516 		if (NULL == hw->io) {
9517 			ocs_log_err(hw->os, "IO pointer memory allocation failed, %d Ios at size %zu\n",
9518 				    hw->config.n_io,
9519 				    sizeof(ocs_hw_io_t *));
9520 			return OCS_HW_RTN_NO_MEMORY;
9521 		}
9522 		for (i = 0; i < hw->config.n_io; i++) {
9523 			hw->io[i] = ocs_malloc(hw->os, sizeof(ocs_hw_io_t),
9524 						OCS_M_ZERO | OCS_M_NOWAIT);
9525 			if (hw->io[i] == NULL) {
9526 				ocs_log_err(hw->os, "IO(%d) memory allocation failed\n", i);
9527 				goto error;
9528 			}
9529 		}
9530 
9531 		/* Create WQE buffs for IO */
9532 		hw->wqe_buffs = ocs_malloc(hw->os, hw->config.n_io * hw->sli.config.wqe_size,
9533 				OCS_M_ZERO | OCS_M_NOWAIT);
9534 		if (NULL == hw->wqe_buffs) {
9535 			ocs_free(hw->os, hw->io, hw->config.n_io * sizeof(ocs_hw_io_t));
9536 			ocs_log_err(hw->os, "%s: IO WQE buff allocation failed, %d Ios at size %zu\n",
9537 					__func__, hw->config.n_io, hw->sli.config.wqe_size);
9538 			return OCS_HW_RTN_NO_MEMORY;
9539 		}
9540 
9541 	} else {
9542 		/* re-use existing IOs, including SGLs */
9543 		new_alloc = FALSE;
9544 	}
9545 
9546 	if (new_alloc) {
9547 		if (ocs_dma_alloc(hw->os, &hw->xfer_rdy,
9548 					sizeof(fcp_xfer_rdy_iu_t) * hw->config.n_io,
9549 					4/*XXX what does this need to be? */)) {
9550 			ocs_log_err(hw->os, "XFER_RDY buffer allocation failed\n");
9551 			return OCS_HW_RTN_NO_MEMORY;
9552 		}
9553 	}
9554 	xfer_virt = (uintptr_t)hw->xfer_rdy.virt;
9555 	xfer_phys = hw->xfer_rdy.phys;
9556 
9557 	for (i = 0; i < hw->config.n_io; i++) {
9558 		hw_wq_callback_t *wqcb;
9559 
9560 		io = hw->io[i];
9561 
9562 		/* initialize IO fields */
9563 		io->hw = hw;
9564 
9565 		/* Assign a WQE buff */
9566 		io->wqe.wqebuf = &hw->wqe_buffs[i * hw->sli.config.wqe_size];
9567 
9568 		/* Allocate the request tag for this IO */
9569 		wqcb = ocs_hw_reqtag_alloc(hw, ocs_hw_wq_process_io, io);
9570 		if (wqcb == NULL) {
9571 			ocs_log_err(hw->os, "can't allocate request tag\n");
9572 			return OCS_HW_RTN_NO_RESOURCES;
9573 		}
9574 		io->reqtag = wqcb->instance_index;
9575 
9576 		/* Now for the fields that are initialized on each free */
9577 		ocs_hw_init_free_io(io);
9578 
9579 		/* The XB flag isn't cleared on IO free, so initialize it to zero here */
9580 		io->xbusy = 0;
9581 
9582 		if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_XRI, &io->indicator, &index)) {
9583 			ocs_log_err(hw->os, "sli_resource_alloc failed @ %d\n", i);
9584 			return OCS_HW_RTN_NO_MEMORY;
9585 		}
9586 
9587 		if (new_alloc && ocs_dma_alloc(hw->os, &io->def_sgl, hw->config.n_sgl * sizeof(sli4_sge_t), 64)) {
9588 			ocs_log_err(hw->os, "ocs_dma_alloc failed @ %d\n", i);
9589 			ocs_memset(&io->def_sgl, 0, sizeof(ocs_dma_t));
9590 			return OCS_HW_RTN_NO_MEMORY;
9591 		}
9592 		io->def_sgl_count = hw->config.n_sgl;
9593 		io->sgl = &io->def_sgl;
9594 		io->sgl_count = io->def_sgl_count;
9595 
9596 		if (hw->xfer_rdy.size) {
9597 			io->xfer_rdy.virt = (void *)xfer_virt;
9598 			io->xfer_rdy.phys = xfer_phys;
9599 			io->xfer_rdy.size = sizeof(fcp_xfer_rdy_iu_t);
9600 
9601 			xfer_virt += sizeof(fcp_xfer_rdy_iu_t);
9602 			xfer_phys += sizeof(fcp_xfer_rdy_iu_t);
9603 		}
9604 	}
9605 
9606 	return OCS_HW_RTN_SUCCESS;
9607 error:
9608 	for (i = 0; i < hw->config.n_io && hw->io[i]; i++) {
9609 		ocs_free(hw->os, hw->io[i], sizeof(ocs_hw_io_t));
9610 		hw->io[i] = NULL;
9611 	}
9612 
9613 	return OCS_HW_RTN_NO_MEMORY;
9614 }
9615 
9616 static ocs_hw_rtn_e
9617 ocs_hw_init_io(ocs_hw_t *hw)
9618 {
9619 	uint32_t        i = 0, io_index = 0;
9620 	uint32_t        prereg = 0;
9621 	ocs_hw_io_t	*io = NULL;
9622 	uint8_t		cmd[SLI4_BMBX_SIZE];
9623 	ocs_hw_rtn_e rc = OCS_HW_RTN_SUCCESS;
9624 	uint32_t	nremaining;
9625 	uint32_t	n = 0;
9626 	uint32_t	sgls_per_request = 256;
9627 	ocs_dma_t	**sgls = NULL;
9628 	ocs_dma_t	reqbuf = { 0 };
9629 
9630 	prereg = sli_get_sgl_preregister(&hw->sli);
9631 
9632 	if (prereg) {
9633 		sgls = ocs_malloc(hw->os, sizeof(*sgls) * sgls_per_request, OCS_M_NOWAIT);
9634 		if (sgls == NULL) {
9635 			ocs_log_err(hw->os, "ocs_malloc sgls failed\n");
9636 			return OCS_HW_RTN_NO_MEMORY;
9637 		}
9638 
9639 		rc = ocs_dma_alloc(hw->os, &reqbuf, 32 + sgls_per_request*16, OCS_MIN_DMA_ALIGNMENT);
9640 		if (rc) {
9641 			ocs_log_err(hw->os, "ocs_dma_alloc reqbuf failed\n");
9642 			ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9643 			return OCS_HW_RTN_NO_MEMORY;
9644 		}
9645 	}
9646 
9647 	io = hw->io[io_index];
9648 	for (nremaining = hw->config.n_io; nremaining; nremaining -= n) {
9649 		if (prereg) {
9650 			/* Copy address of SGL's into local sgls[] array, break out if the xri
9651 			 * is not contiguous.
9652 			 */
9653 			for (n = 0; n < MIN(sgls_per_request, nremaining); n++) {
9654 				/* Check that we have contiguous xri values */
9655 				if (n > 0) {
9656 					if (hw->io[io_index + n]->indicator != (hw->io[io_index + n-1]->indicator+1)) {
9657 						break;
9658 					}
9659 				}
9660 				sgls[n] = hw->io[io_index + n]->sgl;
9661 			}
9662 
9663 			if (sli_cmd_fcoe_post_sgl_pages(&hw->sli, cmd, sizeof(cmd),
9664 						io->indicator, n, sgls, NULL, &reqbuf)) {
9665 				if (ocs_hw_command(hw, cmd, OCS_CMD_POLL, NULL, NULL)) {
9666 					rc = OCS_HW_RTN_ERROR;
9667 					ocs_log_err(hw->os, "SGL post failed\n");
9668 					break;
9669 				}
9670 			}
9671 		} else {
9672 			n = nremaining;
9673 		}
9674 
9675 		/* Add to tail if successful */
9676 		for (i = 0; i < n; i ++) {
9677 			io->is_port_owned = 0;
9678 			io->state = OCS_HW_IO_STATE_FREE;
9679 			ocs_list_add_tail(&hw->io_free, io);
9680 			io = hw->io[io_index+1];
9681 			io_index++;
9682 		}
9683 	}
9684 
9685 	if (prereg) {
9686 		ocs_dma_free(hw->os, &reqbuf);
9687 		ocs_free(hw->os, sgls, sizeof(*sgls) * sgls_per_request);
9688 	}
9689 
9690 	return rc;
9691 }
9692 
9693 static int32_t
9694 ocs_hw_flush(ocs_hw_t *hw)
9695 {
9696 	uint32_t	i = 0;
9697 
9698 	/* Process any remaining completions */
9699 	for (i = 0; i < hw->eq_count; i++) {
9700 		ocs_hw_process(hw, i, ~0);
9701 	}
9702 
9703 	return 0;
9704 }
9705 
9706 static int32_t
9707 ocs_hw_command_cancel(ocs_hw_t *hw)
9708 {
9709 
9710 	ocs_lock(&hw->cmd_lock);
9711 
9712 	/*
9713 	 * Manually clean up remaining commands. Note: since this calls
9714 	 * ocs_hw_command_process(), we'll also process the cmd_pending
9715 	 * list, so no need to manually clean that out.
9716 	 */
9717 	while (!ocs_list_empty(&hw->cmd_head)) {
9718 		uint8_t		mqe[SLI4_BMBX_SIZE] = { 0 };
9719 		ocs_command_ctx_t *ctx = ocs_list_get_head(&hw->cmd_head);
9720 
9721 		ocs_log_test(hw->os, "hung command %08x\n",
9722 				NULL == ctx ? UINT32_MAX :
9723 				(NULL == ctx->buf ? UINT32_MAX : *((uint32_t *)ctx->buf)));
9724 		ocs_unlock(&hw->cmd_lock);
9725 		ocs_hw_command_process(hw, -1/*Bad status*/, mqe, SLI4_BMBX_SIZE);
9726 		ocs_lock(&hw->cmd_lock);
9727 	}
9728 
9729 	ocs_unlock(&hw->cmd_lock);
9730 
9731 	return 0;
9732 }
9733 
9734 /**
9735  * @brief Find IO given indicator (xri).
9736  *
9737  * @param hw Hal context.
9738  * @param indicator Indicator (xri) to look for.
9739  *
9740  * @return Returns io if found, NULL otherwise.
9741  */
9742 ocs_hw_io_t *
9743 ocs_hw_io_lookup(ocs_hw_t *hw, uint32_t xri)
9744 {
9745 	uint32_t ioindex;
9746 	ioindex = xri - hw->sli.config.extent[SLI_RSRC_FCOE_XRI].base[0];
9747 	return hw->io[ioindex];
9748 }
9749 
9750 /**
9751  * @brief Issue any pending callbacks for an IO and remove off the timer and pending lists.
9752  *
9753  * @param hw Hal context.
9754  * @param io Pointer to the IO to cleanup.
9755  */
9756 static void
9757 ocs_hw_io_cancel_cleanup(ocs_hw_t *hw, ocs_hw_io_t *io)
9758 {
9759 	ocs_hw_done_t  done = io->done;
9760 	ocs_hw_done_t  abort_done = io->abort_done;
9761 
9762 	/* first check active_wqe list and remove if there */
9763 	if (ocs_list_on_list(&io->wqe_link)) {
9764 		ocs_list_remove(&hw->io_timed_wqe, io);
9765 	}
9766 
9767 	/* Remove from WQ pending list */
9768 	if ((io->wq != NULL) && ocs_list_on_list(&io->wq->pending_list)) {
9769 		ocs_list_remove(&io->wq->pending_list, io);
9770 	}
9771 
9772 	if (io->done) {
9773 		void		*arg = io->arg;
9774 
9775 		io->done = NULL;
9776 		ocs_unlock(&hw->io_lock);
9777 		done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, arg);
9778 		ocs_lock(&hw->io_lock);
9779 	}
9780 
9781 	if (io->abort_done != NULL) {
9782 		void		*abort_arg = io->abort_arg;
9783 
9784 		io->abort_done = NULL;
9785 		ocs_unlock(&hw->io_lock);
9786 		abort_done(io, io->rnode, 0, SLI4_FC_WCQE_STATUS_SHUTDOWN, 0, abort_arg);
9787 		ocs_lock(&hw->io_lock);
9788 	}
9789 }
9790 
9791 static int32_t
9792 ocs_hw_io_cancel(ocs_hw_t *hw)
9793 {
9794 	ocs_hw_io_t	*io = NULL;
9795 	ocs_hw_io_t	*tmp_io = NULL;
9796 	uint32_t	iters = 100; /* One second limit */
9797 
9798 	/*
9799 	 * Manually clean up outstanding IO.
9800 	 * Only walk through list once: the backend will cleanup any IOs when done/abort_done is called.
9801 	 */
9802 	ocs_lock(&hw->io_lock);
9803 	ocs_list_foreach_safe(&hw->io_inuse, io, tmp_io) {
9804 		ocs_hw_done_t  done = io->done;
9805 		ocs_hw_done_t  abort_done = io->abort_done;
9806 
9807 		ocs_hw_io_cancel_cleanup(hw, io);
9808 
9809 		/*
9810 		 * Since this is called in a reset/shutdown
9811 		 * case, If there is no callback, then just
9812 		 * free the IO.
9813 		 *
9814 		 * Note: A port owned XRI cannot be on
9815 		 *       the in use list. We cannot call
9816 		 *       ocs_hw_io_free() because we already
9817 		 *       hold the io_lock.
9818 		 */
9819 		if (done == NULL &&
9820 		    abort_done == NULL) {
9821 			/*
9822 			 * Since this is called in a reset/shutdown
9823 			 * case, If there is no callback, then just
9824 			 * free the IO.
9825 			 */
9826 			ocs_hw_io_free_common(hw, io);
9827 			ocs_list_remove(&hw->io_inuse, io);
9828 			ocs_hw_io_free_move_correct_list(hw, io);
9829 		}
9830 	}
9831 
9832 	/*
9833 	 * For port owned XRIs, they are not on the in use list, so
9834 	 * walk though XRIs and issue any callbacks.
9835 	 */
9836 	ocs_list_foreach_safe(&hw->io_port_owned, io, tmp_io) {
9837 		/* check  list and remove if there */
9838 		if (ocs_list_on_list(&io->dnrx_link)) {
9839 			ocs_list_remove(&hw->io_port_dnrx, io);
9840 			ocs_ref_put(&io->ref); /* ocs_ref_get(): same function */
9841 		}
9842 		ocs_hw_io_cancel_cleanup(hw, io);
9843 		ocs_list_remove(&hw->io_port_owned, io);
9844 		ocs_hw_io_free_common(hw, io);
9845 	}
9846 	ocs_unlock(&hw->io_lock);
9847 
9848 	/* Give time for the callbacks to complete */
9849 	do {
9850 		ocs_udelay(10000);
9851 		iters--;
9852 	} while (!ocs_list_empty(&hw->io_inuse) && iters);
9853 
9854 	/* Leave a breadcrumb that cleanup is not yet complete. */
9855 	if (!ocs_list_empty(&hw->io_inuse)) {
9856 		ocs_log_test(hw->os, "io_inuse list is not empty\n");
9857 	}
9858 
9859 	return 0;
9860 }
9861 
9862 static int32_t
9863 ocs_hw_io_ini_sge(ocs_hw_t *hw, ocs_hw_io_t *io, ocs_dma_t *cmnd, uint32_t cmnd_size,
9864 		ocs_dma_t *rsp)
9865 {
9866 	sli4_sge_t	*data = NULL;
9867 
9868 	if (!hw || !io) {
9869 		ocs_log_err(NULL, "bad parm hw=%p io=%p\n", hw, io);
9870 		return OCS_HW_RTN_ERROR;
9871 	}
9872 
9873 	data = io->def_sgl.virt;
9874 
9875 	/* setup command pointer */
9876 	data->buffer_address_high = ocs_addr32_hi(cmnd->phys);
9877 	data->buffer_address_low  = ocs_addr32_lo(cmnd->phys);
9878 	data->buffer_length = cmnd_size;
9879 	data++;
9880 
9881 	/* setup response pointer */
9882 	data->buffer_address_high = ocs_addr32_hi(rsp->phys);
9883 	data->buffer_address_low  = ocs_addr32_lo(rsp->phys);
9884 	data->buffer_length = rsp->size;
9885 
9886 	return 0;
9887 }
9888 
9889 static int32_t
9890 __ocs_read_topology_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
9891 {
9892 	sli4_cmd_read_topology_t *read_topo = (sli4_cmd_read_topology_t *)mqe;
9893 
9894 	if (status || read_topo->hdr.status) {
9895 		ocs_log_debug(hw->os, "bad status cqe=%#x mqe=%#x\n",
9896 				status, read_topo->hdr.status);
9897 		ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9898 		return -1;
9899 	}
9900 
9901 	switch (read_topo->attention_type) {
9902 	case SLI4_READ_TOPOLOGY_LINK_UP:
9903 		hw->link.status = SLI_LINK_STATUS_UP;
9904 		break;
9905 	case SLI4_READ_TOPOLOGY_LINK_DOWN:
9906 		hw->link.status = SLI_LINK_STATUS_DOWN;
9907 		break;
9908 	case SLI4_READ_TOPOLOGY_LINK_NO_ALPA:
9909 		hw->link.status = SLI_LINK_STATUS_NO_ALPA;
9910 		break;
9911 	default:
9912 		hw->link.status = SLI_LINK_STATUS_MAX;
9913 		break;
9914 	}
9915 
9916 	switch (read_topo->topology) {
9917 	case SLI4_READ_TOPOLOGY_NPORT:
9918 		hw->link.topology = SLI_LINK_TOPO_NPORT;
9919 		break;
9920 	case SLI4_READ_TOPOLOGY_FC_AL:
9921 		hw->link.topology = SLI_LINK_TOPO_LOOP;
9922 		if (SLI_LINK_STATUS_UP == hw->link.status) {
9923 			hw->link.loop_map = hw->loop_map.virt;
9924 		}
9925 		hw->link.fc_id = read_topo->acquired_al_pa;
9926 		break;
9927 	default:
9928 		hw->link.topology = SLI_LINK_TOPO_MAX;
9929 		break;
9930 	}
9931 
9932 	hw->link.medium = SLI_LINK_MEDIUM_FC;
9933 
9934 	switch (read_topo->link_current.link_speed) {
9935 	case SLI4_READ_TOPOLOGY_SPEED_1G:
9936 		hw->link.speed =  1 * 1000;
9937 		break;
9938 	case SLI4_READ_TOPOLOGY_SPEED_2G:
9939 		hw->link.speed =  2 * 1000;
9940 		break;
9941 	case SLI4_READ_TOPOLOGY_SPEED_4G:
9942 		hw->link.speed =  4 * 1000;
9943 		break;
9944 	case SLI4_READ_TOPOLOGY_SPEED_8G:
9945 		hw->link.speed =  8 * 1000;
9946 		break;
9947 	case SLI4_READ_TOPOLOGY_SPEED_16G:
9948 		hw->link.speed = 16 * 1000;
9949 		hw->link.loop_map = NULL;
9950 		break;
9951 	case SLI4_READ_TOPOLOGY_SPEED_32G:
9952 		hw->link.speed = 32 * 1000;
9953 		hw->link.loop_map = NULL;
9954 		break;
9955 	}
9956 
9957 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
9958 
9959 	ocs_hw_read_fcf(hw, SLI4_FCOE_FCF_TABLE_FIRST);
9960 
9961 	return 0;
9962 }
9963 
9964 static int32_t
9965 __ocs_hw_port_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9966 {
9967 	ocs_sli_port_t	*sport = ctx->app;
9968 	ocs_hw_t	*hw = sport->hw;
9969 
9970 	smtrace("port");
9971 
9972 	switch (evt) {
9973 	case OCS_EVT_EXIT:
9974 		/* ignore */
9975 		break;
9976 
9977 	case OCS_EVT_HW_PORT_REQ_FREE:
9978 	case OCS_EVT_HW_PORT_REQ_ATTACH:
9979 		if (data != NULL) {
9980 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
9981 		}
9982 		/* fall through */
9983 	default:
9984 		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
9985 		break;
9986 	}
9987 
9988 	return 0;
9989 }
9990 
9991 static void *
9992 __ocs_hw_port_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
9993 {
9994 	ocs_sli_port_t	*sport = ctx->app;
9995 	ocs_hw_t	*hw = sport->hw;
9996 
9997 	smtrace("port");
9998 
9999 	switch (evt) {
10000 	case OCS_EVT_ENTER:
10001 		if (data != NULL) {
10002 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10003 		}
10004 		if (hw->callback.port != NULL) {
10005 			hw->callback.port(hw->args.port,
10006 					OCS_HW_PORT_FREE_FAIL, sport);
10007 		}
10008 		break;
10009 	default:
10010 		break;
10011 	}
10012 
10013 	return NULL;
10014 }
10015 
10016 static void *
10017 __ocs_hw_port_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10018 {
10019 	ocs_sli_port_t	*sport = ctx->app;
10020 	ocs_hw_t	*hw = sport->hw;
10021 
10022 	smtrace("port");
10023 
10024 	switch (evt) {
10025 	case OCS_EVT_ENTER:
10026 		/* free SLI resource */
10027 		if (sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator)) {
10028 			ocs_log_err(hw->os, "FCOE_VPI free failure addr=%#x\n", sport->fc_id);
10029 		}
10030 
10031 		/* free mailbox buffer */
10032 		if (data != NULL) {
10033 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10034 		}
10035 		if (hw->callback.port != NULL) {
10036 			hw->callback.port(hw->args.port,
10037 					OCS_HW_PORT_FREE_OK, sport);
10038 		}
10039 		break;
10040 	default:
10041 		break;
10042 	}
10043 
10044 	return NULL;
10045 }
10046 
10047 static void *
10048 __ocs_hw_port_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10049 {
10050 	ocs_sli_port_t	*sport = ctx->app;
10051 	ocs_hw_t	*hw = sport->hw;
10052 
10053 	smtrace("port");
10054 
10055 	switch (evt) {
10056 	case OCS_EVT_ENTER:
10057 		/* free SLI resource */
10058 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10059 
10060 		/* free mailbox buffer */
10061 		if (data != NULL) {
10062 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10063 		}
10064 
10065 		if (hw->callback.port != NULL) {
10066 			hw->callback.port(hw->args.port,
10067 					OCS_HW_PORT_ATTACH_FAIL, sport);
10068 		}
10069 		if (sport->sm_free_req_pending) {
10070 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10071 		}
10072 		break;
10073 	default:
10074 		__ocs_hw_port_common(__func__, ctx, evt, data);
10075 		break;
10076 	}
10077 
10078 	return NULL;
10079 }
10080 
10081 static void *
10082 __ocs_hw_port_free_unreg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10083 {
10084 	ocs_sli_port_t	*sport = ctx->app;
10085 	ocs_hw_t	*hw = sport->hw;
10086 	uint8_t		*cmd = NULL;
10087 
10088 	smtrace("port");
10089 
10090 	switch (evt) {
10091 	case OCS_EVT_ENTER:
10092 		/* allocate memory and send unreg_vpi */
10093 		cmd = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10094 		if (!cmd) {
10095 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10096 			break;
10097 		}
10098 
10099 		if (0 == sli_cmd_unreg_vpi(&hw->sli, cmd, SLI4_BMBX_SIZE, sport->indicator,
10100 					   SLI4_UNREG_TYPE_PORT)) {
10101 			ocs_log_err(hw->os, "UNREG_VPI format failure\n");
10102 			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10103 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10104 			break;
10105 		}
10106 
10107 		if (ocs_hw_command(hw, cmd, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10108 			ocs_log_err(hw->os, "UNREG_VPI command failure\n");
10109 			ocs_free(hw->os, cmd, SLI4_BMBX_SIZE);
10110 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10111 			break;
10112 		}
10113 		break;
10114 	case OCS_EVT_RESPONSE:
10115 		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10116 		break;
10117 	case OCS_EVT_ERROR:
10118 		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10119 		break;
10120 	default:
10121 		__ocs_hw_port_common(__func__, ctx, evt, data);
10122 		break;
10123 	}
10124 
10125 	return NULL;
10126 }
10127 
10128 static void *
10129 __ocs_hw_port_free_nop(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10130 {
10131 	ocs_sli_port_t	*sport = ctx->app;
10132 	ocs_hw_t	*hw = sport->hw;
10133 
10134 	smtrace("port");
10135 
10136 	switch (evt) {
10137 	case OCS_EVT_ENTER:
10138 		/* Forward to execute in mailbox completion processing context */
10139 		if (ocs_hw_async_call(hw, __ocs_hw_port_realloc_cb, sport)) {
10140 			ocs_log_err(hw->os, "ocs_hw_async_call failed\n");
10141 		}
10142 		break;
10143 	case OCS_EVT_RESPONSE:
10144 		ocs_sm_transition(ctx, __ocs_hw_port_freed, data);
10145 		break;
10146 	case OCS_EVT_ERROR:
10147 		ocs_sm_transition(ctx, __ocs_hw_port_free_report_fail, data);
10148 		break;
10149 	default:
10150 		break;
10151 	}
10152 
10153 	return NULL;
10154 }
10155 
10156 static void *
10157 __ocs_hw_port_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10158 {
10159 	ocs_sli_port_t	*sport = ctx->app;
10160 	ocs_hw_t	*hw = sport->hw;
10161 
10162 	smtrace("port");
10163 
10164 	switch (evt) {
10165 	case OCS_EVT_ENTER:
10166 		if (data != NULL) {
10167 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10168 		}
10169 		if (hw->callback.port != NULL) {
10170 			hw->callback.port(hw->args.port,
10171 					OCS_HW_PORT_ATTACH_OK, sport);
10172 		}
10173 		if (sport->sm_free_req_pending) {
10174 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10175 		}
10176 		break;
10177 	case OCS_EVT_HW_PORT_REQ_FREE:
10178 		/* virtual/physical port request free */
10179 		ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10180 		break;
10181 	default:
10182 		__ocs_hw_port_common(__func__, ctx, evt, data);
10183 		break;
10184 	}
10185 
10186 	return NULL;
10187 }
10188 
10189 static void *
10190 __ocs_hw_port_attach_reg_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10191 {
10192 	ocs_sli_port_t	*sport = ctx->app;
10193 	ocs_hw_t	*hw = sport->hw;
10194 
10195 	smtrace("port");
10196 
10197 	switch (evt) {
10198 	case OCS_EVT_ENTER:
10199 		if (0 == sli_cmd_reg_vpi(&hw->sli, data, SLI4_BMBX_SIZE, sport, FALSE)) {
10200 			ocs_log_err(hw->os, "REG_VPI format failure\n");
10201 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10202 			break;
10203 		}
10204 
10205 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10206 			ocs_log_err(hw->os, "REG_VPI command failure\n");
10207 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10208 			break;
10209 		}
10210 		break;
10211 	case OCS_EVT_RESPONSE:
10212 		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10213 		break;
10214 	case OCS_EVT_ERROR:
10215 		ocs_sm_transition(ctx, __ocs_hw_port_attach_report_fail, data);
10216 		break;
10217 	case OCS_EVT_HW_PORT_REQ_FREE:
10218 		/* Wait for attach response and then free */
10219 		sport->sm_free_req_pending = 1;
10220 		break;
10221 	default:
10222 		__ocs_hw_port_common(__func__, ctx, evt, data);
10223 		break;
10224 	}
10225 
10226 	return NULL;
10227 }
10228 
10229 static void *
10230 __ocs_hw_port_done(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10231 {
10232 	ocs_sli_port_t	*sport = ctx->app;
10233 	ocs_hw_t	*hw = sport->hw;
10234 
10235 	smtrace("port");
10236 
10237 	switch (evt) {
10238 	case OCS_EVT_ENTER:
10239 		/* free SLI resource */
10240 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10241 
10242 		/* free mailbox buffer */
10243 		if (data != NULL) {
10244 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10245 		}
10246 		break;
10247 	default:
10248 		__ocs_hw_port_common(__func__, ctx, evt, data);
10249 		break;
10250 	}
10251 
10252 	return NULL;
10253 }
10254 
10255 static void *
10256 __ocs_hw_port_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10257 {
10258 	ocs_sli_port_t	*sport = ctx->app;
10259 	ocs_hw_t	*hw = sport->hw;
10260 
10261 	smtrace("port");
10262 
10263 	switch (evt) {
10264 	case OCS_EVT_ENTER:
10265 		if (data != NULL) {
10266 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10267 		}
10268 		if (hw->callback.port != NULL) {
10269 			hw->callback.port(hw->args.port,
10270 					OCS_HW_PORT_ALLOC_OK, sport);
10271 		}
10272 		/* If there is a pending free request, then handle it now */
10273 		if (sport->sm_free_req_pending) {
10274 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10275 		}
10276 		break;
10277 	case OCS_EVT_HW_PORT_REQ_ATTACH:
10278 		/* virtual port requests attach */
10279 		ocs_sm_transition(ctx, __ocs_hw_port_attach_reg_vpi, data);
10280 		break;
10281 	case OCS_EVT_HW_PORT_ATTACH_OK:
10282 		/* physical port attached (as part of attaching domain) */
10283 		ocs_sm_transition(ctx, __ocs_hw_port_attached, data);
10284 		break;
10285 	case OCS_EVT_HW_PORT_REQ_FREE:
10286 		/* virtual port request free */
10287 		if (SLI4_IF_TYPE_LANCER_FC_ETH == sli_get_if_type(&hw->sli)) {
10288 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10289 		} else {
10290 			/*
10291 			 * Note: BE3/Skyhawk will respond with a status of 0x20
10292 			 *       unless the reg_vpi has been issued, so we can
10293 			 *       skip the unreg_vpi for these adapters.
10294 			 *
10295 			 * Send a nop to make sure that free doesn't occur in
10296 			 * same context
10297 			 */
10298 			ocs_sm_transition(ctx, __ocs_hw_port_free_nop, NULL);
10299 		}
10300 		break;
10301 	default:
10302 		__ocs_hw_port_common(__func__, ctx, evt, data);
10303 		break;
10304 	}
10305 
10306 	return NULL;
10307 }
10308 
10309 static void *
10310 __ocs_hw_port_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10311 {
10312 	ocs_sli_port_t	*sport = ctx->app;
10313 	ocs_hw_t	*hw = sport->hw;
10314 
10315 	smtrace("port");
10316 
10317 	switch (evt) {
10318 	case OCS_EVT_ENTER:
10319 		/* free SLI resource */
10320 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VPI, sport->indicator);
10321 
10322 		/* free mailbox buffer */
10323 		if (data != NULL) {
10324 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10325 		}
10326 
10327 		if (hw->callback.port != NULL) {
10328 			hw->callback.port(hw->args.port,
10329 					OCS_HW_PORT_ALLOC_FAIL, sport);
10330 		}
10331 
10332 		/* If there is a pending free request, then handle it now */
10333 		if (sport->sm_free_req_pending) {
10334 			ocs_sm_transition(ctx, __ocs_hw_port_free_unreg_vpi, NULL);
10335 		}
10336 		break;
10337 	default:
10338 		__ocs_hw_port_common(__func__, ctx, evt, data);
10339 		break;
10340 	}
10341 
10342 	return NULL;
10343 }
10344 
10345 static void *
10346 __ocs_hw_port_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10347 {
10348 	ocs_sli_port_t	*sport = ctx->app;
10349 	ocs_hw_t	*hw = sport->hw;
10350 	uint8_t		*payload = NULL;
10351 
10352 	smtrace("port");
10353 
10354 	switch (evt) {
10355 	case OCS_EVT_ENTER:
10356 		/* allocate memory for the service parameters */
10357 		if (ocs_dma_alloc(hw->os, &sport->dma, 112, 4)) {
10358 			ocs_log_err(hw->os, "Failed to allocate DMA memory\n");
10359 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10360 			break;
10361 		}
10362 
10363 		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10364 					&sport->dma, sport->indicator)) {
10365 			ocs_log_err(hw->os, "READ_SPARM64 allocation failure\n");
10366 			ocs_dma_free(hw->os, &sport->dma);
10367 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10368 			break;
10369 		}
10370 
10371 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10372 			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10373 			ocs_dma_free(hw->os, &sport->dma);
10374 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10375 			break;
10376 		}
10377 		break;
10378 	case OCS_EVT_RESPONSE:
10379 		payload = sport->dma.virt;
10380 
10381 		ocs_display_sparams(sport->display_name, "sport sparm64", 0, NULL, payload);
10382 
10383 		ocs_memcpy(&sport->sli_wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET,
10384 				sizeof(sport->sli_wwpn));
10385 		ocs_memcpy(&sport->sli_wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET,
10386 				sizeof(sport->sli_wwnn));
10387 
10388 		ocs_dma_free(hw->os, &sport->dma);
10389 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_init_vpi, data);
10390 		break;
10391 	case OCS_EVT_ERROR:
10392 		ocs_dma_free(hw->os, &sport->dma);
10393 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10394 		break;
10395 	case OCS_EVT_HW_PORT_REQ_FREE:
10396 		/* Wait for attach response and then free */
10397 		sport->sm_free_req_pending = 1;
10398 		break;
10399 	case OCS_EVT_EXIT:
10400 		break;
10401 	default:
10402 		__ocs_hw_port_common(__func__, ctx, evt, data);
10403 		break;
10404 	}
10405 
10406 	return NULL;
10407 }
10408 
10409 static void *
10410 __ocs_hw_port_alloc_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10411 {
10412 	ocs_sli_port_t	*sport = ctx->app;
10413 
10414 	smtrace("port");
10415 
10416 	switch (evt) {
10417 	case OCS_EVT_ENTER:
10418 		/* no-op */
10419 		break;
10420 	case OCS_EVT_HW_PORT_ALLOC_OK:
10421 		ocs_sm_transition(ctx, __ocs_hw_port_allocated, NULL);
10422 		break;
10423 	case OCS_EVT_HW_PORT_ALLOC_FAIL:
10424 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, NULL);
10425 		break;
10426 	case OCS_EVT_HW_PORT_REQ_FREE:
10427 		/* Wait for attach response and then free */
10428 		sport->sm_free_req_pending = 1;
10429 		break;
10430 	default:
10431 		__ocs_hw_port_common(__func__, ctx, evt, data);
10432 		break;
10433 	}
10434 
10435 	return NULL;
10436 }
10437 
10438 static void *
10439 __ocs_hw_port_alloc_init_vpi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10440 {
10441 	ocs_sli_port_t	*sport = ctx->app;
10442 	ocs_hw_t	*hw = sport->hw;
10443 
10444 	smtrace("port");
10445 
10446 	switch (evt) {
10447 	case OCS_EVT_ENTER:
10448 		/* If there is a pending free request, then handle it now */
10449 		if (sport->sm_free_req_pending) {
10450 			ocs_sm_transition(ctx, __ocs_hw_port_freed, NULL);
10451 			return NULL;
10452 		}
10453 
10454 		/* TODO XXX transitioning to done only works if this is called
10455 		 * directly from ocs_hw_port_alloc BUT not if called from
10456 		 * read_sparm64. In the later case, we actually want to go
10457 		 * through report_ok/fail
10458 		 */
10459 		if (0 == sli_cmd_init_vpi(&hw->sli, data, SLI4_BMBX_SIZE,
10460 					sport->indicator, sport->domain->indicator)) {
10461 			ocs_log_err(hw->os, "INIT_VPI allocation failure\n");
10462 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10463 			break;
10464 		}
10465 
10466 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_port_cb, sport)) {
10467 			ocs_log_err(hw->os, "INIT_VPI command failure\n");
10468 			ocs_sm_transition(ctx, __ocs_hw_port_done, data);
10469 			break;
10470 		}
10471 		break;
10472 	case OCS_EVT_RESPONSE:
10473 		ocs_sm_transition(ctx, __ocs_hw_port_allocated, data);
10474 		break;
10475 	case OCS_EVT_ERROR:
10476 		ocs_sm_transition(ctx, __ocs_hw_port_alloc_report_fail, data);
10477 		break;
10478 	case OCS_EVT_HW_PORT_REQ_FREE:
10479 		/* Wait for attach response and then free */
10480 		sport->sm_free_req_pending = 1;
10481 		break;
10482 	case OCS_EVT_EXIT:
10483 		break;
10484 	default:
10485 		__ocs_hw_port_common(__func__, ctx, evt, data);
10486 		break;
10487 	}
10488 
10489 	return NULL;
10490 }
10491 
10492 static int32_t
10493 __ocs_hw_port_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10494 {
10495 	ocs_sli_port_t *sport = arg;
10496 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
10497 	ocs_sm_event_t	evt;
10498 
10499 	if (status || hdr->status) {
10500 		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10501 			      sport->indicator, status, hdr->status);
10502 		evt = OCS_EVT_ERROR;
10503 	} else {
10504 		evt = OCS_EVT_RESPONSE;
10505 	}
10506 
10507 	ocs_sm_post_event(&sport->ctx, evt, mqe);
10508 
10509 	return 0;
10510 }
10511 
10512 static int32_t
10513 __ocs_hw_port_realloc_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
10514 {
10515 	ocs_sli_port_t *sport = arg;
10516 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
10517 	ocs_sm_event_t	evt;
10518 	uint8_t *mqecpy;
10519 
10520 	if (status || hdr->status) {
10521 		ocs_log_debug(hw->os, "bad status vpi=%#x st=%x hdr=%x\n",
10522 			      sport->indicator, status, hdr->status);
10523 		evt = OCS_EVT_ERROR;
10524 	} else {
10525 		evt = OCS_EVT_RESPONSE;
10526 	}
10527 
10528 	/*
10529 	 * In this case we have to malloc a mailbox command buffer, as it is reused
10530 	 * in the state machine post event call, and eventually freed
10531 	 */
10532 	mqecpy = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
10533 	if (mqecpy == NULL) {
10534 		ocs_log_err(hw->os, "malloc mqecpy failed\n");
10535 		return -1;
10536 	}
10537 	ocs_memcpy(mqecpy, mqe, SLI4_BMBX_SIZE);
10538 
10539 	ocs_sm_post_event(&sport->ctx, evt, mqecpy);
10540 
10541 	return 0;
10542 }
10543 
10544 /***************************************************************************
10545  * Domain state machine
10546  */
10547 
10548 static int32_t
10549 __ocs_hw_domain_common(const char *funcname, ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10550 {
10551 	ocs_domain_t	*domain = ctx->app;
10552 	ocs_hw_t	*hw = domain->hw;
10553 
10554 	smtrace("domain");
10555 
10556 	switch (evt) {
10557 	case OCS_EVT_EXIT:
10558 		/* ignore */
10559 		break;
10560 
10561 	default:
10562 		ocs_log_test(hw->os, "%s %-20s not handled\n", funcname, ocs_sm_event_name(evt));
10563 		break;
10564 	}
10565 
10566 	return 0;
10567 }
10568 
10569 static void *
10570 __ocs_hw_domain_alloc_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10571 {
10572 	ocs_domain_t	*domain = ctx->app;
10573 	ocs_hw_t	*hw = domain->hw;
10574 
10575 	smtrace("domain");
10576 
10577 	switch (evt) {
10578 	case OCS_EVT_ENTER:
10579 		/* free command buffer */
10580 		if (data != NULL) {
10581 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10582 		}
10583 		/* free SLI resources */
10584 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10585 		/* TODO how to free FCFI (or do we at all)? */
10586 
10587 		if (hw->callback.domain != NULL) {
10588 			hw->callback.domain(hw->args.domain,
10589 					OCS_HW_DOMAIN_ALLOC_FAIL,
10590 					domain);
10591 		}
10592 		break;
10593 	default:
10594 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10595 		break;
10596 	}
10597 
10598 	return NULL;
10599 }
10600 
10601 static void *
10602 __ocs_hw_domain_attached(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10603 {
10604 	ocs_domain_t	*domain = ctx->app;
10605 	ocs_hw_t	*hw = domain->hw;
10606 
10607 	smtrace("domain");
10608 
10609 	switch (evt) {
10610 	case OCS_EVT_ENTER:
10611 		/* free mailbox buffer and send alloc ok to physical sport */
10612 		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10613 		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ATTACH_OK, NULL);
10614 
10615 		/* now inform registered callbacks */
10616 		if (hw->callback.domain != NULL) {
10617 			hw->callback.domain(hw->args.domain,
10618 					OCS_HW_DOMAIN_ATTACH_OK,
10619 					domain);
10620 		}
10621 		break;
10622 	case OCS_EVT_HW_DOMAIN_REQ_FREE:
10623 		ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10624 		break;
10625 	default:
10626 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10627 		break;
10628 	}
10629 
10630 	return NULL;
10631 }
10632 
10633 static void *
10634 __ocs_hw_domain_attach_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10635 {
10636 	ocs_domain_t	*domain = ctx->app;
10637 	ocs_hw_t	*hw = domain->hw;
10638 
10639 	smtrace("domain");
10640 
10641 	switch (evt) {
10642 	case OCS_EVT_ENTER:
10643 		if (data != NULL) {
10644 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10645 		}
10646 		/* free SLI resources */
10647 		sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI, domain->indicator);
10648 		/* TODO how to free FCFI (or do we at all)? */
10649 
10650 		if (hw->callback.domain != NULL) {
10651 			hw->callback.domain(hw->args.domain,
10652 					OCS_HW_DOMAIN_ATTACH_FAIL,
10653 					domain);
10654 		}
10655 		break;
10656 	case OCS_EVT_EXIT:
10657 		break;
10658 	default:
10659 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10660 		break;
10661 	}
10662 
10663 	return NULL;
10664 }
10665 
10666 static void *
10667 __ocs_hw_domain_attach_reg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10668 {
10669 	ocs_domain_t	*domain = ctx->app;
10670 	ocs_hw_t	*hw = domain->hw;
10671 
10672 	smtrace("domain");
10673 
10674 	switch (evt) {
10675 	case OCS_EVT_ENTER:
10676 
10677 		ocs_display_sparams("", "reg vpi", 0, NULL, domain->dma.virt);
10678 
10679 		if (0 == sli_cmd_reg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain)) {
10680 			ocs_log_err(hw->os, "REG_VFI format failure\n");
10681 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10682 			break;
10683 		}
10684 
10685 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10686 			ocs_log_err(hw->os, "REG_VFI command failure\n");
10687 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10688 			break;
10689 		}
10690 		break;
10691 	case OCS_EVT_RESPONSE:
10692 		ocs_sm_transition(ctx, __ocs_hw_domain_attached, data);
10693 		break;
10694 	case OCS_EVT_ERROR:
10695 		ocs_sm_transition(ctx, __ocs_hw_domain_attach_report_fail, data);
10696 		break;
10697 	default:
10698 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10699 		break;
10700 	}
10701 
10702 	return NULL;
10703 }
10704 
10705 static void *
10706 __ocs_hw_domain_allocated(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10707 {
10708 	ocs_domain_t	*domain = ctx->app;
10709 	ocs_hw_t	*hw = domain->hw;
10710 
10711 	smtrace("domain");
10712 
10713 	switch (evt) {
10714 	case OCS_EVT_ENTER:
10715 		/* free mailbox buffer and send alloc ok to physical sport */
10716 		ocs_free(hw->os, data, SLI4_BMBX_SIZE);
10717 		ocs_sm_post_event(&domain->sport->ctx, OCS_EVT_HW_PORT_ALLOC_OK, NULL);
10718 
10719 		ocs_hw_domain_add(hw, domain);
10720 
10721 		/* now inform registered callbacks */
10722 		if (hw->callback.domain != NULL) {
10723 			hw->callback.domain(hw->args.domain,
10724 					OCS_HW_DOMAIN_ALLOC_OK,
10725 					domain);
10726 		}
10727 		break;
10728 	case OCS_EVT_HW_DOMAIN_REQ_ATTACH:
10729 		ocs_sm_transition(ctx, __ocs_hw_domain_attach_reg_vfi, data);
10730 		break;
10731 	case OCS_EVT_HW_DOMAIN_REQ_FREE:
10732 		/* unreg_fcfi/vfi */
10733 		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10734 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, NULL);
10735 		} else {
10736 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_vfi, NULL);
10737 		}
10738 		break;
10739 	default:
10740 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10741 		break;
10742 	}
10743 
10744 	return NULL;
10745 }
10746 
10747 static void *
10748 __ocs_hw_domain_alloc_read_sparm64(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10749 {
10750 	ocs_domain_t	*domain = ctx->app;
10751 	ocs_hw_t	*hw = domain->hw;
10752 
10753 	smtrace("domain");
10754 
10755 	switch (evt) {
10756 	case OCS_EVT_ENTER:
10757 		if (0 == sli_cmd_read_sparm64(&hw->sli, data, SLI4_BMBX_SIZE,
10758 					&domain->dma, SLI4_READ_SPARM64_VPI_DEFAULT)) {
10759 			ocs_log_err(hw->os, "READ_SPARM64 format failure\n");
10760 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10761 			break;
10762 		}
10763 
10764 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10765 			ocs_log_err(hw->os, "READ_SPARM64 command failure\n");
10766 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10767 			break;
10768 		}
10769 		break;
10770 	case OCS_EVT_EXIT:
10771 		break;
10772 	case OCS_EVT_RESPONSE:
10773 		ocs_display_sparams(domain->display_name, "domain sparm64", 0, NULL, domain->dma.virt);
10774 
10775 		ocs_sm_transition(ctx, __ocs_hw_domain_allocated, data);
10776 		break;
10777 	case OCS_EVT_ERROR:
10778 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10779 		break;
10780 	default:
10781 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10782 		break;
10783 	}
10784 
10785 	return NULL;
10786 }
10787 
10788 static void *
10789 __ocs_hw_domain_alloc_init_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10790 {
10791 	ocs_domain_t	*domain = ctx->app;
10792 	ocs_sli_port_t	*sport = domain->sport;
10793 	ocs_hw_t	*hw = domain->hw;
10794 
10795 	smtrace("domain");
10796 
10797 	switch (evt) {
10798 	case OCS_EVT_ENTER:
10799 		if (0 == sli_cmd_init_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->indicator,
10800 					domain->fcf_indicator, sport->indicator)) {
10801 			ocs_log_err(hw->os, "INIT_VFI format failure\n");
10802 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10803 			break;
10804 		}
10805 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10806 			ocs_log_err(hw->os, "INIT_VFI command failure\n");
10807 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10808 			break;
10809 		}
10810 		break;
10811 	case OCS_EVT_EXIT:
10812 		break;
10813 	case OCS_EVT_RESPONSE:
10814 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10815 		break;
10816 	case OCS_EVT_ERROR:
10817 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10818 		break;
10819 	default:
10820 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10821 		break;
10822 	}
10823 
10824 	return NULL;
10825 }
10826 
10827 static void *
10828 __ocs_hw_domain_alloc_reg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10829 {
10830 	ocs_domain_t	*domain = ctx->app;
10831 	ocs_hw_t	*hw = domain->hw;
10832 
10833 	smtrace("domain");
10834 
10835 	switch (evt) {
10836 	case OCS_EVT_ENTER: {
10837 		sli4_cmd_rq_cfg_t rq_cfg[SLI4_CMD_REG_FCFI_NUM_RQ_CFG];
10838 		uint32_t i;
10839 
10840 		/* Set the filter match/mask values from hw's filter_def values */
10841 		for (i = 0; i < SLI4_CMD_REG_FCFI_NUM_RQ_CFG; i++) {
10842 			rq_cfg[i].rq_id = 0xffff;
10843 			rq_cfg[i].r_ctl_mask = (uint8_t) hw->config.filter_def[i];
10844 			rq_cfg[i].r_ctl_match = (uint8_t) (hw->config.filter_def[i] >> 8);
10845 			rq_cfg[i].type_mask = (uint8_t) (hw->config.filter_def[i] >> 16);
10846 			rq_cfg[i].type_match = (uint8_t) (hw->config.filter_def[i] >> 24);
10847 		}
10848 
10849 		/* Set the rq_id for each, in order of RQ definition */
10850 		for (i = 0; i < hw->hw_rq_count; i++) {
10851 			if (i >= ARRAY_SIZE(rq_cfg)) {
10852 				ocs_log_warn(hw->os, "more RQs than REG_FCFI filter entries\n");
10853 				break;
10854 			}
10855 			rq_cfg[i].rq_id = hw->hw_rq[i]->hdr->id;
10856 		}
10857 
10858 		if (!data) {
10859 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10860 			break;
10861 		}
10862 
10863 		if (hw->hw_mrq_count) {
10864 			if (OCS_HW_RTN_SUCCESS != ocs_hw_config_mrq(hw, SLI4_CMD_REG_FCFI_SET_FCFI_MODE,
10865 				 domain->vlan_id, domain->fcf)) {
10866 				ocs_log_err(hw->os, "REG_FCFI_MRQ format failure\n");
10867 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10868 				break;
10869 			}
10870 
10871 		} else {
10872 			if (0 == sli_cmd_reg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf,
10873 						rq_cfg, domain->vlan_id)) {
10874 				ocs_log_err(hw->os, "REG_FCFI format failure\n");
10875 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10876 				break;
10877 			}
10878 		}
10879 
10880 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
10881 			ocs_log_err(hw->os, "REG_FCFI command failure\n");
10882 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10883 			break;
10884 		}
10885 		break;
10886 	}
10887 	case OCS_EVT_EXIT:
10888 		break;
10889 	case OCS_EVT_RESPONSE:
10890 		if (!data) {
10891 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
10892 			break;
10893 		}
10894 
10895 		domain->fcf_indicator = ((sli4_cmd_reg_fcfi_t *)data)->fcfi;
10896 
10897 		/*
10898 		 * IF_TYPE 0 devices do not support explicit VFI and VPI initialization
10899 		 * and instead rely on implicit initialization during VFI registration.
10900 		 * Short circuit normal processing here for those devices.
10901 		 */
10902 		if (SLI4_IF_TYPE_BE3_SKH_PF == sli_get_if_type(&hw->sli)) {
10903 			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_read_sparm64, data);
10904 		} else {
10905 			ocs_sm_transition(ctx, __ocs_hw_domain_alloc_init_vfi, data);
10906 		}
10907 		break;
10908 	case OCS_EVT_ERROR:
10909 		ocs_sm_transition(ctx, __ocs_hw_domain_alloc_report_fail, data);
10910 		break;
10911 	default:
10912 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10913 		break;
10914 	}
10915 
10916 	return NULL;
10917 }
10918 
10919 static void *
10920 __ocs_hw_domain_init(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10921 {
10922 	ocs_domain_t	*domain = ctx->app;
10923 	ocs_hw_t	*hw = domain->hw;
10924 
10925 	smtrace("domain");
10926 
10927 	switch (evt) {
10928 	case OCS_EVT_ENTER:
10929 		if (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC) {
10930 			/*
10931 			 * For FC, the HW alread registered a FCFI
10932 			 * Copy FCF information into the domain and jump to INIT_VFI
10933 			 */
10934 			domain->fcf_indicator = hw->fcf_indicator;
10935 			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_init_vfi, data);
10936 		} else {
10937 			ocs_sm_transition(&domain->sm, __ocs_hw_domain_alloc_reg_fcfi, data);
10938 		}
10939 		break;
10940 	default:
10941 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10942 		break;
10943 	}
10944 
10945 	return NULL;
10946 }
10947 
10948 static void *
10949 __ocs_hw_domain_free_report_fail(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10950 {
10951 	ocs_domain_t	*domain = ctx->app;
10952 
10953 	smtrace("domain");
10954 
10955 	switch (evt) {
10956 	case OCS_EVT_ENTER:
10957 		if (domain != NULL) {
10958 			ocs_hw_t	*hw = domain->hw;
10959 
10960 			ocs_hw_domain_del(hw, domain);
10961 
10962 			if (hw->callback.domain != NULL) {
10963 				hw->callback.domain(hw->args.domain,
10964 						     OCS_HW_DOMAIN_FREE_FAIL,
10965 						     domain);
10966 			}
10967 		}
10968 
10969 		/* free command buffer */
10970 		if (data != NULL) {
10971 			ocs_free(domain != NULL ? domain->hw->os : NULL, data, SLI4_BMBX_SIZE);
10972 		}
10973 		break;
10974 	case OCS_EVT_EXIT:
10975 		break;
10976 	default:
10977 		__ocs_hw_domain_common(__func__, ctx, evt, data);
10978 		break;
10979 	}
10980 
10981 	return NULL;
10982 }
10983 
10984 static void *
10985 __ocs_hw_domain_freed(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
10986 {
10987 	ocs_domain_t	*domain = ctx->app;
10988 
10989 	smtrace("domain");
10990 
10991 	switch (evt) {
10992 	case OCS_EVT_ENTER:
10993 		/* Free DMA and mailbox buffer */
10994 		if (domain != NULL) {
10995 			ocs_hw_t *hw = domain->hw;
10996 
10997 			/* free VFI resource */
10998 			sli_resource_free(&hw->sli, SLI_RSRC_FCOE_VFI,
10999 					  domain->indicator);
11000 
11001 			ocs_hw_domain_del(hw, domain);
11002 
11003 			/* inform registered callbacks */
11004 			if (hw->callback.domain != NULL) {
11005 				hw->callback.domain(hw->args.domain,
11006 						     OCS_HW_DOMAIN_FREE_OK,
11007 						     domain);
11008 			}
11009 		}
11010 		if (data != NULL) {
11011 			ocs_free(NULL, data, SLI4_BMBX_SIZE);
11012 		}
11013 		break;
11014 	case OCS_EVT_EXIT:
11015 		break;
11016 	default:
11017 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11018 		break;
11019 	}
11020 
11021 	return NULL;
11022 }
11023 
11024 
11025 static void *
11026 __ocs_hw_domain_free_redisc_fcf(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11027 {
11028 	ocs_domain_t	*domain = ctx->app;
11029 	ocs_hw_t	*hw = domain->hw;
11030 
11031 	smtrace("domain");
11032 
11033 	switch (evt) {
11034 	case OCS_EVT_ENTER:
11035 		/* if we're in the middle of a teardown, skip sending rediscover */
11036 		if (hw->state == OCS_HW_STATE_TEARDOWN_IN_PROGRESS) {
11037 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11038 			break;
11039 		}
11040 		if (0 == sli_cmd_fcoe_rediscover_fcf(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf)) {
11041 			ocs_log_err(hw->os, "REDISCOVER_FCF format failure\n");
11042 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11043 			break;
11044 		}
11045 
11046 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11047 			ocs_log_err(hw->os, "REDISCOVER_FCF command failure\n");
11048 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11049 		}
11050 		break;
11051 	case OCS_EVT_RESPONSE:
11052 	case OCS_EVT_ERROR:
11053 		/* REDISCOVER_FCF can fail if none exist */
11054 		ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11055 		break;
11056 	case OCS_EVT_EXIT:
11057 		break;
11058 	default:
11059 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11060 		break;
11061 	}
11062 
11063 	return NULL;
11064 }
11065 
11066 static void *
11067 __ocs_hw_domain_free_unreg_fcfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11068 {
11069 	ocs_domain_t	*domain = ctx->app;
11070 	ocs_hw_t	*hw = domain->hw;
11071 
11072 	smtrace("domain");
11073 
11074 	switch (evt) {
11075 	case OCS_EVT_ENTER:
11076 		if (data == NULL) {
11077 			data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11078 			if (!data) {
11079 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11080 				break;
11081 			}
11082 		}
11083 
11084 		if (0 == sli_cmd_unreg_fcfi(&hw->sli, data, SLI4_BMBX_SIZE, domain->fcf_indicator)) {
11085 			ocs_log_err(hw->os, "UNREG_FCFI format failure\n");
11086 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11087 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11088 			break;
11089 		}
11090 
11091 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11092 			ocs_log_err(hw->os, "UNREG_FCFI command failure\n");
11093 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11094 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11095 			break;
11096 		}
11097 		break;
11098 	case OCS_EVT_RESPONSE:
11099 		if (domain->req_rediscover_fcf) {
11100 			domain->req_rediscover_fcf = FALSE;
11101 			ocs_sm_transition(ctx, __ocs_hw_domain_free_redisc_fcf, data);
11102 		} else {
11103 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11104 		}
11105 		break;
11106 	case OCS_EVT_ERROR:
11107 		ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11108 		break;
11109 	case OCS_EVT_EXIT:
11110 		break;
11111 	default:
11112 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11113 		break;
11114 	}
11115 
11116 	return NULL;
11117 }
11118 
11119 static void *
11120 __ocs_hw_domain_free_unreg_vfi(ocs_sm_ctx_t *ctx, ocs_sm_event_t evt, void *data)
11121 {
11122 	ocs_domain_t	*domain = ctx->app;
11123 	ocs_hw_t	*hw = domain->hw;
11124 	uint8_t		is_fc = FALSE;
11125 
11126 	smtrace("domain");
11127 
11128 	is_fc = (sli_get_medium(&hw->sli) == SLI_LINK_MEDIUM_FC);
11129 
11130 	switch (evt) {
11131 	case OCS_EVT_ENTER:
11132 		if (data == NULL) {
11133 			data = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_ZERO | OCS_M_NOWAIT);
11134 			if (!data) {
11135 				ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11136 				break;
11137 			}
11138 		}
11139 
11140 		if (0 == sli_cmd_unreg_vfi(&hw->sli, data, SLI4_BMBX_SIZE, domain,
11141 					SLI4_UNREG_TYPE_DOMAIN)) {
11142 			ocs_log_err(hw->os, "UNREG_VFI format failure\n");
11143 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11144 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11145 			break;
11146 		}
11147 
11148 		if (ocs_hw_command(hw, data, OCS_CMD_NOWAIT, __ocs_hw_domain_cb, domain)) {
11149 			ocs_log_err(hw->os, "UNREG_VFI command failure\n");
11150 			ocs_free(hw->os, data, SLI4_BMBX_SIZE);
11151 			ocs_sm_post_event(ctx, OCS_EVT_ERROR, NULL);
11152 			break;
11153 		}
11154 		break;
11155 	case OCS_EVT_ERROR:
11156 		if (is_fc) {
11157 			ocs_sm_transition(ctx, __ocs_hw_domain_free_report_fail, data);
11158 		} else {
11159 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11160 		}
11161 		break;
11162 	case OCS_EVT_RESPONSE:
11163 		if (is_fc) {
11164 			ocs_sm_transition(ctx, __ocs_hw_domain_freed, data);
11165 		} else {
11166 			ocs_sm_transition(ctx, __ocs_hw_domain_free_unreg_fcfi, data);
11167 		}
11168 		break;
11169 	default:
11170 		__ocs_hw_domain_common(__func__, ctx, evt, data);
11171 		break;
11172 	}
11173 
11174 	return NULL;
11175 }
11176 
11177 /* callback for domain alloc/attach/free */
11178 static int32_t
11179 __ocs_hw_domain_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11180 {
11181 	ocs_domain_t	*domain = arg;
11182 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
11183 	ocs_sm_event_t	evt;
11184 
11185 	if (status || hdr->status) {
11186 		ocs_log_debug(hw->os, "bad status vfi=%#x st=%x hdr=%x\n",
11187 			      domain->indicator, status, hdr->status);
11188 		evt = OCS_EVT_ERROR;
11189 	} else {
11190 		evt = OCS_EVT_RESPONSE;
11191 	}
11192 
11193 	ocs_sm_post_event(&domain->sm, evt, mqe);
11194 
11195 	return 0;
11196 }
11197 
11198 static int32_t
11199 target_wqe_timer_nop_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11200 {
11201 	ocs_hw_io_t *io = NULL;
11202 	ocs_hw_io_t *io_next = NULL;
11203 	uint64_t ticks_current = ocs_get_os_ticks();
11204 	uint32_t sec_elapsed;
11205 
11206 	sli4_mbox_command_header_t	*hdr = (sli4_mbox_command_header_t *)mqe;
11207 
11208 	if (status || hdr->status) {
11209 		ocs_log_debug(hw->os, "bad status st=%x hdr=%x\n",
11210 			      status, hdr->status);
11211 		/* go ahead and proceed with wqe timer checks... */
11212 	}
11213 
11214 	/* loop through active WQE list and check for timeouts */
11215 	ocs_lock(&hw->io_lock);
11216 		ocs_list_foreach_safe(&hw->io_timed_wqe, io, io_next) {
11217 			sec_elapsed = ((ticks_current - io->submit_ticks) / ocs_get_os_tick_freq());
11218 
11219 			/*
11220 			 * If elapsed time > timeout, abort it. No need to check type since
11221 			 * it wouldn't be on this list unless it was a target WQE
11222 			 */
11223 			if (sec_elapsed > io->tgt_wqe_timeout) {
11224 				ocs_log_test(hw->os, "IO timeout xri=0x%x tag=0x%x type=%d\n",
11225 					     io->indicator, io->reqtag, io->type);
11226 
11227 				/* remove from active_wqe list so won't try to abort again */
11228 				ocs_list_remove(&hw->io_timed_wqe, io);
11229 
11230 				/* save status of "timed out" for when abort completes */
11231 				io->status_saved = 1;
11232 				io->saved_status = SLI4_FC_WCQE_STATUS_TARGET_WQE_TIMEOUT;
11233 				io->saved_ext = 0;
11234 				io->saved_len = 0;
11235 
11236 				/* now abort outstanding IO */
11237 				ocs_hw_io_abort(hw, io, FALSE, NULL, NULL);
11238 			}
11239 			/*
11240 			 * need to go through entire list since each IO could have a
11241 			 * different timeout value
11242 			 */
11243 		}
11244 	ocs_unlock(&hw->io_lock);
11245 
11246 	/* if we're not in the middle of shutting down, schedule next timer */
11247 	if (!hw->active_wqe_timer_shutdown) {
11248 		ocs_setup_timer(hw->os, &hw->wqe_timer, target_wqe_timer_cb, hw, OCS_HW_WQ_TIMER_PERIOD_MS);
11249 	}
11250 	hw->in_active_wqe_timer = FALSE;
11251 	return 0;
11252 }
11253 
11254 static void
11255 target_wqe_timer_cb(void *arg)
11256 {
11257 	ocs_hw_t *hw = (ocs_hw_t *)arg;
11258 
11259 	/* delete existing timer; will kick off new timer after checking wqe timeouts */
11260 	hw->in_active_wqe_timer = TRUE;
11261 	ocs_del_timer(&hw->wqe_timer);
11262 
11263 	/* Forward timer callback to execute in the mailbox completion processing context */
11264 	if (ocs_hw_async_call(hw, target_wqe_timer_nop_cb, hw)) {
11265 		ocs_log_test(hw->os, "ocs_hw_async_call failed\n");
11266 	}
11267 }
11268 
11269 static void
11270 shutdown_target_wqe_timer(ocs_hw_t *hw)
11271 {
11272 	uint32_t	iters = 100;
11273 
11274 	if (hw->config.emulate_tgt_wqe_timeout) {
11275 		/* request active wqe timer shutdown, then wait for it to complete */
11276 		hw->active_wqe_timer_shutdown = TRUE;
11277 
11278 		/* delete WQE timer and wait for timer handler to complete (if necessary) */
11279 		ocs_del_timer(&hw->wqe_timer);
11280 
11281 		/* now wait for timer handler to complete (if necessary) */
11282 		while (hw->in_active_wqe_timer && iters) {
11283 			/*
11284 			 * if we happen to have just sent NOP mailbox command, make sure
11285 			 * completions are being processed
11286 			 */
11287 			ocs_hw_flush(hw);
11288 			iters--;
11289 		}
11290 
11291 		if (iters == 0) {
11292 			ocs_log_test(hw->os, "Failed to shutdown active wqe timer\n");
11293 		}
11294 	}
11295 }
11296 
11297 /**
11298  * @brief Determine if HW IO is owned by the port.
11299  *
11300  * @par Description
11301  * Determines if the given HW IO has been posted to the chip.
11302  *
11303  * @param hw Hardware context allocated by the caller.
11304  * @param io HW IO.
11305  *
11306  * @return Returns TRUE if given HW IO is port-owned.
11307  */
11308 uint8_t
11309 ocs_hw_is_io_port_owned(ocs_hw_t *hw, ocs_hw_io_t *io)
11310 {
11311 	/* Check to see if this is a port owned XRI */
11312 	return io->is_port_owned;
11313 }
11314 
11315 /**
11316  * @brief Return TRUE if exchange is port-owned.
11317  *
11318  * @par Description
11319  * Test to see if the xri is a port-owned xri.
11320  *
11321  * @param hw Hardware context.
11322  * @param xri Exchange indicator.
11323  *
11324  * @return Returns TRUE if XRI is a port owned XRI.
11325  */
11326 
11327 uint8_t
11328 ocs_hw_is_xri_port_owned(ocs_hw_t *hw, uint32_t xri)
11329 {
11330 	ocs_hw_io_t *io = ocs_hw_io_lookup(hw, xri);
11331 	return (io == NULL ? FALSE : io->is_port_owned);
11332 }
11333 
11334 /**
11335  * @brief Returns an XRI from the port owned list to the host.
11336  *
11337  * @par Description
11338  * Used when the POST_XRI command fails as well as when the RELEASE_XRI completes.
11339  *
11340  * @param hw Hardware context.
11341  * @param xri_base The starting XRI number.
11342  * @param xri_count The number of XRIs to free from the base.
11343  */
11344 static void
11345 ocs_hw_reclaim_xri(ocs_hw_t *hw, uint16_t xri_base, uint16_t xri_count)
11346 {
11347 	ocs_hw_io_t	*io;
11348 	uint32_t i;
11349 
11350 	for (i = 0; i < xri_count; i++) {
11351 		io = ocs_hw_io_lookup(hw, xri_base + i);
11352 
11353 		/*
11354 		 * if this is an auto xfer rdy XRI, then we need to release any
11355 		 * buffer attached to the XRI before moving the XRI back to the free pool.
11356 		 */
11357 		if (hw->auto_xfer_rdy_enabled) {
11358 			ocs_hw_rqpair_auto_xfer_rdy_move_to_host(hw, io);
11359 		}
11360 
11361 		ocs_lock(&hw->io_lock);
11362 			ocs_list_remove(&hw->io_port_owned, io);
11363 			io->is_port_owned = 0;
11364 			ocs_list_add_tail(&hw->io_free, io);
11365 		ocs_unlock(&hw->io_lock);
11366 	}
11367 }
11368 
11369 /**
11370  * @brief Called when the POST_XRI command completes.
11371  *
11372  * @par Description
11373  * Free the mailbox command buffer and reclaim the XRIs on failure.
11374  *
11375  * @param hw Hardware context.
11376  * @param status Status field from the mbox completion.
11377  * @param mqe Mailbox response structure.
11378  * @param arg Pointer to a callback function that signals the caller that the command is done.
11379  *
11380  * @return Returns 0.
11381  */
11382 static int32_t
11383 ocs_hw_cb_post_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
11384 {
11385 	sli4_cmd_post_xri_t	*post_xri = (sli4_cmd_post_xri_t*)mqe;
11386 
11387 	/* Reclaim the XRIs as host owned if the command fails */
11388 	if (status != 0) {
11389 		ocs_log_debug(hw->os, "Status 0x%x for XRI base 0x%x, cnt =x%x\n",
11390 			      status, post_xri->xri_base, post_xri->xri_count);
11391 		ocs_hw_reclaim_xri(hw, post_xri->xri_base, post_xri->xri_count);
11392 	}
11393 
11394 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11395 	return 0;
11396 }
11397 
11398 /**
11399  * @brief Issues a mailbox command to move XRIs from the host-controlled pool to the port.
11400  *
11401  * @param hw Hardware context.
11402  * @param xri_start The starting XRI to post.
11403  * @param num_to_post The number of XRIs to post.
11404  *
11405  * @return Returns OCS_HW_RTN_NO_MEMORY, OCS_HW_RTN_ERROR, or OCS_HW_RTN_SUCCESS.
11406  */
11407 
11408 static ocs_hw_rtn_e
11409 ocs_hw_post_xri(ocs_hw_t *hw, uint32_t xri_start, uint32_t num_to_post)
11410 {
11411 	uint8_t	*post_xri;
11412 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11413 
11414 	/* Since we need to allocate for mailbox queue, just always allocate */
11415 	post_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11416 	if (post_xri == NULL) {
11417 		ocs_log_err(hw->os, "no buffer for command\n");
11418 		return OCS_HW_RTN_NO_MEMORY;
11419 	}
11420 
11421 	/* Register the XRIs */
11422 	if (sli_cmd_post_xri(&hw->sli, post_xri, SLI4_BMBX_SIZE,
11423 			     xri_start, num_to_post)) {
11424 		rc = ocs_hw_command(hw, post_xri, OCS_CMD_NOWAIT, ocs_hw_cb_post_xri, NULL);
11425 		if (rc != OCS_HW_RTN_SUCCESS) {
11426 			ocs_free(hw->os, post_xri, SLI4_BMBX_SIZE);
11427 			ocs_log_err(hw->os, "post_xri failed\n");
11428 		}
11429 	}
11430 	return rc;
11431 }
11432 
11433 /**
11434  * @brief Move XRIs from the host-controlled pool to the port.
11435  *
11436  * @par Description
11437  * Removes IOs from the free list and moves them to the port.
11438  *
11439  * @param hw Hardware context.
11440  * @param num_xri The number of XRIs being requested to move to the chip.
11441  *
11442  * @return Returns the number of XRIs that were moved.
11443  */
11444 
11445 uint32_t
11446 ocs_hw_xri_move_to_port_owned(ocs_hw_t *hw, uint32_t num_xri)
11447 {
11448 	ocs_hw_io_t	*io;
11449 	uint32_t i;
11450 	uint32_t num_posted = 0;
11451 
11452 	/*
11453 	 * Note: We cannot use ocs_hw_io_alloc() because that would place the
11454 	 *       IO on the io_inuse list. We need to move from the io_free to
11455 	 *       the io_port_owned list.
11456 	 */
11457 	ocs_lock(&hw->io_lock);
11458 
11459 	for (i = 0; i < num_xri; i++) {
11460 
11461 		if (NULL != (io = ocs_list_remove_head(&hw->io_free))) {
11462 			ocs_hw_rtn_e rc;
11463 
11464 			/*
11465 			 * if this is an auto xfer rdy XRI, then we need to attach a
11466 			 * buffer to the XRI before submitting it to the chip. If a
11467 			 * buffer is unavailable, then we cannot post it, so return it
11468 			 * to the free pool.
11469 			 */
11470 			if (hw->auto_xfer_rdy_enabled) {
11471 				/* Note: uses the IO lock to get the auto xfer rdy buffer */
11472 				ocs_unlock(&hw->io_lock);
11473 				rc = ocs_hw_rqpair_auto_xfer_rdy_move_to_port(hw, io);
11474 				ocs_lock(&hw->io_lock);
11475 				if (rc != OCS_HW_RTN_SUCCESS) {
11476 					ocs_list_add_head(&hw->io_free, io);
11477 					break;
11478 				}
11479 			}
11480 			ocs_lock_init(hw->os, &io->axr_lock, "HW_axr_lock[%d]", io->indicator);
11481 			io->is_port_owned = 1;
11482 			ocs_list_add_tail(&hw->io_port_owned, io);
11483 
11484 			/* Post XRI */
11485 			if (ocs_hw_post_xri(hw, io->indicator, 1) != OCS_HW_RTN_SUCCESS ) {
11486 				ocs_hw_reclaim_xri(hw, io->indicator, i);
11487 				break;
11488 			}
11489 			num_posted++;
11490 		} else {
11491 			/* no more free XRIs */
11492 			break;
11493 		}
11494 	}
11495 	ocs_unlock(&hw->io_lock);
11496 
11497 	return num_posted;
11498 }
11499 
11500 /**
11501  * @brief Called when the RELEASE_XRI command completes.
11502  *
11503  * @par Description
11504  * Move the IOs back to the free pool on success.
11505  *
11506  * @param hw Hardware context.
11507  * @param status Status field from the mbox completion.
11508  * @param mqe Mailbox response structure.
11509  * @param arg Pointer to a callback function that signals the caller that the command is done.
11510  *
11511  * @return Returns 0.
11512  */
11513 static int32_t
11514 ocs_hw_cb_release_xri(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void  *arg)
11515 {
11516 	sli4_cmd_release_xri_t	*release_xri = (sli4_cmd_release_xri_t*)mqe;
11517 	uint8_t i;
11518 
11519 	/* Reclaim the XRIs as host owned if the command fails */
11520 	if (status != 0) {
11521 		ocs_log_err(hw->os, "Status 0x%x\n", status);
11522 	} else {
11523 		for (i = 0; i < release_xri->released_xri_count; i++) {
11524 			uint16_t xri = ((i & 1) == 0 ? release_xri->xri_tbl[i/2].xri_tag0 :
11525 					release_xri->xri_tbl[i/2].xri_tag1);
11526 			ocs_hw_reclaim_xri(hw, xri, 1);
11527 		}
11528 	}
11529 
11530 	ocs_free(hw->os, mqe, SLI4_BMBX_SIZE);
11531 	return 0;
11532 }
11533 
11534 /**
11535  * @brief Move XRIs from the port-controlled pool to the host.
11536  *
11537  * Requests XRIs from the FW to return to the host-owned pool.
11538  *
11539  * @param hw Hardware context.
11540  * @param num_xri The number of XRIs being requested to moved from the chip.
11541  *
11542  * @return Returns 0 for success, or a negative error code value for failure.
11543  */
11544 
11545 ocs_hw_rtn_e
11546 ocs_hw_xri_move_to_host_owned(ocs_hw_t *hw, uint8_t num_xri)
11547 {
11548 	uint8_t	*release_xri;
11549 	ocs_hw_rtn_e rc = OCS_HW_RTN_ERROR;
11550 
11551 	/* non-local buffer required for mailbox queue */
11552 	release_xri = ocs_malloc(hw->os, SLI4_BMBX_SIZE, OCS_M_NOWAIT);
11553 	if (release_xri == NULL) {
11554 		ocs_log_err(hw->os, "no buffer for command\n");
11555 		return OCS_HW_RTN_NO_MEMORY;
11556 	}
11557 
11558 	/* release the XRIs */
11559 	if (sli_cmd_release_xri(&hw->sli, release_xri, SLI4_BMBX_SIZE, num_xri)) {
11560 		rc = ocs_hw_command(hw, release_xri, OCS_CMD_NOWAIT, ocs_hw_cb_release_xri, NULL);
11561 		if (rc != OCS_HW_RTN_SUCCESS) {
11562 			ocs_log_err(hw->os, "release_xri failed\n");
11563 		}
11564 	}
11565 	/* If we are polling or an error occurred, then free the mailbox buffer */
11566 	if (release_xri != NULL && rc != OCS_HW_RTN_SUCCESS) {
11567 		ocs_free(hw->os, release_xri, SLI4_BMBX_SIZE);
11568 	}
11569 	return rc;
11570 }
11571 
11572 
11573 /**
11574  * @brief Allocate an ocs_hw_rx_buffer_t array.
11575  *
11576  * @par Description
11577  * An ocs_hw_rx_buffer_t array is allocated, along with the required DMA memory.
11578  *
11579  * @param hw Pointer to HW object.
11580  * @param rqindex RQ index for this buffer.
11581  * @param count Count of buffers in array.
11582  * @param size Size of buffer.
11583  *
11584  * @return Returns the pointer to the allocated ocs_hw_rq_buffer_t array.
11585  */
11586 static ocs_hw_rq_buffer_t *
11587 ocs_hw_rx_buffer_alloc(ocs_hw_t *hw, uint32_t rqindex, uint32_t count, uint32_t size)
11588 {
11589 	ocs_t *ocs = hw->os;
11590 	ocs_hw_rq_buffer_t *rq_buf = NULL;
11591 	ocs_hw_rq_buffer_t *prq;
11592 	uint32_t i;
11593 
11594 	if (count != 0) {
11595 		rq_buf = ocs_malloc(hw->os, sizeof(*rq_buf) * count, OCS_M_NOWAIT | OCS_M_ZERO);
11596 		if (rq_buf == NULL) {
11597 			ocs_log_err(hw->os, "Failure to allocate unsolicited DMA trackers\n");
11598 			return NULL;
11599 		}
11600 
11601 		for (i = 0, prq = rq_buf; i < count; i ++, prq++) {
11602 			prq->rqindex = rqindex;
11603 			if (ocs_dma_alloc(ocs, &prq->dma, size, OCS_MIN_DMA_ALIGNMENT)) {
11604 				ocs_log_err(hw->os, "DMA allocation failed\n");
11605 				ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11606 				rq_buf = NULL;
11607 				break;
11608 			}
11609 		}
11610 	}
11611 	return rq_buf;
11612 }
11613 
11614 /**
11615  * @brief Free an ocs_hw_rx_buffer_t array.
11616  *
11617  * @par Description
11618  * The ocs_hw_rx_buffer_t array is freed, along with allocated DMA memory.
11619  *
11620  * @param hw Pointer to HW object.
11621  * @param rq_buf Pointer to ocs_hw_rx_buffer_t array.
11622  * @param count Count of buffers in array.
11623  *
11624  * @return None.
11625  */
11626 static void
11627 ocs_hw_rx_buffer_free(ocs_hw_t *hw, ocs_hw_rq_buffer_t *rq_buf, uint32_t count)
11628 {
11629 	ocs_t *ocs = hw->os;
11630 	uint32_t i;
11631 	ocs_hw_rq_buffer_t *prq;
11632 
11633 	if (rq_buf != NULL) {
11634 		for (i = 0, prq = rq_buf; i < count; i++, prq++) {
11635 			ocs_dma_free(ocs, &prq->dma);
11636 		}
11637 		ocs_free(hw->os, rq_buf, sizeof(*rq_buf) * count);
11638 	}
11639 }
11640 
11641 /**
11642  * @brief Allocate the RQ data buffers.
11643  *
11644  * @param hw Pointer to HW object.
11645  *
11646  * @return Returns 0 on success, or a non-zero value on failure.
11647  */
11648 ocs_hw_rtn_e
11649 ocs_hw_rx_allocate(ocs_hw_t *hw)
11650 {
11651 	ocs_t *ocs = hw->os;
11652 	uint32_t i;
11653 	int32_t rc = OCS_HW_RTN_SUCCESS;
11654 	uint32_t rqindex = 0;
11655 	hw_rq_t *rq;
11656 	uint32_t hdr_size = OCS_HW_RQ_SIZE_HDR;
11657 	uint32_t payload_size = hw->config.rq_default_buffer_size;
11658 
11659 	rqindex = 0;
11660 
11661 	for (i = 0; i < hw->hw_rq_count; i++) {
11662 		rq = hw->hw_rq[i];
11663 
11664 		/* Allocate header buffers */
11665 		rq->hdr_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, hdr_size);
11666 		if (rq->hdr_buf == NULL) {
11667 			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc hdr_buf failed\n");
11668 			rc = OCS_HW_RTN_ERROR;
11669 			break;
11670 		}
11671 
11672 		ocs_log_debug(hw->os, "rq[%2d] rq_id %02d header  %4d by %4d bytes\n", i, rq->hdr->id,
11673 			      rq->entry_count, hdr_size);
11674 
11675 		rqindex++;
11676 
11677 		/* Allocate payload buffers */
11678 		rq->payload_buf = ocs_hw_rx_buffer_alloc(hw, rqindex, rq->entry_count, payload_size);
11679 		if (rq->payload_buf == NULL) {
11680 			ocs_log_err(ocs, "ocs_hw_rx_buffer_alloc fb_buf failed\n");
11681 			rc = OCS_HW_RTN_ERROR;
11682 			break;
11683 		}
11684 		ocs_log_debug(hw->os, "rq[%2d] rq_id %02d default %4d by %4d bytes\n", i, rq->data->id,
11685 			      rq->entry_count, payload_size);
11686 		rqindex++;
11687 	}
11688 
11689 	return rc ? OCS_HW_RTN_ERROR : OCS_HW_RTN_SUCCESS;
11690 }
11691 
11692 /**
11693  * @brief Post the RQ data buffers to the chip.
11694  *
11695  * @param hw Pointer to HW object.
11696  *
11697  * @return Returns 0 on success, or a non-zero value on failure.
11698  */
11699 ocs_hw_rtn_e
11700 ocs_hw_rx_post(ocs_hw_t *hw)
11701 {
11702 	uint32_t i;
11703 	uint32_t idx;
11704 	uint32_t rq_idx;
11705 	int32_t rc = 0;
11706 
11707 	/*
11708 	 * In RQ pair mode, we MUST post the header and payload buffer at the
11709 	 * same time.
11710 	 */
11711 	for (rq_idx = 0, idx = 0; rq_idx < hw->hw_rq_count; rq_idx++) {
11712 		hw_rq_t *rq = hw->hw_rq[rq_idx];
11713 
11714 		for (i = 0; i < rq->entry_count-1; i++) {
11715 			ocs_hw_sequence_t *seq = ocs_array_get(hw->seq_pool, idx++);
11716 			ocs_hw_assert(seq != NULL);
11717 
11718 			seq->header = &rq->hdr_buf[i];
11719 
11720 			seq->payload = &rq->payload_buf[i];
11721 
11722 			rc = ocs_hw_sequence_free(hw, seq);
11723 			if (rc) {
11724 				break;
11725 			}
11726 		}
11727 		if (rc) {
11728 			break;
11729 		}
11730 	}
11731 
11732 	return rc;
11733 }
11734 
11735 /**
11736  * @brief Free the RQ data buffers.
11737  *
11738  * @param hw Pointer to HW object.
11739  *
11740  */
11741 void
11742 ocs_hw_rx_free(ocs_hw_t *hw)
11743 {
11744 	hw_rq_t *rq;
11745 	uint32_t i;
11746 
11747 	/* Free hw_rq buffers */
11748 	for (i = 0; i < hw->hw_rq_count; i++) {
11749 		rq = hw->hw_rq[i];
11750 		if (rq != NULL) {
11751 			ocs_hw_rx_buffer_free(hw, rq->hdr_buf, rq->entry_count);
11752 			rq->hdr_buf = NULL;
11753 			ocs_hw_rx_buffer_free(hw, rq->payload_buf, rq->entry_count);
11754 			rq->payload_buf = NULL;
11755 		}
11756 	}
11757 }
11758 
11759 /**
11760  * @brief HW async call context structure.
11761  */
11762 typedef struct {
11763 	ocs_hw_async_cb_t callback;
11764 	void *arg;
11765 	uint8_t cmd[SLI4_BMBX_SIZE];
11766 } ocs_hw_async_call_ctx_t;
11767 
11768 /**
11769  * @brief HW async callback handler
11770  *
11771  * @par Description
11772  * This function is called when the NOP mailbox command completes.  The callback stored
11773  * in the requesting context is invoked.
11774  *
11775  * @param hw Pointer to HW object.
11776  * @param status Completion status.
11777  * @param mqe Pointer to mailbox completion queue entry.
11778  * @param arg Caller-provided argument.
11779  *
11780  * @return None.
11781  */
11782 static void
11783 ocs_hw_async_cb(ocs_hw_t *hw, int32_t status, uint8_t *mqe, void *arg)
11784 {
11785 	ocs_hw_async_call_ctx_t *ctx = arg;
11786 
11787 	if (ctx != NULL) {
11788 		if (ctx->callback != NULL) {
11789 			(*ctx->callback)(hw, status, mqe, ctx->arg);
11790 		}
11791 		ocs_free(hw->os, ctx, sizeof(*ctx));
11792 	}
11793 }
11794 
11795 /**
11796  * @brief Make an async callback using NOP mailbox command
11797  *
11798  * @par Description
11799  * Post a NOP mailbox command; the callback with argument is invoked upon completion
11800  * while in the event processing context.
11801  *
11802  * @param hw Pointer to HW object.
11803  * @param callback Pointer to callback function.
11804  * @param arg Caller-provided callback.
11805  *
11806  * @return Returns 0 on success, or a negative error code value on failure.
11807  */
11808 int32_t
11809 ocs_hw_async_call(ocs_hw_t *hw, ocs_hw_async_cb_t callback, void *arg)
11810 {
11811 	int32_t rc = 0;
11812 	ocs_hw_async_call_ctx_t *ctx;
11813 
11814 	/*
11815 	 * Allocate a callback context (which includes the mailbox command buffer), we need
11816 	 * this to be persistent as the mailbox command submission may be queued and executed later
11817 	 * execution.
11818 	 */
11819 	ctx = ocs_malloc(hw->os, sizeof(*ctx), OCS_M_ZERO | OCS_M_NOWAIT);
11820 	if (ctx == NULL) {
11821 		ocs_log_err(hw->os, "failed to malloc async call context\n");
11822 		return OCS_HW_RTN_NO_MEMORY;
11823 	}
11824 	ctx->callback = callback;
11825 	ctx->arg = arg;
11826 
11827 	/* Build and send a NOP mailbox command */
11828 	if (sli_cmd_common_nop(&hw->sli, ctx->cmd, sizeof(ctx->cmd), 0) == 0) {
11829 		ocs_log_err(hw->os, "COMMON_NOP format failure\n");
11830 		ocs_free(hw->os, ctx, sizeof(*ctx));
11831 		rc = -1;
11832 	}
11833 
11834 	if (ocs_hw_command(hw, ctx->cmd, OCS_CMD_NOWAIT, ocs_hw_async_cb, ctx)) {
11835 		ocs_log_err(hw->os, "COMMON_NOP command failure\n");
11836 		ocs_free(hw->os, ctx, sizeof(*ctx));
11837 		rc = -1;
11838 	}
11839 	return rc;
11840 }
11841 
11842 /**
11843  * @brief Initialize the reqtag pool.
11844  *
11845  * @par Description
11846  * The WQ request tag pool is initialized.
11847  *
11848  * @param hw Pointer to HW object.
11849  *
11850  * @return Returns 0 on success, or a negative error code value on failure.
11851  */
11852 ocs_hw_rtn_e
11853 ocs_hw_reqtag_init(ocs_hw_t *hw)
11854 {
11855 	if (hw->wq_reqtag_pool == NULL) {
11856 		hw->wq_reqtag_pool = ocs_pool_alloc(hw->os, sizeof(hw_wq_callback_t), 65536, TRUE);
11857 		if (hw->wq_reqtag_pool == NULL) {
11858 			ocs_log_err(hw->os, "ocs_pool_alloc hw_wq_callback_t failed\n");
11859 			return OCS_HW_RTN_NO_MEMORY;
11860 		}
11861 	}
11862 	ocs_hw_reqtag_reset(hw);
11863 	return OCS_HW_RTN_SUCCESS;
11864 }
11865 
11866 /**
11867  * @brief Allocate a WQ request tag.
11868  *
11869  * Allocate and populate a WQ request tag from the WQ request tag pool.
11870  *
11871  * @param hw Pointer to HW object.
11872  * @param callback Callback function.
11873  * @param arg Pointer to callback argument.
11874  *
11875  * @return Returns pointer to allocated WQ request tag, or NULL if object cannot be allocated.
11876  */
11877 hw_wq_callback_t *
11878 ocs_hw_reqtag_alloc(ocs_hw_t *hw, void (*callback)(void *arg, uint8_t *cqe, int32_t status), void *arg)
11879 {
11880 	hw_wq_callback_t *wqcb;
11881 
11882 	ocs_hw_assert(callback != NULL);
11883 
11884 	wqcb = ocs_pool_get(hw->wq_reqtag_pool);
11885 	if (wqcb != NULL) {
11886 		ocs_hw_assert(wqcb->callback == NULL);
11887 		wqcb->callback = callback;
11888 		wqcb->arg = arg;
11889 	}
11890 	return wqcb;
11891 }
11892 
11893 /**
11894  * @brief Free a WQ request tag.
11895  *
11896  * Free the passed in WQ request tag.
11897  *
11898  * @param hw Pointer to HW object.
11899  * @param wqcb Pointer to WQ request tag object to free.
11900  *
11901  * @return None.
11902  */
11903 void
11904 ocs_hw_reqtag_free(ocs_hw_t *hw, hw_wq_callback_t *wqcb)
11905 {
11906 	ocs_hw_assert(wqcb->callback != NULL);
11907 	wqcb->callback = NULL;
11908 	wqcb->arg = NULL;
11909 	ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11910 }
11911 
11912 /**
11913  * @brief Return WQ request tag by index.
11914  *
11915  * @par Description
11916  * Return pointer to WQ request tag object given an index.
11917  *
11918  * @param hw Pointer to HW object.
11919  * @param instance_index Index of WQ request tag to return.
11920  *
11921  * @return Pointer to WQ request tag, or NULL.
11922  */
11923 hw_wq_callback_t *
11924 ocs_hw_reqtag_get_instance(ocs_hw_t *hw, uint32_t instance_index)
11925 {
11926 	hw_wq_callback_t *wqcb;
11927 
11928 	wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, instance_index);
11929 	if (wqcb == NULL) {
11930 		ocs_log_err(hw->os, "wqcb for instance %d is null\n", instance_index);
11931 	}
11932 	return wqcb;
11933 }
11934 
11935 /**
11936  * @brief Reset the WQ request tag pool.
11937  *
11938  * @par Description
11939  * Reset the WQ request tag pool, returning all to the free list.
11940  *
11941  * @param hw pointer to HW object.
11942  *
11943  * @return None.
11944  */
11945 void
11946 ocs_hw_reqtag_reset(ocs_hw_t *hw)
11947 {
11948 	hw_wq_callback_t *wqcb;
11949 	uint32_t i;
11950 
11951 	/* Remove all from freelist */
11952 	while(ocs_pool_get(hw->wq_reqtag_pool) != NULL) {
11953 		;
11954 	}
11955 
11956 	/* Put them all back */
11957 	for (i = 0; ((wqcb = ocs_pool_get_instance(hw->wq_reqtag_pool, i)) != NULL); i++) {
11958 		wqcb->instance_index = i;
11959 		wqcb->callback = NULL;
11960 		wqcb->arg = NULL;
11961 		ocs_pool_put(hw->wq_reqtag_pool, wqcb);
11962 	}
11963 }
11964 
11965 /**
11966  * @brief Handle HW assertion
11967  *
11968  * HW assert, display diagnostic message, and abort.
11969  *
11970  * @param cond string describing failing assertion condition
11971  * @param filename file name
11972  * @param linenum line number
11973  *
11974  * @return none
11975  */
11976 void
11977 _ocs_hw_assert(const char *cond, const char *filename, int linenum)
11978 {
11979 	ocs_printf("%s(%d): HW assertion (%s) failed\n", filename, linenum, cond);
11980 	ocs_abort();
11981 		/* no return */
11982 }
11983 
11984 /**
11985  * @brief Handle HW verify
11986  *
11987  * HW verify, display diagnostic message, dump stack and return.
11988  *
11989  * @param cond string describing failing verify condition
11990  * @param filename file name
11991  * @param linenum line number
11992  *
11993  * @return none
11994  */
11995 void
11996 _ocs_hw_verify(const char *cond, const char *filename, int linenum)
11997 {
11998 	ocs_printf("%s(%d): HW verify (%s) failed\n", filename, linenum, cond);
11999 	ocs_print_stack();
12000 }
12001 
12002 /**
12003  * @brief Reque XRI
12004  *
12005  * @par Description
12006  * Reque XRI
12007  *
12008  * @param hw Pointer to HW object.
12009  * @param io Pointer to HW IO
12010  *
12011  * @return Return 0 if successful else returns -1
12012  */
12013 int32_t
12014 ocs_hw_reque_xri( ocs_hw_t *hw, ocs_hw_io_t *io )
12015 {
12016 	int32_t rc = 0;
12017 
12018 	rc = ocs_hw_rqpair_auto_xfer_rdy_buffer_post(hw, io, 1);
12019 	if (rc) {
12020 		ocs_list_add_tail(&hw->io_port_dnrx, io);
12021 		rc = -1;
12022 		goto exit_ocs_hw_reque_xri;
12023 	}
12024 
12025 	io->auto_xfer_rdy_dnrx = 0;
12026 	io->type = OCS_HW_IO_DNRX_REQUEUE;
12027 	if (sli_requeue_xri_wqe(&hw->sli, io->wqe.wqebuf, hw->sli.config.wqe_size, io->indicator, OCS_HW_REQUE_XRI_REGTAG, SLI4_CQ_DEFAULT)) {
12028 		/* Clear buffer from XRI */
12029 		ocs_pool_put(hw->auto_xfer_rdy_buf_pool, io->axr_buf);
12030 		io->axr_buf = NULL;
12031 
12032 		ocs_log_err(hw->os, "requeue_xri WQE error\n");
12033 		ocs_list_add_tail(&hw->io_port_dnrx, io);
12034 
12035 		rc = -1;
12036 		goto exit_ocs_hw_reque_xri;
12037 	}
12038 
12039 	if (io->wq == NULL) {
12040 		io->wq = ocs_hw_queue_next_wq(hw, io);
12041 		ocs_hw_assert(io->wq != NULL);
12042 	}
12043 
12044 	/*
12045 	 * Add IO to active io wqe list before submitting, in case the
12046 	 * wcqe processing preempts this thread.
12047 	 */
12048 	OCS_STAT(hw->tcmd_wq_submit[io->wq->instance]++);
12049 	OCS_STAT(io->wq->use_count++);
12050 
12051 	rc = hw_wq_write(io->wq, &io->wqe);
12052 	if (rc < 0) {
12053 		ocs_log_err(hw->os, "sli_queue_write reque xri failed: %d\n", rc);
12054 		rc = -1;
12055 	}
12056 
12057 exit_ocs_hw_reque_xri:
12058 	return 0;
12059 }
12060 
12061 uint32_t
12062 ocs_hw_get_def_wwn(ocs_t *ocs, uint32_t chan, uint64_t *wwpn, uint64_t *wwnn)
12063 {
12064 	sli4_t *sli4 = &ocs->hw.sli;
12065 	ocs_dma_t       dma;
12066 	uint8_t		*payload = NULL;
12067 
12068 	int indicator = sli4->config.extent[SLI_RSRC_FCOE_VPI].base[0] + chan;
12069 
12070 	/* allocate memory for the service parameters */
12071 	if (ocs_dma_alloc(ocs, &dma, 112, 4)) {
12072 		ocs_log_err(ocs, "Failed to allocate DMA memory\n");
12073 		return 1;
12074 	}
12075 
12076 	if (0 == sli_cmd_read_sparm64(sli4, sli4->bmbx.virt, SLI4_BMBX_SIZE,
12077 				&dma, indicator)) {
12078 		ocs_log_err(ocs, "READ_SPARM64 allocation failure\n");
12079 		ocs_dma_free(ocs, &dma);
12080 		return 1;
12081 	}
12082 
12083 	if (sli_bmbx_command(sli4)) {
12084 		ocs_log_err(ocs, "READ_SPARM64 command failure\n");
12085 		ocs_dma_free(ocs, &dma);
12086 		return 1;
12087 	}
12088 
12089 	payload = dma.virt;
12090 	ocs_memcpy(wwpn, payload + SLI4_READ_SPARM64_WWPN_OFFSET, sizeof(*wwpn));
12091 	ocs_memcpy(wwnn, payload + SLI4_READ_SPARM64_WWNN_OFFSET, sizeof(*wwnn));
12092 	ocs_dma_free(ocs, &dma);
12093 	return 0;
12094 }
12095 
12096 /**
12097  * @page fc_hw_api_overview HW APIs
12098  * - @ref devInitShutdown
12099  * - @ref domain
12100  * - @ref port
12101  * - @ref node
12102  * - @ref io
12103  * - @ref interrupt
12104  *
12105  * <div class="overview">
12106  * The Hardware Abstraction Layer (HW) insulates the higher-level code from the SLI-4
12107  * message details, but the higher level code must still manage domains, ports,
12108  * IT nexuses, and IOs. The HW API is designed to help the higher level manage
12109  * these objects.<br><br>
12110  *
12111  * The HW uses function callbacks to notify the higher-level code of events
12112  * that are received from the chip. There are currently three types of
12113  * functions that may be registered:
12114  *
12115  * <ul><li>domain – This function is called whenever a domain event is generated
12116  * within the HW. Examples include a new FCF is discovered, a connection
12117  * to a domain is disrupted, and allocation callbacks.</li>
12118  * <li>unsolicited – This function is called whenever new data is received in
12119  * the SLI-4 receive queue.</li>
12120  * <li>rnode – This function is called for remote node events, such as attach status
12121  * and  allocation callbacks.</li></ul>
12122  *
12123  * Upper layer functions may be registered by using the ocs_hw_callback() function.
12124  *
12125  * <img src="elx_fc_hw.jpg" alt="FC/FCoE HW" title="FC/FCoE HW" align="right"/>
12126  * <h2>FC/FCoE HW API</h2>
12127  * The FC/FCoE HW component builds upon the SLI-4 component to establish a flexible
12128  * interface for creating the necessary common objects and sending I/Os. It may be used
12129  * “as is” in customer implementations or it can serve as an example of typical interactions
12130  * between a driver and the SLI-4 hardware. The broad categories of functionality include:
12131  *
12132  * <ul><li>Setting-up and tearing-down of the HW.</li>
12133  * <li>Allocating and using the common objects (SLI Port, domain, remote node).</li>
12134  * <li>Sending and receiving I/Os.</li></ul>
12135  *
12136  * <h3>HW Setup</h3>
12137  * To set up the HW:
12138  *
12139  * <ol>
12140  * <li>Set up the HW object using ocs_hw_setup().<br>
12141  * This step performs a basic configuration of the SLI-4 component and the HW to
12142  * enable querying the hardware for its capabilities. At this stage, the HW is not
12143  * capable of general operations (such as, receiving events or sending I/Os).</li><br><br>
12144  * <li>Configure the HW according to the driver requirements.<br>
12145  * The HW provides functions to discover hardware capabilities (ocs_hw_get()), as
12146  * well as configures the amount of resources required (ocs_hw_set()). The driver
12147  * must also register callback functions (ocs_hw_callback()) to receive notification of
12148  * various asynchronous events.<br><br>
12149  * @b Note: Once configured, the driver must initialize the HW (ocs_hw_init()). This
12150  * step creates the underlying queues, commits resources to the hardware, and
12151  * prepares the hardware for operation. While the hardware is operational, the
12152  * port is not online, and cannot send or receive data.</li><br><br>
12153  * <br><br>
12154  * <li>Finally, the driver can bring the port online (ocs_hw_port_control()).<br>
12155  * When the link comes up, the HW determines if a domain is present and notifies the
12156  * driver using the domain callback function. This is the starting point of the driver's
12157  * interaction with the common objects.<br><br>
12158  * @b Note: For FCoE, there may be more than one domain available and, therefore,
12159  * more than one callback.</li>
12160  * </ol>
12161  *
12162  * <h3>Allocating and Using Common Objects</h3>
12163  * Common objects provide a mechanism through which the various OneCore Storage
12164  * driver components share and track information. These data structures are primarily
12165  * used to track SLI component information but can be extended by other components, if
12166  * needed. The main objects are:
12167  *
12168  * <ul><li>DMA – the ocs_dma_t object describes a memory region suitable for direct
12169  * memory access (DMA) transactions.</li>
12170  * <li>SCSI domain – the ocs_domain_t object represents the SCSI domain, including
12171  * any infrastructure devices such as FC switches and FC forwarders. The domain
12172  * object contains both an FCFI and a VFI.</li>
12173  * <li>SLI Port (sport) – the ocs_sli_port_t object represents the connection between
12174  * the driver and the SCSI domain. The SLI Port object contains a VPI.</li>
12175  * <li>Remote node – the ocs_remote_node_t represents a connection between the SLI
12176  * Port and another device in the SCSI domain. The node object contains an RPI.</li></ul>
12177  *
12178  * Before the driver can send I/Os, it must allocate the SCSI domain, SLI Port, and remote
12179  * node common objects and establish the connections between them. The goal is to
12180  * connect the driver to the SCSI domain to exchange I/Os with other devices. These
12181  * common object connections are shown in the following figure, FC Driver Common Objects:
12182  * <img src="elx_fc_common_objects.jpg"
12183  * alt="FC Driver Common Objects" title="FC Driver Common Objects" align="center"/>
12184  *
12185  * The first step is to create a connection to the domain by allocating an SLI Port object.
12186  * The SLI Port object represents a particular FC ID and must be initialized with one. With
12187  * the SLI Port object, the driver can discover the available SCSI domain(s). On identifying
12188  * a domain, the driver allocates a domain object and attaches to it using the previous SLI
12189  * port object.<br><br>
12190  *
12191  * @b Note: In some cases, the driver may need to negotiate service parameters (that is,
12192  * FLOGI) with the domain before attaching.<br><br>
12193  *
12194  * Once attached to the domain, the driver can discover and attach to other devices
12195  * (remote nodes). The exact discovery method depends on the driver, but it typically
12196  * includes using a position map, querying the fabric name server, or an out-of-band
12197  * method. In most cases, it is necessary to log in with devices before performing I/Os.
12198  * Prior to sending login-related ELS commands (ocs_hw_srrs_send()), the driver must
12199  * allocate a remote node object (ocs_hw_node_alloc()). If the login negotiation is
12200  * successful, the driver must attach the nodes (ocs_hw_node_attach()) to the SLI Port
12201  * before exchanging FCP I/O.<br><br>
12202  *
12203  * @b Note: The HW manages both the well known fabric address and the name server as
12204  * nodes in the domain. Therefore, the driver must allocate node objects prior to
12205  * communicating with either of these entities.
12206  *
12207  * <h3>Sending and Receiving I/Os</h3>
12208  * The HW provides separate interfaces for sending BLS/ ELS/ FC-CT and FCP, but the
12209  * commands are conceptually similar. Since the commands complete asynchronously,
12210  * the caller must provide a HW I/O object that maintains the I/O state, as well as
12211  * provide a callback function. The driver may use the same callback function for all I/O
12212  * operations, but each operation must use a unique HW I/O object. In the SLI-4
12213  * architecture, there is a direct association between the HW I/O object and the SGL used
12214  * to describe the data. Therefore, a driver typically performs the following operations:
12215  *
12216  * <ul><li>Allocates a HW I/O object (ocs_hw_io_alloc()).</li>
12217  * <li>Formats the SGL, specifying both the HW I/O object and the SGL.
12218  * (ocs_hw_io_init_sges() and ocs_hw_io_add_sge()).</li>
12219  * <li>Sends the HW I/O (ocs_hw_io_send()).</li></ul>
12220  *
12221  * <h3>HW Tear Down</h3>
12222  * To tear-down the HW:
12223  *
12224  * <ol><li>Take the port offline (ocs_hw_port_control()) to prevent receiving further
12225  * data andevents.</li>
12226  * <li>Destroy the HW object (ocs_hw_teardown()).</li>
12227  * <li>Free any memory used by the HW, such as buffers for unsolicited data.</li></ol>
12228  * <br>
12229  * </div><!-- overview -->
12230  *
12231  */
12232 
12233 
12234 
12235 
12236 /**
12237  * This contains all hw runtime workaround code.  Based on the asic type,
12238  * asic revision, and range of fw revisions, a particular workaround may be enabled.
12239  *
12240  * A workaround may consist of overriding a particular HW/SLI4 value that was initialized
12241  * during ocs_hw_setup() (for example the MAX_QUEUE overrides for mis-reported queue
12242  * sizes). Or if required, elements of the ocs_hw_workaround_t structure may be set to
12243  * control specific runtime behavior.
12244  *
12245  * It is intended that the controls in ocs_hw_workaround_t be defined functionally.  So we
12246  * would have the driver look like:  "if (hw->workaround.enable_xxx) then ...", rather than
12247  * what we might previously see as "if this is a BE3, then do xxx"
12248  *
12249  */
12250 
12251 
12252 #define HW_FWREV_ZERO		(0ull)
12253 #define HW_FWREV_MAX		(~0ull)
12254 
12255 #define SLI4_ASIC_TYPE_ANY	0
12256 #define SLI4_ASIC_REV_ANY	0
12257 
12258 /**
12259  * @brief Internal definition of workarounds
12260  */
12261 
12262 typedef enum {
12263 	HW_WORKAROUND_TEST = 1,
12264 	HW_WORKAROUND_MAX_QUEUE,	/**< Limits all queues */
12265 	HW_WORKAROUND_MAX_RQ,		/**< Limits only the RQ */
12266 	HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH,
12267 	HW_WORKAROUND_WQE_COUNT_METHOD,
12268 	HW_WORKAROUND_RQE_COUNT_METHOD,
12269 	HW_WORKAROUND_USE_UNREGISTERD_RPI,
12270 	HW_WORKAROUND_DISABLE_AR_TGT_DIF, /**< Disable of auto-response target DIF */
12271 	HW_WORKAROUND_DISABLE_SET_DUMP_LOC,
12272 	HW_WORKAROUND_USE_DIF_QUARANTINE,
12273 	HW_WORKAROUND_USE_DIF_SEC_XRI,		/**< Use secondary xri for multiple data phases */
12274 	HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB,	/**< FCFI reported in SRB not correct, use "first" registered domain */
12275 	HW_WORKAROUND_FW_VERSION_TOO_LOW,	/**< The FW version is not the min version supported by this driver */
12276 	HW_WORKAROUND_SGLC_MISREPORTED,	/**< Chip supports SGL Chaining but SGLC is not set in SLI4_PARAMS */
12277 	HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE,	/**< Don't use SEND_FRAME capable if FW version is too old */
12278 } hw_workaround_e;
12279 
12280 /**
12281  * @brief Internal workaround structure instance
12282  */
12283 
12284 typedef struct {
12285 	sli4_asic_type_e asic_type;
12286 	sli4_asic_rev_e asic_rev;
12287 	uint64_t fwrev_low;
12288 	uint64_t fwrev_high;
12289 
12290 	hw_workaround_e workaround;
12291 	uint32_t value;
12292 } hw_workaround_t;
12293 
12294 static hw_workaround_t hw_workarounds[] = {
12295 	{SLI4_ASIC_TYPE_ANY,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12296 		HW_WORKAROUND_TEST, 999},
12297 
12298 	/* Bug: 127585: if_type == 2 returns 0 for total length placed on
12299 	 * FCP_TSEND64_WQE completions.   Note, original driver code enables this
12300 	 * workaround for all asic types
12301 	 */
12302 	{SLI4_ASIC_TYPE_ANY,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12303 		HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH, 0},
12304 
12305 	/* Bug: unknown, Lancer A0 has mis-reported max queue depth */
12306 	{SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_A0, HW_FWREV_ZERO, HW_FWREV_MAX,
12307 		HW_WORKAROUND_MAX_QUEUE, 2048},
12308 
12309 	/* Bug: 143399, BE3 has mis-reported max RQ queue depth */
12310 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,6,293,0),
12311 		HW_WORKAROUND_MAX_RQ, 2048},
12312 
12313 	/* Bug: 143399, skyhawk has mis-reported max RQ queue depth */
12314 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(10,0,594,0),
12315 		HW_WORKAROUND_MAX_RQ, 2048},
12316 
12317 	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported WQE count method */
12318 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12319 		HW_WORKAROUND_WQE_COUNT_METHOD, 1},
12320 
12321 	/* Bug: 103487, BE3 before f/w 4.2.314.0 has mis-reported RQE count method */
12322 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(4,2,314,0),
12323 		HW_WORKAROUND_RQE_COUNT_METHOD, 1},
12324 
12325 	/* Bug: 142968, BE3 UE with RPI == 0xffff */
12326 	{SLI4_ASIC_TYPE_BE3,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12327 		HW_WORKAROUND_USE_UNREGISTERD_RPI, 0},
12328 
12329 	/* Bug: unknown, Skyhawk won't support auto-response on target T10-PI  */
12330 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12331 		HW_WORKAROUND_DISABLE_AR_TGT_DIF, 0},
12332 
12333 	{SLI4_ASIC_TYPE_LANCER,	SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV(1,1,65,0),
12334 		HW_WORKAROUND_DISABLE_SET_DUMP_LOC, 0},
12335 
12336 	/* Bug: 160124, Skyhawk quarantine DIF XRIs  */
12337 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12338 		HW_WORKAROUND_USE_DIF_QUARANTINE, 0},
12339 
12340 	/* Bug: 161832, Skyhawk use secondary XRI for multiple data phase TRECV */
12341 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12342 		HW_WORKAROUND_USE_DIF_SEC_XRI, 0},
12343 
12344 	/* Bug: xxxxxx, FCFI reported in SRB not corrrect */
12345 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12346 		HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB, 0},
12347 #if 0
12348 	/* Bug: 165642, FW version check for driver */
12349 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_LANCER),
12350 		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12351 #endif
12352 	{SLI4_ASIC_TYPE_SKYHAWK, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_1(OCS_MIN_FW_VER_SKYHAWK),
12353 		HW_WORKAROUND_FW_VERSION_TOO_LOW, 0},
12354 
12355 	/* Bug 177061, Lancer FW does not set the SGLC bit */
12356 	{SLI4_ASIC_TYPE_LANCER, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12357 		HW_WORKAROUND_SGLC_MISREPORTED, 0},
12358 
12359 	/* BZ 181208/183914, enable this workaround for ALL revisions */
12360 	{SLI4_ASIC_TYPE_ANY, SLI4_ASIC_REV_ANY, HW_FWREV_ZERO, HW_FWREV_MAX,
12361 		HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE, 0},
12362 };
12363 
12364 /**
12365  * @brief Function prototypes
12366  */
12367 
12368 static int32_t ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w);
12369 
12370 /**
12371  * @brief Parse the firmware version (name)
12372  *
12373  * Parse a string of the form a.b.c.d, returning a uint64_t packed as defined
12374  * by the HW_FWREV() macro
12375  *
12376  * @param fwrev_string pointer to the firmware string
12377  *
12378  * @return packed firmware revision value
12379  */
12380 
12381 static uint64_t
12382 parse_fw_version(const char *fwrev_string)
12383 {
12384 	int v[4] = {0};
12385 	const char *p;
12386 	int i;
12387 
12388 	for (p = fwrev_string, i = 0; *p && (i < 4); i ++) {
12389 		v[i] = ocs_strtoul(p, 0, 0);
12390 		while(*p && *p != '.') {
12391 			p ++;
12392 		}
12393 		if (*p) {
12394 			p ++;
12395 		}
12396 	}
12397 
12398 	/* Special case for bootleg releases with f/w rev 0.0.9999.0, set to max value */
12399 	if (v[2] == 9999) {
12400 		return HW_FWREV_MAX;
12401 	} else {
12402 		return HW_FWREV(v[0], v[1], v[2], v[3]);
12403 	}
12404 }
12405 
12406 /**
12407  * @brief Test for a workaround match
12408  *
12409  * Looks at the asic type, asic revision, and fw revision, and returns TRUE if match.
12410  *
12411  * @param hw Pointer to the HW structure
12412  * @param w Pointer to a workaround structure entry
12413  *
12414  * @return Return TRUE for a match
12415  */
12416 
12417 static int32_t
12418 ocs_hw_workaround_match(ocs_hw_t *hw, hw_workaround_t *w)
12419 {
12420 	return (((w->asic_type == SLI4_ASIC_TYPE_ANY) || (w->asic_type == hw->sli.asic_type)) &&
12421 		    ((w->asic_rev == SLI4_ASIC_REV_ANY) || (w->asic_rev == hw->sli.asic_rev)) &&
12422 		    (w->fwrev_low <= hw->workaround.fwrev) &&
12423 		    ((w->fwrev_high == HW_FWREV_MAX) || (hw->workaround.fwrev < w->fwrev_high)));
12424 }
12425 
12426 /**
12427  * @brief Setup HW runtime workarounds
12428  *
12429  * The function is called at the end of ocs_hw_setup() to setup any runtime workarounds
12430  * based on the HW/SLI setup.
12431  *
12432  * @param hw Pointer to HW structure
12433  *
12434  * @return none
12435  */
12436 
12437 void
12438 ocs_hw_workaround_setup(struct ocs_hw_s *hw)
12439 {
12440 	hw_workaround_t *w;
12441 	sli4_t *sli4 = &hw->sli;
12442 	uint32_t i;
12443 
12444 	/* Initialize the workaround settings */
12445 	ocs_memset(&hw->workaround, 0, sizeof(hw->workaround));
12446 
12447 	/* If hw_war_version is non-null, then its a value that was set by a module parameter
12448 	 * (sorry for the break in abstraction, but workarounds are ... well, workarounds)
12449 	 */
12450 
12451 	if (hw->hw_war_version) {
12452 		hw->workaround.fwrev = parse_fw_version(hw->hw_war_version);
12453 	} else {
12454 		hw->workaround.fwrev = parse_fw_version((char*) sli4->config.fw_name[0]);
12455 	}
12456 
12457 	/* Walk the workaround list, if a match is found, then handle it */
12458 	for (i = 0, w = hw_workarounds; i < ARRAY_SIZE(hw_workarounds); i++, w++) {
12459 		if (ocs_hw_workaround_match(hw, w)) {
12460 			switch(w->workaround) {
12461 
12462 			case HW_WORKAROUND_TEST: {
12463 				ocs_log_debug(hw->os, "Override: test: %d\n", w->value);
12464 				break;
12465 			}
12466 
12467 			case HW_WORKAROUND_RETAIN_TSEND_IO_LENGTH: {
12468 				ocs_log_debug(hw->os, "HW Workaround: retain TSEND IO length\n");
12469 				hw->workaround.retain_tsend_io_length = 1;
12470 				break;
12471 			}
12472 			case HW_WORKAROUND_MAX_QUEUE: {
12473 				sli4_qtype_e q;
12474 
12475 				ocs_log_debug(hw->os, "HW Workaround: override max_qentries: %d\n", w->value);
12476 				for (q = SLI_QTYPE_EQ; q < SLI_QTYPE_MAX; q++) {
12477 					if (hw->num_qentries[q] > w->value) {
12478 						hw->num_qentries[q] = w->value;
12479 					}
12480 				}
12481 				break;
12482 			}
12483 			case HW_WORKAROUND_MAX_RQ: {
12484 				ocs_log_debug(hw->os, "HW Workaround: override RQ max_qentries: %d\n", w->value);
12485 				if (hw->num_qentries[SLI_QTYPE_RQ] > w->value) {
12486 					hw->num_qentries[SLI_QTYPE_RQ] = w->value;
12487 				}
12488 				break;
12489 			}
12490 			case HW_WORKAROUND_WQE_COUNT_METHOD: {
12491 				ocs_log_debug(hw->os, "HW Workaround: set WQE count method=%d\n", w->value);
12492 				sli4->config.count_method[SLI_QTYPE_WQ] = w->value;
12493 				sli_calc_max_qentries(sli4);
12494 				break;
12495 			}
12496 			case HW_WORKAROUND_RQE_COUNT_METHOD: {
12497 				ocs_log_debug(hw->os, "HW Workaround: set RQE count method=%d\n", w->value);
12498 				sli4->config.count_method[SLI_QTYPE_RQ] = w->value;
12499 				sli_calc_max_qentries(sli4);
12500 				break;
12501 			}
12502 			case HW_WORKAROUND_USE_UNREGISTERD_RPI:
12503 				ocs_log_debug(hw->os, "HW Workaround: use unreg'd RPI if rnode->indicator == 0xFFFF\n");
12504 				hw->workaround.use_unregistered_rpi = TRUE;
12505 				/*
12506 				 * Allocate an RPI that is never registered, to be used in the case where
12507 				 * a node has been unregistered, and its indicator (RPI) value is set to 0xFFFF
12508 				 */
12509 				if (sli_resource_alloc(&hw->sli, SLI_RSRC_FCOE_RPI, &hw->workaround.unregistered_rid,
12510 					&hw->workaround.unregistered_index)) {
12511 					ocs_log_err(hw->os, "sli_resource_alloc unregistered RPI failed\n");
12512 					hw->workaround.use_unregistered_rpi = FALSE;
12513 				}
12514 				break;
12515 			case HW_WORKAROUND_DISABLE_AR_TGT_DIF:
12516 				ocs_log_debug(hw->os, "HW Workaround: disable AR on T10-PI TSEND\n");
12517 				hw->workaround.disable_ar_tgt_dif = TRUE;
12518 				break;
12519 			case HW_WORKAROUND_DISABLE_SET_DUMP_LOC:
12520 				ocs_log_debug(hw->os, "HW Workaround: disable set_dump_loc\n");
12521 				hw->workaround.disable_dump_loc = TRUE;
12522 				break;
12523 			case HW_WORKAROUND_USE_DIF_QUARANTINE:
12524 				ocs_log_debug(hw->os, "HW Workaround: use DIF quarantine\n");
12525 				hw->workaround.use_dif_quarantine = TRUE;
12526 				break;
12527 			case HW_WORKAROUND_USE_DIF_SEC_XRI:
12528 				ocs_log_debug(hw->os, "HW Workaround: use DIF secondary xri\n");
12529 				hw->workaround.use_dif_sec_xri = TRUE;
12530 				break;
12531 			case HW_WORKAROUND_OVERRIDE_FCFI_IN_SRB:
12532 				ocs_log_debug(hw->os, "HW Workaround: override FCFI in SRB\n");
12533 				hw->workaround.override_fcfi = TRUE;
12534 				break;
12535 
12536 			case HW_WORKAROUND_FW_VERSION_TOO_LOW:
12537 				ocs_log_debug(hw->os, "HW Workaround: fw version is below the minimum for this driver\n");
12538 				hw->workaround.fw_version_too_low = TRUE;
12539 				break;
12540 			case HW_WORKAROUND_SGLC_MISREPORTED:
12541 				ocs_log_debug(hw->os, "HW Workaround: SGLC misreported - chaining is enabled\n");
12542 				hw->workaround.sglc_misreported = TRUE;
12543 				break;
12544 			case HW_WORKAROUND_IGNORE_SEND_FRAME_CAPABLE:
12545 				ocs_log_debug(hw->os, "HW Workaround: not SEND_FRAME capable - disabled\n");
12546 				hw->workaround.ignore_send_frame = TRUE;
12547 				break;
12548 			} /* switch(w->workaround) */
12549 		}
12550 	}
12551 }
12552